repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
vilmibm/done | parsedatetime/parsedatetime.py | _parse_date_rfc822 | python | def _parse_date_rfc822(dateString):
'''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
data = dateString.split()
if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
del data[0]
if len(data) == 4:
s = data[3]
i = s.find('+')
if i > 0:
data[3:] = [s[:i], s[i+1:]]
else:
data.append('')
dateString = " ".join(data)
if len(data) < 5:
dateString += ' 00:00:00 GMT'
return rfc822.parsedate_tz(dateString) | Parse an RFC822, RFC1123, RFC2822, or asctime-style date | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L154-L169 | null | #!/usr/bin/env python
"""
Parse human-readable date/time text.
"""
__license__ = """
Copyright (c) 2004-2008 Mike Taylor
Copyright (c) 2006-2008 Darshana Chhajed
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
_debug = False
import re
import time
import datetime
import rfc822
import parsedatetime_consts
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Originally a def inside of _parse_date_w3dtf()
def _extract_date(m):
year = int(m.group('year'))
if year < 100:
year = 100 * int(time.gmtime()[0] / 100) + int(year)
if year < 1000:
return 0, 0, 0
julian = m.group('julian')
if julian:
julian = int(julian)
month = julian / 30 + 1
day = julian % 30 + 1
jday = None
while jday != julian:
t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
jday = time.gmtime(t)[-2]
diff = abs(jday - julian)
if jday > julian:
if diff < day:
day = day - diff
else:
month = month - 1
day = 31
elif jday < julian:
if day + diff < 28:
day = day + diff
else:
month = month + 1
return year, month, day
month = m.group('month')
day = 1
if month is None:
month = 1
else:
month = int(month)
day = m.group('day')
if day:
day = int(day)
else:
day = 1
return year, month, day
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Originally a def inside of _parse_date_w3dtf()
def _extract_time(m):
if not m:
return 0, 0, 0
hours = m.group('hours')
if not hours:
return 0, 0, 0
hours = int(hours)
minutes = int(m.group('minutes'))
seconds = m.group('seconds')
if seconds:
seconds = int(seconds)
else:
seconds = 0
return hours, minutes, seconds
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Modified to return a tuple instead of mktime
#
# Original comment:
# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license. Removed all range checking
# for month, day, hour, minute, and second, since mktime will normalize
# these later
def _parse_date_w3dtf(dateString):
    """Parse a W3CDTF / ISO8601-style date string.

    @param dateString: text such as '2006-05-21' or '2006-05-21T13:45:00Z'
    @return: a 9-item tuple C{(yr, mth, dy, hr, mn, sec, 0, 0, 0)} suitable
             for C{struct_time} use, or C{None} when the whole string does
             not match the W3CDTF grammar.

    Fixes: regex fragments are now raw strings (the C{\\d} escapes were
    relying on Python passing unknown escapes through); the unused nested
    C{__extract_tzd} helper and its compiled pattern were removed -- this
    modified-from-feedparser version matches the timezone designator but
    deliberately never applies it to the result.
    """
    __date_re = (r'(?P<year>\d\d\d\d)'
                 r'(?:(?P<dsep>-|)'
                 r'(?:(?P<julian>\d\d\d)'
                 r'|(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?))?')
    __tzd_re = r'(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __time_re = (r'(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 r'(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # require the pattern to consume the entire input
    if (m is None) or (m.group() != dateString):
        return None
    return _extract_date(m) + _extract_time(m) + (0, 0, 0)
# Copied from feedparser.py
# Universal Feedparser
# Copyright (c) 2002-2006, Mark Pilgrim, All rights reserved.
# Modified to return a tuple instead of mktime
#
def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    tokens = dateString.split()
    # drop a leading day name such as "Mon," / "Mon." / "mon"
    leading = tokens[0]
    if leading[-1] in (',', '.') or leading.lower() in rfc822._daynames:
        del tokens[0]
    if len(tokens) == 4:
        # asctime-like form: the 4th token may fuse time and zone via '+'
        tail = tokens[3]
        plus = tail.find('+')
        if plus > 0:
            tokens[3:] = [tail[:plus], tail[plus + 1:]]
        else:
            tokens.append('')
        dateString = " ".join(tokens)
    # date-only input: give parsedate_tz an explicit midnight GMT time
    if len(tokens) < 5:
        dateString += ' 00:00:00 GMT'
    return rfc822.parsedate_tz(dateString)
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
# Values follow rfc822's convention: signed HHMM offsets west of UTC.
_additional_timezones = {'AT': -400, 'ET': -500,
                         'CT': -600, 'MT': -700,
                         'PT': -800}
# Register the aliases so rfc822.parsedate_tz() resolves them like the
# built-in zone names.
rfc822._timezones.update(_additional_timezones)
class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
"""
Default constructor for the L{Calendar} class.
@type constants: object
@param constants: Instance of the class L{parsedatetime_consts.Constants}
@rtype: object
@return: L{Calendar} instance
"""
# if a constants reference is not included, use default
if constants is None:
self.ptc = parsedatetime_consts.Constants()
else:
self.ptc = constants
self.weekdyFlag = False # monday/tuesday/...
self.dateStdFlag = False # 07/21/06
self.dateStrFlag = False # July 21st, 2006
self.timeStdFlag = False # 5:50
self.meridianFlag = False # am/pm
self.dayStrFlag = False # tomorrow/yesterday/today/..
self.timeStrFlag = False # lunch/noon/breakfast/...
self.modifierFlag = False # after/before/prev/next/..
self.modifier2Flag = False # after/before/prev/next/..
self.unitsFlag = False # hrs/weeks/yrs/min/..
self.qunitsFlag = False # h/m/t/d..
self.timeFlag = 0
self.dateFlag = 0
    def _convertUnitAsWords(self, unitText):
        """
        Converts text units into their number value

        Five = 5
        Twenty Five = 25
        Two hundred twenty five = 225
        Two thousand and twenty five = 2025
        Two thousand twenty five = 2025

        @type  unitText: string
        @param unitText: number text to convert

        @rtype:  integer
        @return: numerical value of unitText
        """
        # TODO: implement this
        # NOTE(review): currently a stub -- always returns None regardless
        # of input; callers must not rely on the documented return value yet.
        pass
    def _buildTime(self, source, quantity, modifier, units):
        """
        Take C{quantity}, C{modifier} and C{unit} strings and convert them
        into values.  After converting, calcuate the time and return the
        adjusted sourceTime.

        @type  source:   time
        @param source:   time to use as the base (or source)
        @type  quantity: string
        @param quantity: quantity string
        @type  modifier: string
        @param modifier: how quantity and units modify the source time
        @type  units:    string
        @param units:    unit of the quantity (i.e. hours, days, months, etc)

        @rtype:  struct_time
        @return: C{struct_time} of the calculated time
        """
        if _debug:
            print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)

        if source is None:
            source = time.localtime()

        if quantity is None:
            quantity = ''
        else:
            quantity = quantity.strip()

        # empty quantity means "one unit"; non-numeric quantity collapses to 0
        if len(quantity) == 0:
            qty = 1
        else:
            try:
                qty = int(quantity)
            except ValueError:
                qty = 0

        # modifiers like "next"/"last" scale/sign the quantity
        if modifier in self.ptc.Modifiers:
            qty = qty * self.ptc.Modifiers[modifier]

        if units is None or units == '':
            units = 'dy'

        # plurals are handled by regex's (could be a bug tho)

        (yr, mth, dy, hr, mn, sec, _, _, _) = source

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        target = start

        # NOTE: the check order below matters -- 'month'/'mth' must be caught
        # by the endswith('th') test before the startswith('m') branch would
        # misread them as minutes
        if units.startswith('y'):
            target = self.inc(start, year=qty)
            self.dateFlag = 1
        elif units.endswith('th') or units.endswith('ths'):
            target = self.inc(start, month=qty)
            self.dateFlag = 1
        else:
            if units.startswith('d'):
                target = start + datetime.timedelta(days=qty)
                self.dateFlag = 1
            elif units.startswith('h'):
                target = start + datetime.timedelta(hours=qty)
                self.timeFlag = 2
            elif units.startswith('m'):
                target = start + datetime.timedelta(minutes=qty)
                self.timeFlag = 2
            elif units.startswith('s'):
                target = start + datetime.timedelta(seconds=qty)
                self.timeFlag = 2
            elif units.startswith('w'):
                target = start + datetime.timedelta(weeks=qty)
                self.dateFlag = 1

        return target.timetuple()
    def parseDate(self, dateString):
        """
        Parse short-form date strings::

            '05/28/2006' or '04.21'

        @type  dateString: string
        @param dateString: text to convert to a C{datetime}

        @rtype:  struct_time
        @return: calculated C{struct_time} value of dateString
        """
        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

        # values pulled from regex's will be stored here and later
        # assigned to mth, dy, yr based on information from the locale
        # -1 is used as the marker value because we want zero values
        # to be passed thru so they can be flagged as errors later
        v1 = -1
        v2 = -1
        v3 = -1

        # split the string on up to two separator matches (CRE_DATE2),
        # yielding one to three numeric fields
        s = dateString
        m = self.ptc.CRE_DATE2.search(s)
        if m is not None:
            index = m.start()
            v1 = int(s[:index])
            s = s[index + 1:]

        m = self.ptc.CRE_DATE2.search(s)
        if m is not None:
            index = m.start()
            v2 = int(s[:index])
            v3 = int(s[index + 1:])
        else:
            v2 = int(s.strip())

        v = [ v1, v2, v3 ]
        d = { 'm': mth, 'd': dy, 'y': yr }

        # dp_order gives the locale's field order (e.g. m/d/y for en_US);
        # only fields actually present (>= 0) override today's values
        for i in range(0, 3):
            n = v[i]
            c = self.ptc.dp_order[i]
            if n >= 0:
                d[c] = n

        # if the year is not specified and the date has already
        # passed, increment the year
        if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
            yr = d['y'] + 1
        else:
            yr = d['y']

        mth = d['m']
        dy = d['d']

        # birthday epoch constraint: two-digit years below the epoch are
        # treated as 20xx, the rest as 19xx
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900

        if _debug:
            print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)

        if (mth > 0 and mth <= 12) and \
           (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            self.dateFlag = 0
            self.timeFlag = 0

            sourceTime = time.localtime() # return current time if date
                                          # string is invalid

        return sourceTime
    def parseDateText(self, dateString):
        """
        Parse long-form date strings::

            'May 31st, 2006'
            'Jan 1st'
            'July 2006'

        @type  dateString: string
        @param dateString: text to convert to a datetime

        @rtype:  struct_time
        @return: calculated C{struct_time} value of dateString
        """
        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

        currentMth = mth
        currentDy = dy

        # CRE_DATE3 supplies named groups 'mthname', 'day', 'year';
        # month name is mandatory, day defaults to 1, year to this year
        s = dateString.lower()
        m = self.ptc.CRE_DATE3.search(s)
        mth = m.group('mthname')
        mth = self.ptc.MonthOffsets[mth]

        if m.group('day') != None:
            dy = int(m.group('day'))
        else:
            dy = 1

        if m.group('year') != None:
            yr = int(m.group('year'))

            # birthday epoch constraint: expand two-digit years
            if yr < self.ptc.BirthdayEpoch:
                yr += 2000
            elif yr < 100:
                yr += 1900

        elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
            # if that day and month have already passed in this year,
            # then increment the year by 1
            yr += 1

        if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            # Return current time if date string is invalid
            self.dateFlag = 0
            self.timeFlag = 0

            sourceTime = time.localtime()

        return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
startTime = ''
endTime = ''
startDate = ''
endDate = ''
rangeFlag = 0
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
s = s.replace(' ', ' ')
m = self.ptc.CRE_TIMERNG1.search(s)
if m is not None:
rangeFlag = 1
else:
m = self.ptc.CRE_TIMERNG2.search(s)
if m is not None:
rangeFlag = 2
else:
m = self.ptc.CRE_TIMERNG4.search(s)
if m is not None:
rangeFlag = 7
else:
m = self.ptc.CRE_TIMERNG3.search(s)
if m is not None:
rangeFlag = 3
else:
m = self.ptc.CRE_DATERNG1.search(s)
if m is not None:
rangeFlag = 4
else:
m = self.ptc.CRE_DATERNG2.search(s)
if m is not None:
rangeFlag = 5
else:
m = self.ptc.CRE_DATERNG3.search(s)
if m is not None:
rangeFlag = 6
if _debug:
print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
flag = 1
sourceTime, flag = self.parse(s, sourceTime)
if flag == 0:
sourceTime = None
else:
parseStr = s
if rangeFlag == 1:
m = re.search(self.ptc.rangeSep, parseStr)
startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
if (eflag != 0) and (sflag != 0):
return (startTime, endTime, 2)
elif rangeFlag == 2:
m = re.search(self.ptc.rangeSep, parseStr)
startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
if (eflag != 0) and (sflag != 0):
return (startTime, endTime, 2)
elif rangeFlag == 3 or rangeFlag == 7:
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
else:
startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
else:
startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)
if (eflag != 0) and (sflag != 0):
return (startTime, endTime, 2)
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
if (eflag != 0) and (sflag != 0):
return (startDate, endDate, 1)
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endDate = parseStr[(m.start() + 1):]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endDate)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startDate = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startDate)
startYear = date.group('year')
if startYear is None:
startDate = startDate + ', ' + endYear
else:
startDate = parseStr[:m.start()]
startDate, sflag = self.parse(startDate, sourceTime)
endDate, eflag = self.parse(endDate, sourceTime)
if (eflag != 0) and (sflag != 0):
return (startDate, endDate, 1)
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startDate = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startDate)
mth = mth.group('mthname')
# appending the month name to the end date
endDate = mth + parseStr[(m.start() + 1):]
startDate, sflag = self.parse(startDate, sourceTime)
endDate, eflag = self.parse(endDate, sourceTime)
if (eflag != 0) and (sflag != 0):
return (startDate, endDate, 1)
else:
# if range is not found
sourceTime = time.localtime()
return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indiacting current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurance of the DOW in the current week is calculated
diff = wkdy - wd
if _debug:
print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
return diff
    def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
        """
        Evaluate the C{modifier} string and following text (passed in
        as C{chunk1} and C{chunk2}) and if they match any known modifiers
        calculate the delta and apply it to C{sourceTime}.

        @type  modifier:   string
        @param modifier:   modifier text to apply to sourceTime
        @type  chunk1:     string
        @param chunk1:     first text chunk that followed modifier (if any)
        @type  chunk2:     string
        @param chunk2:     second text chunk that followed modifier (if any)
        @type  sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base

        @rtype:  tuple
        @return: tuple of: remaining text and the modified sourceTime
        """
        offset = self.ptc.Modifiers[modifier]

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

        # capture the units after the modifier and the remaining
        # string after the unit
        m = self.ptc.CRE_REMAINING.search(chunk2)
        if m is not None:
            index = m.start() + 1
            unit = chunk2[:m.start()]
            chunk2 = chunk2[index:]
        else:
            unit = chunk2
            chunk2 = ''

        flag = False

        # NOTE: the magic hour constants below (9, 17) appear to mean
        # "start of business day" and "end of business day" -- TODO confirm
        if unit == 'month' or \
           unit == 'mth' or \
           unit == 'm':
            if offset == 0:
                # "this month" -> last day of the current month
                dy = self.ptc.daysInMonth(mth, yr)
                sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
            elif offset == 2:
                # if day is the last day of the month, calculate the last day
                # of the next month
                if dy == self.ptc.daysInMonth(mth, yr):
                    dy = self.ptc.daysInMonth(mth + 1, yr)

                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                # self.inc is defined elsewhere in this class (not in view);
                # presumably a calendar-aware month/year increment
                target = self.inc(start, month=1)
                sourceTime = target.timetuple()
            else:
                start = datetime.datetime(yr, mth, 1, 9, 0, 0)
                target = self.inc(start, month=offset)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

        if unit == 'week' or \
           unit == 'wk' or \
           unit == 'w':
            if offset == 0:
                # "this week" -> the coming Friday (wd 4) at 17:00
                start = datetime.datetime(yr, mth, dy, 17, 0, 0)
                target = start + datetime.timedelta(days=(4 - wd))
                sourceTime = target.timetuple()
            elif offset == 2:
                # "next week" -> 7 days out at 09:00
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=7)
                sourceTime = target.timetuple()
            else:
                # other offsets: re-evaluate as "<modifier> monday <rest>"
                return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

            flag = True
            self.dateFlag = 1

        if unit == 'day' or \
           unit == 'dy' or \
           unit == 'd':
            if offset == 0:
                # "this day" -> today at 17:00
                sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
                self.timeFlag = 2
            elif offset == 2:
                start = datetime.datetime(yr, mth, dy, hr, mn, sec)
                target = start + datetime.timedelta(days=1)
                sourceTime = target.timetuple()
            else:
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=offset)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

        if unit == 'hour' or \
           unit == 'hr':
            if offset == 0:
                # "this hour" -> top of the current hour
                sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
            else:
                start = datetime.datetime(yr, mth, dy, hr, 0, 0)
                target = start + datetime.timedelta(hours=offset)
                sourceTime = target.timetuple()

            flag = True
            self.timeFlag = 2

        if unit == 'year' or \
           unit == 'yr' or \
           unit == 'y':
            if offset == 0:
                # "this year" -> December 31st of the current year
                sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
            elif offset == 2:
                sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
            else:
                sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

            flag = True
            self.dateFlag = 1

        if flag == False:
            # the unit was not a known duration word; try a weekday name
            m = self.ptc.CRE_WEEKDAY.match(unit)
            if m is not None:
                wkdy = m.group()
                self.dateFlag = 1

                if modifier == 'eod':
                    # Calculate the upcoming weekday
                    self.modifierFlag = False
                    (sourceTime, _) = self.parse(wkdy, sourceTime)
                    sources = self.ptc.buildSources(sourceTime)
                    self.timeFlag = 2

                    if modifier in sources:
                        sourceTime = sources[modifier]
                else:
                    wkdy = self.ptc.WeekdayOffsets[wkdy]
                    diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                                   self.ptc.DOWParseStyle,
                                                   self.ptc.CurrentDOWParseStyle)
                    start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                    target = start + datetime.timedelta(days=diff)
                    sourceTime = target.timetuple()

                flag = True
                self.dateFlag = 1

        if not flag:
            # not a weekday either; try a time-of-day expression
            m = self.ptc.CRE_TIME.match(unit)
            if m is not None:
                self.modifierFlag = False
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
                start = datetime.datetime(yr, mth, dy, hr, mn, sec)
                target = start + datetime.timedelta(days=offset)
                sourceTime = target.timetuple()
                flag = True
            else:
                self.modifierFlag = False

                # check if the remaining text is parsable and if so,
                # use it as the base time for the modifier source time
                t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

                if flag2 != 0:
                    sourceTime = t

                sources = self.ptc.buildSources(sourceTime)

                if modifier in sources:
                    sourceTime = sources[modifier]
                    flag = True
                    self.timeFlag = 2

        # if the word after next is a number, the string is more than likely
        # to be "next 4 hrs" which we will have to combine the units with the
        # rest of the string
        if not flag:
            if offset < 0:
                # if offset is negative, the unit has to be made negative
                unit = '-%s' % unit

            chunk2 = '%s %s' % (unit, chunk2)

        self.modifierFlag = False

        #return '%s %s' % (chunk1, chunk2), sourceTime
        return '%s' % chunk2, sourceTime
    def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
        """
        Evaluate the C{modifier} string and following text (passed in
        as C{chunk1} and C{chunk2}) and if they match any known modifiers
        calculate the delta and apply it to C{sourceTime}.

        @type  modifier:   string
        @param modifier:   modifier text to apply to C{sourceTime}
        @type  chunk1:     string
        @param chunk1:     first text chunk that followed modifier (if any)
        @type  chunk2:     string
        @param chunk2:     second text chunk that followed modifier (if any)
        @type  sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base

        @rtype:  tuple
        @return: tuple of: remaining text and the modified sourceTime
        """
        offset = self.ptc.Modifiers[modifier]
        digit = r'\d+'

        self.modifier2Flag = False

        # If the string after the negative modifier starts with digits,
        # then it is likely that the string is similar to ' before 3 days'
        # or 'evening prior to 3 days'.
        # In this case, the total time is calculated by subtracting '3 days'
        # from the current date.
        # So, we have to identify the quantity and negate it before parsing
        # the string.
        # This is not required for strings not starting with digits since the
        # string is enough to calculate the sourceTime
        if chunk2 != '':
            if offset < 0:
                m = re.match(digit, chunk2.strip())
                if m is not None:
                    qty = int(m.group()) * -1
                    chunk2 = chunk2[m.end():]
                    chunk2 = '%d%s' % (qty, chunk2)

            sourceTime, flag1 = self.parse(chunk2, sourceTime)
            if flag1 == 0:
                flag1 = True
            else:
                flag1 = False
            flag2 = False
        else:
            flag1 = False

        # NOTE(review): if both chunk1 and chunk2 are empty, the early
        # return below references flag2 before assignment (NameError) --
        # verify whether callers can ever pass two empty chunks.
        if chunk1 != '':
            if offset < 0:
                m = re.search(digit, chunk1.strip())
                if m is not None:
                    qty = int(m.group()) * -1
                    chunk1 = chunk1[m.end():]
                    chunk1 = '%d%s' % (qty, chunk1)

            # save the flags so they can be restored if chunk1 turns out
            # not to be a datetime
            tempDateFlag = self.dateFlag
            tempTimeFlag = self.timeFlag
            sourceTime2, flag2 = self.parse(chunk1, sourceTime)
        else:
            return sourceTime, (flag1 and flag2)

        # if chunk1 is not a datetime and chunk2 is then do not use datetime
        # value returned by parsing chunk1
        if not (flag1 == False and flag2 == 0):
            sourceTime = sourceTime2
        else:
            self.timeFlag = tempTimeFlag
            self.dateFlag = tempDateFlag

        return sourceTime, (flag1 and flag2)
    def _evalString(self, datetimeString, sourceTime=None):
        """
        Calculate the datetime based on flags set by the L{parse()} routine

        Examples handled::
            RFC822, W3CDTF formatted dates
            HH:MM[:SS][ am/pm]
            MM/DD/YYYY
            DD MMMM YYYY

        @type  datetimeString: string
        @param datetimeString: text to try and parse as more "traditional"
                               date/time text
        @type  sourceTime:     struct_time
        @param sourceTime:     C{struct_time} value to use as the base

        @rtype:  datetime
        @return: calculated C{struct_time} value or current C{struct_time}
                 if not parsed
        """
        s = datetimeString.strip()
        now = time.localtime()

        # Given string date is a RFC822 date
        if sourceTime is None:
            sourceTime = _parse_date_rfc822(s)

            if sourceTime is not None:
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
                self.dateFlag = 1

                # NOTE(review): 'and' here means the time is only flagged
                # when hour, minute AND second are all non-zero; 'or' looks
                # like the intent -- confirm before changing
                if (hr != 0) and (mn != 0) and (sec != 0):
                    self.timeFlag = 2

                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        # Given string date is a W3CDTF date
        if sourceTime is None:
            sourceTime = _parse_date_w3dtf(s)

            if sourceTime is not None:
                self.dateFlag = 1
                self.timeFlag = 2

        if sourceTime is None:
            s = s.lower()

        # Given string is in the format HH:MM(:SS)(am/pm)
        if self.meridianFlag:
            if sourceTime is None:
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
            else:
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                dt = s[:m.start('meridian')].strip()
                if len(dt) <= 2:
                    # bare hour like "5pm"
                    hr = int(dt)
                    mn = 0
                    sec = 0
                else:
                    hr, mn, sec = _extract_time(m)

                if hr == 24:
                    hr = 0

                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
                meridian = m.group('meridian').lower()

                # if 'am' found and hour is 12 - force hour to 0 (midnight)
                if (meridian in self.ptc.am) and hr == 12:
                    sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

                # if 'pm' found and hour < 12, add 12 to shift to evening
                if (meridian in self.ptc.pm) and hr < 12:
                    sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

            # invalid time
            if hr > 24 or mn > 59 or sec > 59:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

            self.meridianFlag = False

        # Given string is in the format HH:MM(:SS)
        if self.timeStdFlag:
            if sourceTime is None:
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
            else:
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                hr, mn, sec = _extract_time(m)
            if hr == 24:
                hr = 0

            if hr > 24 or mn > 59 or sec > 59:
                # invalid time
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0
            else:
                sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

            self.timeStdFlag = False

        # Given string is in the format 07/21/2006
        if self.dateStdFlag:
            sourceTime = self.parseDate(s)
            self.dateStdFlag = False

        # Given string is in the format "May 23rd, 2005"
        if self.dateStrFlag:
            sourceTime = self.parseDateText(s)
            self.dateStrFlag = False

        # Given string is a weekday
        if self.weekdyFlag:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            wkdy = self.ptc.WeekdayOffsets[s]

            # NOTE(review): both branches below are identical; the wkdy>wd
            # test currently has no effect
            if wkdy > wd:
                qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                              self.ptc.DOWParseStyle,
                                              self.ptc.CurrentDOWParseStyle)
            else:
                qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                              self.ptc.DOWParseStyle,
                                              self.ptc.CurrentDOWParseStyle)

            target = start + datetime.timedelta(days=qty)
            wd = wkdy

            sourceTime = target.timetuple()
            self.weekdyFlag = False

        # Given string is a natural language time string like
        # lunch, midnight, etc
        if self.timeStrFlag:
            if s in self.ptc.re_values['now']:
                sourceTime = now
            else:
                sources = self.ptc.buildSources(sourceTime)

                if s in sources:
                    sourceTime = sources[s]
                else:
                    sourceTime = now
                    self.dateFlag = 0
                    self.timeFlag = 0

            self.timeStrFlag = False

        # Given string is a natural language date string like today, tomorrow..
        if self.dayStrFlag:
            if sourceTime is None:
                sourceTime = now

            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

            if s in self.ptc.dayOffsets:
                offset = self.ptc.dayOffsets[s]
            else:
                offset = 0

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

            self.dayStrFlag = False

        # Given string is a time string with units like "5 hrs 30 min"
        if self.unitsFlag:
            modifier = '' # TODO

            if sourceTime is None:
                sourceTime = now

            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                units = m.group('units')
                quantity = s[:m.start('units')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
            self.unitsFlag = False

        # Given string is a time string with single char units like "5 h 30 m"
        if self.qunitsFlag:
            modifier = '' # TODO

            if sourceTime is None:
                sourceTime = now

            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                units = m.group('qunits')
                quantity = s[:m.start('qunits')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
            self.qunitsFlag = False

        # Given string does not match anything
        if sourceTime is None:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0

        return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type  datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                # single-argument print() works identically on Python 2 and 3
                print('coercing datetime to timetuple')
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s         = datetimeString.strip().lower()
    parseStr  = ''
    totalTime = sourceTime

    if s == '':
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # Repeatedly peel the first recognizable token off of s; each matched
    # token is evaluated at the bottom of the loop and removed from s.
    while len(s) > 0:
        flag   = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print('parse (top of loop): [%s][%s]' % (s, parseStr))

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1   = s[:m.start('modifier')].strip()
                    chunk2   = s[m.end('modifier'):].strip()
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1   = s[:m.start('modifier')].strip()
                    chunk2   = s[m.end('modifier'):].strip()
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag    = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1   = s[:m.start('date')]
                    chunk2   = s[m.end('date'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag    = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1   = s[:m.start('date')]
                    chunk2   = s[m.end('date'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag   = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1   = s[:m.start('day')]
                    chunk2   = s[m.end('day'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1   = s[:m.start('qty')].strip()
                    chunk2   = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the quantity
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1   = chunk1[:-1]
                    s    = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1   = s[:m.start('qty')].strip()
                    chunk2   = s[m.end('qty'):].strip()

                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1   = chunk1[:-1]
                    s    = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag   = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1   = s[:m.start('weekday')]
                        chunk2   = s[m.end('weekday'):]
                        s        = '%s %s' % (chunk1, chunk2)
                        flag     = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag    = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1   = s[:m.start('time')]
                    chunk2   = s[m.end('time'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag     = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))
                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]
                s      = '%s %s' % (chunk1, chunk2)
                flag   = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag    = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1   = s[:m.start('hours')]
                    chunk2   = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1   = s[:m.start('hours')]
                    chunk2   = s[m.end('minutes'):]
                s    = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print('parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2))
            print('weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' %
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag,
                   self.timeStdFlag, self.timeStrFlag, self.meridianFlag))
            print('dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' %
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag,
                   self.unitsFlag, self.qunitsFlag))

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)

            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0
            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr  = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime     = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type  source: datetime
    @param source: C{datetime} value to increment (must support
                   C{.year}/C{.month}/C{.day} and C{.replace()})
    @type  month:  integer
    @param month:  optional number of months to increment (may be negative)
    @type  year:   integer
    @param year:   optional number of years to increment

    @rtype:  datetime
    @return: C{source} incremented by the number of months and/or years,
             with the day clamped to the last day of the resulting month
    """
    yr  = source.year
    mth = source.month
    dy  = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0

        m = abs(mi)
        # NOTE: floor division -- '/' would yield a float on Python 3 and
        # break the replace() call below
        y = m // 12   # how many years are in month increment
        m = m % 12    # get remaining months

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y   -= 1        #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y   += 1        #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

        # if the day ends up past the last day of
        # the new month, set it to the last day
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)

    # replace() preserves the time-of-day fields of source;
    # (the original's "source + (d - source)" was equivalent to d)
    return source.replace(year=yr, month=mth, day=dy)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar._buildTime | python | def _buildTime(self, source, quantity, modifier, units):
"""
Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
After converting, calcuate the time and return the adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time
"""
if _debug:
print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
if source is None:
source = time.localtime()
if quantity is None:
quantity = ''
else:
quantity = quantity.strip()
if len(quantity) == 0:
qty = 1
else:
try:
qty = int(quantity)
except ValueError:
qty = 0
if modifier in self.ptc.Modifiers:
qty = qty * self.ptc.Modifiers[modifier]
if units is None or units == '':
units = 'dy'
# plurals are handled by regex's (could be a bug tho)
(yr, mth, dy, hr, mn, sec, _, _, _) = source
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start
if units.startswith('y'):
target = self.inc(start, year=qty)
self.dateFlag = 1
elif units.endswith('th') or units.endswith('ths'):
target = self.inc(start, month=qty)
self.dateFlag = 1
else:
if units.startswith('d'):
target = start + datetime.timedelta(days=qty)
self.dateFlag = 1
elif units.startswith('h'):
target = start + datetime.timedelta(hours=qty)
self.timeFlag = 2
elif units.startswith('m'):
target = start + datetime.timedelta(minutes=qty)
self.timeFlag = 2
elif units.startswith('s'):
target = start + datetime.timedelta(seconds=qty)
self.timeFlag = 2
elif units.startswith('w'):
target = start + datetime.timedelta(weeks=qty)
self.dateFlag = 1
return target.timetuple() | Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
After converting, calcuate the time and return the adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L237-L309 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type  constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants}

    @rtype:  object
    @return: L{Calendar} instance
    """
    # Use the supplied constants object, falling back to the defaults
    # when the caller does not provide one.
    self.ptc = parsedatetime_consts.Constants() if constants is None else constants

    # Token-recognition flags, set by parse() as it identifies chunks.
    self.weekdyFlag    = False  # monday/tuesday/...
    self.dateStdFlag   = False  # 07/21/06
    self.dateStrFlag   = False  # July 21st, 2006
    self.timeStdFlag   = False  # 5:50
    self.meridianFlag  = False  # am/pm
    self.dayStrFlag    = False  # tomorrow/yesterday/today/..
    self.timeStrFlag   = False  # lunch/noon/breakfast/...
    self.modifierFlag  = False  # after/before/prev/next/..
    self.modifier2Flag = False  # after/before/prev/next/..
    self.unitsFlag     = False  # hrs/weeks/yrs/min/..
    self.qunitsFlag    = False  # h/m/t/d..

    # Result flags: 0 = nothing, 1 = date, 2 = time (summed for datetime).
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

    Five = 5
    Twenty Five = 25
    Two hundred twenty five = 225
    Two thousand and twenty five = 2025
    Two thousand twenty five = 2025

    @type  unitText: string
    @param unitText: number text to convert

    @rtype:  integer
    @return: numerical value of unitText
    """
    # TODO: implement this
    # NOTE(review): currently a stub that always returns None; no caller in
    # this chunk depends on it, but implementing it would enable inputs such
    # as "twenty five days".
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{unit} strings and convert them
    into values. After converting, calculate the time and return the
    adjusted sourceTime.

    @type  source:   time
    @param source:   time to use as the base (or source); current local
                     time when C{None}
    @type  quantity: string
    @param quantity: quantity string (defaults to 1 when empty, 0 when
                     not an integer)
    @type  modifier: string
    @param modifier: how quantity and units modify the source time
    @type  units:    string
    @param units:    unit of the quantity (i.e. hours, days, months, etc)

    @rtype:  struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        # single-argument print() works identically on Python 2 and 3
        print('_buildTime: [%s][%s][%s]' % (quantity, modifier, units))

    if source is None:
        source = time.localtime()

    if quantity is None:
        quantity = ''
    else:
        quantity = quantity.strip()

    if len(quantity) == 0:
        qty = 1
    else:
        try:
            qty = int(quantity)
        except ValueError:
            qty = 0

    # a known modifier scales/negates the quantity (e.g. "last" -> -1)
    if modifier in self.ptc.Modifiers:
        qty = qty * self.ptc.Modifiers[modifier]

    if units is None or units == '':
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)

    (yr, mth, dy, hr, mn, sec, _, _, _) = source

    start  = datetime.datetime(yr, mth, dy, hr, mn, sec)
    target = start

    if units.startswith('y'):
        target = self.inc(start, year=qty)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        # 'month'/'months' -- matched by suffix so 'm' stays free for minutes
        target = self.inc(start, month=qty)
        self.dateFlag = 1
    else:
        if units.startswith('d'):
            target = start + datetime.timedelta(days=qty)
            self.dateFlag = 1
        elif units.startswith('h'):
            target = start + datetime.timedelta(hours=qty)
            self.timeFlag = 2
        elif units.startswith('m'):
            target = start + datetime.timedelta(minutes=qty)
            self.timeFlag = 2
        elif units.startswith('s'):
            target = start + datetime.timedelta(seconds=qty)
            self.timeFlag = 2
        elif units.startswith('w'):
            target = start + datetime.timedelta(weeks=qty)
            self.dateFlag = 1

    return target.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type  dateString: string
    @param dateString: text to convert to a C{datetime}

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString, or the
             current local time if the date string is invalid
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1

    s = dateString
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1    = int(s[:index])
        s     = s[index + 1:]

    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2    = int(s[:index])
        v3    = int(s[index + 1:])
    else:
        v2 = int(s.strip())

    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }

    # dp_order gives the locale's month/day/year ordering for the
    # positional values parsed above
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n

    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']

    mth = d['m']
    dy  = d['d']

    # birthday epoch constraint: two-digit years map to 20xx below the
    # epoch, 19xx otherwise
    if yr < self.ptc.BirthdayEpoch:
        yr += 2000
    elif yr < 100:
        yr += 1900

    if _debug:
        # %-formatted single-argument print() works on Python 2 and 3
        print('parseDate: %s %s %s %s' % (yr, mth, dy, self.ptc.daysInMonth(mth, yr)))

    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        self.dateFlag = 0
        self.timeFlag = 0

        sourceTime = time.localtime()   # return current time if date
                                        # string is invalid

    return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type  dateString: string
    @param dateString: text to convert to a datetime

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # remember "today" so a year-less date that has already passed can be
    # pushed into next year
    nowMonth = mth
    nowDay   = dy

    match = self.ptc.CRE_DATE3.search(dateString.lower())

    mth = self.ptc.MonthOffsets[match.group('mthname')]
    dy  = int(match.group('day')) if match.group('day') is not None else 1

    yearText = match.group('year')
    if yearText is not None:
        yr = int(yearText)

        # birthday epoch constraint: two-digit years map to 20xx below
        # the epoch, 19xx otherwise
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth < nowMonth) or (mth == nowMonth and dy < nowDay):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += 1

    if 0 < dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # invalid day-of-month: flag the failure and fall back to "now"
        self.dateFlag = 0
        self.timeFlag = 0

        sourceTime = time.localtime()

    return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type  datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: start datetime, end datetime and the range flag
             (0 = no range found, 1 = date range, 2 = time range)
    """
    startTime = ''
    endTime   = ''
    startDate = ''
    endDate   = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # pad the range separator with spaces so it tokenizes cleanly
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # probe each range pattern in priority order; rangeFlag records
    # which one matched
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        # %-formatted single-argument print() works on Python 2 and 3
        print('evalRanges: rangeFlag = %s [%s]' % (rangeFlag, s))

    if m is not None:
        if (m.group() != s):
            # capture remaining string
            parseStr = m.group()
            chunk1   = s[:m.start()]
            chunk2   = s[m.end():]
            s        = '%s %s' % (chunk1, chunk2)
            flag     = 1

            sourceTime, flag = self.parse(s, sourceTime)

            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
        endTime,   eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
        endTime,   eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
        endDate,   eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m       = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date    = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date      = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate,   eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m         = re.search(self.ptc.rangeSep, parseStr)
        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate,   eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type  wd:              integer
    @param wd:              day-of-week value for the current day
    @type  wkdy:            integer
    @param wkdy:            day-of-week value for the parsed day
    @type  offset:          integer
    @param offset:          offset direction for any modifiers
                            (-1 = past, 0 = current, 1 = future,
                            2 = no modifier present)
    @type  style:           integer
    @param style:           normally the value set in C{Constants.DOWParseStyle}
    @type  currentDayStyle: integer
    @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}

    @rtype:  integer
    @return: calculated day-of-week delta (days to add to "today")
    """
    if offset == 1:
        # modifier is indicating future week eg: "next".
        # DOW is calculated as DOW of next week
        diff = 7 - wd + wkdy
    elif offset == -1:
        # modifier is indicating past week eg: "last","previous"
        # DOW is calculated as DOW of previous week
        diff = wkdy - wd - 7
    elif offset == 0:
        # modifier is indiacting current week eg: "this"
        # DOW is calculated as DOW of this week
        diff = wkdy - wd
    elif offset == 2:
        # no modifier is present.
        # i.e. string to be parsed is just DOW
        if style == 1:
            # next occurance of the DOW is calculated
            if currentDayStyle == True:
                if wkdy >= wd:
                    diff = wkdy - wd
                else:
                    diff = 7 - wd + wkdy
            else:
                if wkdy > wd:
                    diff = wkdy - wd
                else:
                    diff = 7 - wd + wkdy
        elif style == -1:
            # last occurance of the DOW is calculated
            if currentDayStyle == True:
                if wkdy <= wd:
                    diff = wkdy - wd
                else:
                    diff = wkdy - wd - 7
            else:
                if wkdy < wd:
                    diff = wkdy - wd
                else:
                    diff = wkdy - wd - 7
        else:
            # occurance of the DOW in the current week is calculated
            diff = wkdy - wd

    if _debug:
        # single-argument print() works identically on Python 2 and 3
        print("wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style))

    return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to sourceTime
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    # NOTE(review): offset appears to encode direction (-1 past, 0 current,
    # 1 future, 2 unqualified), based on the branches below -- confirm
    # against Constants.Modifiers.
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index  = m.start() + 1
        unit   = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit   = chunk2
        chunk2 = ''

    # flag is set once a unit branch below has consumed the modifier
    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> last day of the current month, 09:00
            dy         = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target     = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            # signed month increment from the 1st of the current month
            start      = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target     = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag          = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> upcoming Friday 17:00 (4 = Friday index)
            start      = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target     = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "next week" with no qualifier -> one week from today, 09:00
            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target     = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # signed week offsets are delegated by re-evaluating against
            # "monday" as the anchor day
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag          = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> end of today (17:00)
            sourceTime    = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target     = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target     = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag          = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start      = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target     = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag          = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> December 31 of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag          = True
        self.dateFlag = 1

    if flag == False:
        # unit was not a calendar unit -- try it as a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy          = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources         = self.ptc.buildSources(sourceTime)
                self.timeFlag   = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target     = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag          = True
            self.dateFlag = 1

    if not flag:
        # try the unit as a natural-language time string ("noon", "lunch")
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
            start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target     = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag       = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime    = sources[modifier]
                flag          = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to C{sourceTime}
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: the modified sourceTime and an "invalid" flag
    """
    offset = self.ptc.Modifiers[modifier]
    digit  = r'\d+'

    self.modifier2Flag = False

    # Initialize both flags up front: when a chunk is empty its branch
    # below never assigns the corresponding flag, and the early return
    # would otherwise raise UnboundLocalError (e.g. both chunks empty).
    flag1 = False
    flag2 = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty    = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)
        # NOTE(review): flag1 ends up True when chunk2 did NOT parse --
        # this inversion is intentional-looking and is preserved; the
        # caller combines it with flag2 below.
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty    = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag

        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            # parsedate_tz returns a 10-tuple; drop the tz offset
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): uses `and`, so timeFlag is only set when hour,
            # minute AND second are all non-zero (e.g. 10:30:00 is treated
            # as date-only) -- possibly intended `or`; confirm before changing
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    # all the remaining matchers work on lowercased text
    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            # text before the meridian may be just a bare hour ("5 pm")
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical; the if/else is
        # redundant (behavior is the same either way) -- candidate cleanup
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        # day-level strings anchor to 9am
        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()
        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    # accept a datetime as the base, but normalize it to a timetuple
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '':
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # Each pass through the loop tries the matchers in priority order;
    # the first one that hits sets its flag, extracts the matched text
    # into parseStr and leaves the rest of the string in s for the
    # next iteration.
    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from
                # triggering this regex, we check if the month field exists
                # in the searched expression; if it doesn't exist, the date
                # field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # day-offset words ("today", ...) are handled elsewhere
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                # rebuild the matched time string from its components
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))

                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        # recursion produced nothing; restore the flags
                        # clobbered by the failed parse and keep totalTime
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)
            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0
            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type source: struct_time
    @param source: C{struct_time} value to increment
    @type month: integer
    @param month: optional number of months to increment (may be negative;
                  a value of 0/None is ignored)
    @type year: integer
    @param year: optional number of years to increment (may be negative;
                 a value of 0/None is ignored)

    @rtype: datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr = source.year
    mth = source.month
    dy = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            # non-numeric year increments are silently ignored
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            # non-numeric month increments are silently ignored
            mi = 0

        m = abs(mi)
        y = m // 12    # how many whole years are in the month increment
                       # (// keeps integer division under Python 3 too)
        m = m % 12     # get remaining months

        if mi < 0:
            mth = mth - m          # sub months from start month
            if mth < 1:            # cross start-of-year?
                y -= 1             #   yes - decrement year
                mth += 12          #   and fix month
        else:
            mth = mth + m          # add months to start month
            if mth > 12:           # cross end-of-year?
                y += 1             #   yes - increment year
                mth -= 12          #   and fix month

        yr += y

    # if the day ends up past the last day of
    # the new month, set it to the last day
    lastDay = self.ptc.daysInMonth(mth, yr)
    if dy > lastDay:
        dy = lastDay

    # replace() already yields the final value; the original
    # `source + (d - source)` round-trip was a no-op
    return source.replace(year=yr, month=mth, day=dy)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar.parseDate | python | def parseDate(self, dateString):
"""
Parse short-form date strings::
'05/28/2006' or '04.21'
@type dateString: string
@param dateString: text to convert to a C{datetime}
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
# values pulled from regex's will be stored here and later
# assigned to mth, dy, yr based on information from the locale
# -1 is used as the marker value because we want zero values
# to be passed thru so they can be flagged as errors later
v1 = -1
v2 = -1
v3 = -1
s = dateString
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v1 = int(s[:index])
s = s[index + 1:]
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v2 = int(s[:index])
v3 = int(s[index + 1:])
else:
v2 = int(s.strip())
v = [ v1, v2, v3 ]
d = { 'm': mth, 'd': dy, 'y': yr }
for i in range(0, 3):
n = v[i]
c = self.ptc.dp_order[i]
if n >= 0:
d[c] = n
# if the year is not specified and the date has already
# passed, increment the year
if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
yr = d['y'] + 1
else:
yr = d['y']
mth = d['m']
dy = d['d']
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
if _debug:
print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)
if (mth > 0 and mth <= 12) and \
(dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime() # return current time if date
# string is invalid
return sourceTime | Parse short-form date strings::
'05/28/2006' or '04.21'
@type dateString: string
@param dateString: text to convert to a C{datetime}
@rtype: struct_time
@return: calculated C{struct_time} value of dateString | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L312-L386 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants}

    @rtype: object
    @return: L{Calendar} instance
    """
    # use the supplied locale constants, falling back to the defaults
    if constants is not None:
        self.ptc = constants
    else:
        self.ptc = parsedatetime_consts.Constants()

    # Per-parse token-category flags, set by parse() when the matching
    # regex fires and cleared again by the evaluation routines:
    #   weekdyFlag    - monday/tuesday/...
    #   dateStdFlag   - 07/21/06
    #   dateStrFlag   - July 21st, 2006
    #   timeStdFlag   - 5:50
    #   meridianFlag  - am/pm
    #   dayStrFlag    - tomorrow/yesterday/today/..
    #   timeStrFlag   - lunch/noon/breakfast/...
    #   modifierFlag  - after/before/prev/next/..
    #   modifier2Flag - after/before/prev/next/..
    #   unitsFlag     - hrs/weeks/yrs/min/..
    #   qunitsFlag    - h/m/t/d..
    for flagName in ('weekdyFlag', 'dateStdFlag', 'dateStrFlag',
                     'timeStdFlag', 'meridianFlag', 'dayStrFlag',
                     'timeStrFlag', 'modifierFlag', 'modifier2Flag',
                     'unitsFlag', 'qunitsFlag'):
        setattr(self, flagName, False)

    # result-kind accumulators: 1 = date parsed, 2 = time parsed
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

        Five = 5
        Twenty Five = 25
        Two hundred twenty five = 225
        Two thousand and twenty five = 2025
        Two thousand twenty five = 2025

    @type unitText: string
    @param unitText: number text to convert

    @rtype: integer
    @return: numerical value of unitText, or C{None} if any word is
             not recognized (preserving the previous "unimplemented"
             return value for bad input)
    """
    small = {'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4,
             'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9,
             'ten': 10, 'eleven': 11, 'twelve': 12, 'thirteen': 13,
             'fourteen': 14, 'fifteen': 15, 'sixteen': 16,
             'seventeen': 17, 'eighteen': 18, 'nineteen': 19,
             'twenty': 20, 'thirty': 30, 'forty': 40, 'fifty': 50,
             'sixty': 60, 'seventy': 70, 'eighty': 80, 'ninety': 90}
    large = {'thousand': 1000, 'million': 1000000, 'billion': 1000000000}

    total = 0    # value of completed groups (e.g. "two thousand")
    group = 0    # group currently being accumulated
    seen = False # have we recognized at least one number word?

    for word in unitText.lower().replace('-', ' ').split():
        if word == 'and':
            # filler word: "two hundred and five"
            continue
        if word in small:
            group += small[word]
            seen = True
        elif word == 'hundred':
            # bare "hundred" means "one hundred"
            group = (group or 1) * 100
            seen = True
        elif word in large:
            # close out the current group at this scale
            total += (group or 1) * large[word]
            group = 0
            seen = True
        else:
            # unrecognized word - give up rather than guess
            return None

    if not seen:
        return None

    return total + group
def _buildTime(self, source, quantity, modifier, units):
"""
Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
After converting, calcuate the time and return the adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time
"""
if _debug:
print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
if source is None:
source = time.localtime()
if quantity is None:
quantity = ''
else:
quantity = quantity.strip()
if len(quantity) == 0:
qty = 1
else:
try:
qty = int(quantity)
except ValueError:
qty = 0
if modifier in self.ptc.Modifiers:
qty = qty * self.ptc.Modifiers[modifier]
if units is None or units == '':
units = 'dy'
# plurals are handled by regex's (could be a bug tho)
(yr, mth, dy, hr, mn, sec, _, _, _) = source
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start
if units.startswith('y'):
target = self.inc(start, year=qty)
self.dateFlag = 1
elif units.endswith('th') or units.endswith('ths'):
target = self.inc(start, month=qty)
self.dateFlag = 1
else:
if units.startswith('d'):
target = start + datetime.timedelta(days=qty)
self.dateFlag = 1
elif units.startswith('h'):
target = start + datetime.timedelta(hours=qty)
self.timeFlag = 2
elif units.startswith('m'):
target = start + datetime.timedelta(minutes=qty)
self.timeFlag = 2
elif units.startswith('s'):
target = start + datetime.timedelta(seconds=qty)
self.timeFlag = 2
elif units.startswith('w'):
target = start + datetime.timedelta(weeks=qty)
self.dateFlag = 1
return target.timetuple()
def parseDate(self, dateString):
"""
Parse short-form date strings::
'05/28/2006' or '04.21'
@type dateString: string
@param dateString: text to convert to a C{datetime}
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
# values pulled from regex's will be stored here and later
# assigned to mth, dy, yr based on information from the locale
# -1 is used as the marker value because we want zero values
# to be passed thru so they can be flagged as errors later
v1 = -1
v2 = -1
v3 = -1
s = dateString
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v1 = int(s[:index])
s = s[index + 1:]
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v2 = int(s[:index])
v3 = int(s[index + 1:])
else:
v2 = int(s.strip())
v = [ v1, v2, v3 ]
d = { 'm': mth, 'd': dy, 'y': yr }
for i in range(0, 3):
n = v[i]
c = self.ptc.dp_order[i]
if n >= 0:
d[c] = n
# if the year is not specified and the date has already
# passed, increment the year
if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
yr = d['y'] + 1
else:
yr = d['y']
mth = d['m']
dy = d['d']
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
if _debug:
print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)
if (mth > 0 and mth <= 12) and \
(dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime() # return current time if date
# string is invalid
return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type dateString: string
    @param dateString: text to convert to a datetime

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # remember today's month/day so we can tell whether the parsed
    # date has already gone by this year
    thisMth = mth
    thisDy = dy

    m = self.ptc.CRE_DATE3.search(dateString.lower())

    mth = self.ptc.MonthOffsets[m.group('mthname')]

    # missing day defaults to the first of the month
    if m.group('day') is not None:
        dy = int(m.group('day'))
    else:
        dy = 1

    if m.group('year') is not None:
        yr = int(m.group('year'))

        # two-digit years: the birthday-epoch window decides the century
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth < thisMth) or (mth == thisMth and dy < thisDy):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += 1

    if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # Return current time if date string is invalid
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()

    return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
             (0 = no range found, 1 = date range, 2 = time range)
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # pad the range separator with spaces so the range regexes can match
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # classify the range expression; rangeFlag encodes which pattern hit
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s

    if m is not None:
        if (m.group() != s):
            # capture remaining string and parse it as the base time
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]
            s = '%s %s' % (chunk1, chunk2)
            flag = 1

            sourceTime, flag = self.parse(s, sourceTime)

            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

    return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indiacting current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurance of the DOW in the current week is calculated
diff = wkdy - wd
if _debug:
print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to sourceTime
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''

    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> last day of the current month
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> Friday 5pm of the current week
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "next week" -> one week from today, 9am
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # other offsets are resolved relative to Monday of the week
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> today at 5pm
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> December 31st of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag = True
        self.dateFlag = 1

    if flag == False:
        # the unit was not a calendar unit; try a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)

                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

    if not flag:
        # not a weekday either; try a natural-language time string
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)

            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to C{sourceTime}
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    offset = self.ptc.Modifiers[modifier]
    digit  = r'\d+'

    self.modifier2Flag = False

    # Bug fix: both flags must be defined before the early return
    # below -- previously a "modifier only" match (chunk1 and chunk2
    # both empty) raised a NameError on flag2.
    flag1 = False
    flag2 = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty    = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)

        # note the inversion: flag1 == True here means chunk2 could
        # NOT be parsed as a date/time, i.e. the result is invalid
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty    = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # remember the flags so they can be restored if chunk1 turns
        # out not to contain a usable date/time
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag

        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is, then do not use the
    # datetime value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type  datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s   = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): requiring hour AND minute AND second to all
            # be non-zero looks suspicious ('or' was probably intended --
            # e.g. 10:00:00 will not set the time flag here); left
            # unchanged to preserve behaviour, confirm before fixing
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    # all remaining formats are matched against lower-cased text
    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                # bare hour, e.g. "5 pm"
                hr  = int(dt)
                mn  = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

        if hr == 24:
            hr = 0

        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
        # m is assumed non-None here: meridianFlag is only set by
        # parse() after CRE_TIMEHMS2 matched
        meridian   = m.group('meridian').lower()

        # if 'am' found and hour is 12 - force hour to 0 (midnight)
        if (meridian in self.ptc.am) and hr == 12:
            sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

        # if 'pm' found and hour < 12, add 12 to shift to evening
        if (meridian in self.ptc.pm) and hr < 12:
            sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime    = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime    = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime       = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime       = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy  = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical, making the
        # if/else redundant; kept as-is to preserve structure
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd     = wkdy

        sourceTime      = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime    = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        # day-level strings resolve to 9am on the target day
        start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target     = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units    = m.group('units')
            quantity = s[:m.start('units')]

        sourceTime     = self._buildTime(sourceTime, quantity, modifier, units)
        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units    = m.group('qunits')
            quantity = s[:m.start('qunits')]

        sourceTime      = self._buildTime(sourceTime, quantity, modifier, units)
        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime    = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type  datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    # accept datetime objects too, coercing them to the struct_time
    # representation used internally
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s         = datetimeString.strip().lower()
    parseStr  = ''
    totalTime = sourceTime

    if s == '' :
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # each iteration extracts one recognized token into parseStr,
    # removes it from s, evaluates it, and repeats until no regex
    # matches the remaining text
    while len(s) > 0:
        flag   = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1   = s[:m.start('modifier')].strip()
                    chunk2   = s[m.end('modifier'):].strip()
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1   = s[:m.start('modifier')].strip()
                    chunk2   = s[m.end('modifier'):].strip()
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from
                # triggering this regex, we check whether the month field
                # exists in the searched expression; if it doesn't
                # exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag    = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1   = s[:m.start('date')]
                    chunk2   = s[m.end('date'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag    = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1   = s[:m.start('date')]
                    chunk2   = s[m.end('date'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag   = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1   = s[:m.start('day')]
                    chunk2   = s[m.end('day'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1   = s[:m.start('qty')].strip()
                    chunk2   = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the
                    # quantity, e.g. "-5 days"
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1   = chunk1[:-1]

                    s    = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units (single-character unit abbreviations)
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1   = s[:m.start('qty')].strip()
                    chunk2   = s[m.end('qty'):].strip()

                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1   = chunk1[:-1]

                    s    = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # skip strings like "today" that also appear in the
                # day-offset table -- they were handled above
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag   = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1   = s[:m.start('weekday')]
                        chunk2   = s[m.end('weekday'):]
                        s        = '%s %s' % (chunk1, chunk2)
                        flag     = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag    = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1   = s[:m.start('time')]
                    chunk2   = s[m.end('time'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag     = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))

                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]

                s    = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag    = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1   = s[:m.start('hours')]
                    chunk2   = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1   = s[:m.start('hours')]
                    chunk2   = s[m.end('minutes'):]

                s    = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)
            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0
            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr  = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime     = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type  source: struct_time
    @param source: C{struct_time} value to increment
    @type  month:  integer
    @param month:  optional number of months to increment
    @type  year:   integer
    @param year:   optional number of years to increment

    @rtype:  datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr  = source.year
    mth = source.month
    dy  = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0

        m = abs(mi)
        y = m // 12  # how many whole years are in the month increment
                     # (// keeps this an integer under Python 3 as well)
        m = m % 12   # get remaining months

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y   -= 1        #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y   += 1        #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

    # if the day ends up past the last day of the new month, set it to
    # the last day.  The clamp now also runs for year-only increments,
    # so e.g. Feb 29th plus one year no longer raises ValueError.
    if dy > self.ptc.daysInMonth(mth, yr):
        dy = self.ptc.daysInMonth(mth, yr)

    d = source.replace(year=yr, month=mth, day=dy)

    return source + (d - source)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar.parseDateText | python | def parseDateText(self, dateString):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
currentMth = mth
currentDy = dy
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
if m.group('day') != None:
dy = int(m.group('day'))
else:
dy = 1
if m.group('year') != None:
yr = int(m.group('year'))
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += 1
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
# Return current time if date string is invalid
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime()
return sourceTime | Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L389-L440 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type  constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants}

    @rtype:  object
    @return: L{Calendar} instance
    """
    if constants is not None:
        self.ptc = constants
    else:
        # no constants reference supplied - fall back to the defaults
        self.ptc = parsedatetime_consts.Constants()

    # per-parse state: records which kind of token was matched so that
    # _evalString() knows how to interpret the captured text
    self.weekdyFlag    = False  # monday/tuesday/...
    self.dateStdFlag   = False  # 07/21/06
    self.dateStrFlag   = False  # July 21st, 2006
    self.timeStdFlag   = False  # 5:50
    self.meridianFlag  = False  # am/pm
    self.dayStrFlag    = False  # tomorrow/yesterday/today/..
    self.timeStrFlag   = False  # lunch/noon/breakfast/...
    self.modifierFlag  = False  # after/before/prev/next/..
    self.modifier2Flag = False  # from/after/prior/..
    self.unitsFlag     = False  # hrs/weeks/yrs/min/..
    self.qunitsFlag    = False  # h/m/t/d..

    # 0 = nothing parsed, 1 = date, 2 = time (parse() sums the two)
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

    Five = 5
    Twenty Five = 25
    Two hundred twenty five = 225
    Two thousand and twenty five = 2025
    Two thousand twenty five = 2025

    @type  unitText: string
    @param unitText: number text to convert

    @rtype:  integer
    @return: numerical value of unitText
    """
    # TODO: implement this -- currently an unimplemented stub that
    # always returns None
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{unit} strings and convert them
    into values.  After converting, calcuate the time and return the
    adjusted sourceTime.

    @type  source:   time
    @param source:   time to use as the base (or source)
    @type  quantity: string
    @param quantity: quantity string
    @type  modifier: string
    @param modifier: how quantity and units modify the source time
    @type  units:    string
    @param units:    unit of the quantity (i.e. hours, days, months, etc)

    @rtype:  struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)

    if source is None:
        source = time.localtime()

    if quantity is None:
        quantity = ''
    else:
        quantity = quantity.strip()

    # no quantity means "a", e.g. "an hour" -> 1; a non-numeric
    # quantity falls back to 0
    if len(quantity) == 0:
        qty = 1
    else:
        try:
            qty = int(quantity)
        except ValueError:
            qty = 0

    # modifiers such as "last" flip the sign of the quantity
    if modifier in self.ptc.Modifiers:
        qty = qty * self.ptc.Modifiers[modifier]

    if units is None or units == '':
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)

    (yr, mth, dy, hr, mn, sec, _, _, _) = source

    start  = datetime.datetime(yr, mth, dy, hr, mn, sec)
    target = start

    # dispatch on the unit's leading characters; year/month go through
    # inc() because timedelta cannot express them
    if units.startswith('y'):
        target = self.inc(start, year=qty)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        # 'month'/'months' are recognized by their suffix so they don't
        # collide with the 'm' prefix used for minutes below
        target = self.inc(start, month=qty)
        self.dateFlag = 1
    else:
        if units.startswith('d'):
            target = start + datetime.timedelta(days=qty)
            self.dateFlag = 1
        elif units.startswith('h'):
            target = start + datetime.timedelta(hours=qty)
            self.timeFlag = 2
        elif units.startswith('m'):
            target = start + datetime.timedelta(minutes=qty)
            self.timeFlag = 2
        elif units.startswith('s'):
            target = start + datetime.timedelta(seconds=qty)
            self.timeFlag = 2
        elif units.startswith('w'):
            target = start + datetime.timedelta(weeks=qty)
            self.dateFlag = 1

    return target.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type  dateString: string
    @param dateString: text to convert to a C{datetime}

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1

    # split the string on the locale's date separator (CRE_DATE2);
    # up to three numeric fields are extracted
    s = dateString
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1    = int(s[:index])
        s     = s[index + 1:]

    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2    = int(s[:index])
        v3    = int(s[index + 1:])
    else:
        v2 = int(s.strip())

    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }

    # dp_order gives the locale's field order (e.g. m/d/y for en_US)
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n

    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']

    mth = d['m']
    dy  = d['d']

    # birthday epoch constraint: two-digit years below the epoch are
    # treated as 20xx, the rest as 19xx
    if yr < self.ptc.BirthdayEpoch:
        yr += 2000
    elif yr < 100:
        yr += 1900

    if _debug:
        print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)

    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime    = time.localtime()  # return current time if date
                                          # string is invalid

    return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type  dateString: string
    @param dateString: text to convert to a datetime

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    currentMth = mth
    currentDy  = dy

    s = dateString.lower()
    # NOTE(review): m is used without a None check below -- this
    # assumes the caller only invokes parseDateText() after CRE_DATE3
    # has already matched the string (as parse() does); confirm
    m = self.ptc.CRE_DATE3.search(s)

    mth = m.group('mthname')
    mth = self.ptc.MonthOffsets[mth]

    # day defaults to the 1st when absent, e.g. "July 2006"
    if m.group('day') != None:
        dy = int(m.group('day'))
    else:
        dy = 1

    if m.group('year') != None:
        yr = int(m.group('year'))

        # birthday epoch constraint: two-digit years below the epoch
        # are treated as 20xx, the rest as 19xx
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900

    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += 1

    if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # Return current time if date string is invalid
        self.dateFlag = 0
        self.timeFlag = 0

        sourceTime = time.localtime()

    return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type  datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
    """
    startTime = ''
    endTime   = ''
    startDate = ''
    endDate   = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # pad the range separator with spaces so the regexes can match it
    # as a standalone token, then collapse any doubled spaces
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # try each range regex in priority order; rangeFlag records which
    # one matched (1-3, 7 = time ranges, 4-6 = date ranges)
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s

    if m is not None:
        if (m.group() != s):
            # capture remaining string
            parseStr = m.group()
            chunk1   = s[:m.start()]
            chunk2   = s[m.end():]
            s        = '%s %s' % (chunk1, chunk2)
            flag     = 1

            # the text around the range becomes the base time for
            # both endpoints of the range
            sourceTime, flag = self.parse(s, sourceTime)
            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    # NOTE(review): when a range regex matched but one of the endpoint
    # parses below fails, the function falls through all branches and
    # implicitly returns None rather than a tuple -- confirm callers
    # handle that
    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag   = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m       = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date    = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date      = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag   = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)

        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag   = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type  wd:              integer
    @param wd:              day-of-week value for the current day
    @type  wkdy:            integer
    @param wkdy:            day-of-week value for the parsed day
    @type  offset:          integer
    @param offset:          offset direction for any modifiers (-1, 0, 1)
    @type  style:           integer
    @param style:           normally the value set in C{Constants.DOWParseStyle}
    @type  currentDayStyle: integer
    @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}

    @rtype:  integer
    @return: calculated day-of-week
    """
    # NOTE(review): an offset outside {-1, 0, 1, 2} leaves diff unbound
    # and the return below raises NameError; the visible callers only
    # pass those values, but confirm before relying on other offsets
    if offset == 1:
        # modifier is indicating future week eg: "next".
        # DOW is calculated as DOW of next week
        diff = 7 - wd + wkdy
    elif offset == -1:
        # modifier is indicating past week eg: "last","previous"
        # DOW is calculated as DOW of previous week
        diff = wkdy - wd - 7
    elif offset == 0:
        # modifier is indicating current week eg: "this"
        # DOW is calculated as DOW of this week
        diff = wkdy - wd
    elif offset == 2:
        # no modifier is present.
        # i.e. string to be parsed is just DOW
        if style == 1:
            # next occurance of the DOW is calculated
            if currentDayStyle == True:
                # today counts as a valid match (>=)
                if wkdy >= wd:
                    diff = wkdy - wd
                else:
                    diff = 7 - wd + wkdy
            else:
                if wkdy > wd:
                    diff = wkdy - wd
                else:
                    diff = 7 - wd + wkdy
        elif style == -1:
            # last occurance of the DOW is calculated
            if currentDayStyle == True:
                # today counts as a valid match (<=)
                if wkdy <= wd:
                    diff = wkdy - wd
                else:
                    diff = wkdy - wd - 7
            else:
                if wkdy < wd:
                    diff = wkdy - wd
                else:
                    diff = wkdy - wd - 7
        else:
            # occurance of the DOW in the current week is calculated
            diff = wkdy - wd

    if _debug:
        print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)

    return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to sourceTime
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    # Offset direction for the modifier word, taken from the Constants
    # table (observed values below: -1 past e.g. "last", 0 current
    # e.g. "this", 1 future e.g. "next", 2 day-shift style).
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''

    # flag becomes True once some branch below has consumed the unit
    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month": last day of the current month at 9am
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                # NOTE(review): mth + 1 can be 13 when mth == 12 --
                # confirm daysInMonth handles an out-of-range month
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            # "next"/"last" month: offset whole months from the 1st
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week": end of the working week (Friday 5pm)
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # one week forward from the current day at 9am
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # "next"/"last week": delegate by re-evaluating the
            # modifier against Monday of the target week
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day": end of the day (5pm)
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            # one full day forward, keeping the time-of-day
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            # +/- offset days at 9am
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour": top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year": last day of the year, time unchanged
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            # same date next year
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            # +/- offset years from Jan 1st at 9am
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag = True
        self.dateFlag = 1

    if flag == False:
        # unit was not a recognised time unit; try a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                # shift to the weekday implied by modifier + DOW style
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

    if not flag:
        # unit may itself be a natural-language time string
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to C{sourceTime}
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: the modified sourceTime and an "invalid" flag
    """
    offset = self.ptc.Modifiers[modifier]
    digit = r'\d+'

    self.modifier2Flag = False

    # BUGFIX: both result flags are initialised here so that the combined
    # (flag1 and flag2) return value is always defined.  Previously flag2
    # was only assigned inside the chunk branches, raising NameError when
    # both chunk1 and chunk2 were empty.
    flag1 = False
    flag2 = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)

        # flag1 is inverted here: True means chunk2 did NOT parse as a
        # date/time (parse() returned flag 0)
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # remember the flags so they can be restored if chunk1 turns
        # out not to contain a usable date/time
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag

        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): 'and' means the time flag is set only when
            # hour, minute AND second are all non-zero; a parsed time of
            # e.g. 10:00:00 leaves timeFlag unset -- possibly intended
            # to be 'or'; confirm before changing
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            # text before the meridian; "10" vs "10:30[:15]"
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical; the wkdy > wd
        # comparison currently has no effect -- confirm whether different
        # offsets were intended for the two cases
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '':
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # Tokenizing loop: each pass tries the pattern matchers in priority
    # order until one claims a chunk (parseStr); the matched chunk is
    # evaluated at the bottom of the loop and removed from s.
    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))

                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)

            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0

            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date and increments it according to the
    values passed in by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type source: datetime
    @param source: C{datetime} value to increment (the value must
                   provide C{.year}, C{.month}, C{.day} and
                   C{.replace()}; the previous docstring incorrectly
                   said C{struct_time})
    @type month: integer
    @param month: optional number of months to increment (may be
                  negative; non-numeric values are ignored)
    @type year: integer
    @param year: optional number of years to increment (may be
                 negative; non-numeric values are ignored)

    @rtype: datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr = source.year
    mth = source.month
    dy = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            # non-numeric year increments are silently ignored
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            # non-numeric month increments are silently ignored
            mi = 0

        # split the increment into whole years and remaining months;
        # floor division keeps this correct on Python 3 too, where
        # '/' would produce a float
        y, m = divmod(abs(mi), 12)

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y -= 1          #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y += 1          #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

        # if the day ends up past the last day of
        # the new month, set it to the last day
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)

    # return the adjusted datetime directly (the old
    # "source + (d - source)" round-trip produced the same value)
    return source.replace(year=yr, month=mth, day=dy)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar.evalRanges | python | def evalRanges(self, datetimeString, sourceTime=None):
"""
Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag
"""
startTime = ''
endTime = ''
startDate = ''
endDate = ''
rangeFlag = 0
s = datetimeString.strip().lower()
if self.ptc.rangeSep in s:
s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
s = s.replace(' ', ' ')
m = self.ptc.CRE_TIMERNG1.search(s)
if m is not None:
rangeFlag = 1
else:
m = self.ptc.CRE_TIMERNG2.search(s)
if m is not None:
rangeFlag = 2
else:
m = self.ptc.CRE_TIMERNG4.search(s)
if m is not None:
rangeFlag = 7
else:
m = self.ptc.CRE_TIMERNG3.search(s)
if m is not None:
rangeFlag = 3
else:
m = self.ptc.CRE_DATERNG1.search(s)
if m is not None:
rangeFlag = 4
else:
m = self.ptc.CRE_DATERNG2.search(s)
if m is not None:
rangeFlag = 5
else:
m = self.ptc.CRE_DATERNG3.search(s)
if m is not None:
rangeFlag = 6
if _debug:
print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s
if m is not None:
if (m.group() != s):
# capture remaining string
parseStr = m.group()
chunk1 = s[:m.start()]
chunk2 = s[m.end():]
s = '%s %s' % (chunk1, chunk2)
flag = 1
sourceTime, flag = self.parse(s, sourceTime)
if flag == 0:
sourceTime = None
else:
parseStr = s
if rangeFlag == 1:
m = re.search(self.ptc.rangeSep, parseStr)
startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
if (eflag != 0) and (sflag != 0):
return (startTime, endTime, 2)
elif rangeFlag == 2:
m = re.search(self.ptc.rangeSep, parseStr)
startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
if (eflag != 0) and (sflag != 0):
return (startTime, endTime, 2)
elif rangeFlag == 3 or rangeFlag == 7:
m = re.search(self.ptc.rangeSep, parseStr)
# capturing the meridian from the end time
if self.ptc.usesMeridian:
ampm = re.search(self.ptc.am[0], parseStr)
# appending the meridian to the start time
if ampm is not None:
startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
else:
startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
else:
startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)
if (eflag != 0) and (sflag != 0):
return (startTime, endTime, 2)
elif rangeFlag == 4:
m = re.search(self.ptc.rangeSep, parseStr)
startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
if (eflag != 0) and (sflag != 0):
return (startDate, endDate, 1)
elif rangeFlag == 5:
m = re.search(self.ptc.rangeSep, parseStr)
endDate = parseStr[(m.start() + 1):]
# capturing the year from the end date
date = self.ptc.CRE_DATE3.search(endDate)
endYear = date.group('year')
# appending the year to the start date if the start date
# does not have year information and the end date does.
# eg : "Aug 21 - Sep 4, 2007"
if endYear is not None:
startDate = (parseStr[:m.start()]).strip()
date = self.ptc.CRE_DATE3.search(startDate)
startYear = date.group('year')
if startYear is None:
startDate = startDate + ', ' + endYear
else:
startDate = parseStr[:m.start()]
startDate, sflag = self.parse(startDate, sourceTime)
endDate, eflag = self.parse(endDate, sourceTime)
if (eflag != 0) and (sflag != 0):
return (startDate, endDate, 1)
elif rangeFlag == 6:
m = re.search(self.ptc.rangeSep, parseStr)
startDate = parseStr[:m.start()]
# capturing the month from the start date
mth = self.ptc.CRE_DATE3.search(startDate)
mth = mth.group('mthname')
# appending the month name to the end date
endDate = mth + parseStr[(m.start() + 1):]
startDate, sflag = self.parse(startDate, sourceTime)
endDate, eflag = self.parse(endDate, sourceTime)
if (eflag != 0) and (sflag != 0):
return (startDate, endDate, 1)
else:
# if range is not found
sourceTime = time.localtime()
return (sourceTime, sourceTime, 0) | Evaluate the C{datetimeString} text and determine if
it represents a date or time range.
@type datetimeString: string
@param datetimeString: datetime text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: start datetime, end datetime and the invalid flag | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L443-L606 | [
"def parse(self, datetimeString, sourceTime=None):\n \"\"\"\n Splits the given C{datetimeString} into tokens, finds the regex\n patterns that match and then calculates a C{struct_time} value from\n the chunks.\n\n If C{sourceTime} is given then the C{struct_time} value will be\n calculated from that value, otherwise from the current date/time.\n\n If the C{datetimeString} is parsed and date/time value found then\n the second item of the returned tuple will be a flag to let you know\n what kind of C{struct_time} value is being returned::\n\n 0 = not parsed at all\n 1 = parsed as a C{date}\n 2 = parsed as a C{time}\n 3 = parsed as a C{datetime}\n\n @type datetimeString: string\n @param datetimeString: date/time text to evaluate\n @type sourceTime: struct_time\n @param sourceTime: C{struct_time} value to use as the base\n\n @rtype: tuple\n @return: tuple of: modified C{sourceTime} and the result flag\n \"\"\"\n\n if sourceTime:\n if isinstance(sourceTime, datetime.datetime):\n if _debug:\n print 'coercing datetime to timetuple'\n sourceTime = sourceTime.timetuple()\n else:\n if not isinstance(sourceTime, time.struct_time) and \\\n not isinstance(sourceTime, tuple):\n raise Exception('sourceTime is not a struct_time')\n\n s = datetimeString.strip().lower()\n parseStr = ''\n totalTime = sourceTime\n\n if s == '' :\n if sourceTime is not None:\n return (sourceTime, self.dateFlag + self.timeFlag)\n else:\n return (time.localtime(), 0)\n\n self.timeFlag = 0\n self.dateFlag = 0\n\n while len(s) > 0:\n flag = False\n chunk1 = ''\n chunk2 = ''\n\n if _debug:\n print 'parse (top of loop): [%s][%s]' % (s, parseStr)\n\n if parseStr == '':\n # Modifier like next\\prev..\n m = self.ptc.CRE_MODIFIER.search(s)\n if m is not None:\n self.modifierFlag = True\n if (m.group('modifier') != s):\n # capture remaining string\n parseStr = m.group('modifier')\n chunk1 = s[:m.start('modifier')].strip()\n chunk2 = s[m.end('modifier'):].strip()\n flag = True\n else:\n parseStr = s\n\n if 
parseStr == '':\n # Modifier like from\\after\\prior..\n m = self.ptc.CRE_MODIFIER2.search(s)\n if m is not None:\n self.modifier2Flag = True\n if (m.group('modifier') != s):\n # capture remaining string\n parseStr = m.group('modifier')\n chunk1 = s[:m.start('modifier')].strip()\n chunk2 = s[m.end('modifier'):].strip()\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n valid_date = False\n for match in self.ptc.CRE_DATE3.finditer(s):\n # to prevent \"HH:MM(:SS) time strings\" expressions from triggering\n # this regex, we checks if the month field exists in the searched \n # expression, if it doesn't exist, the date field is not valid\n if match.group('mthname'):\n m = self.ptc.CRE_DATE3.search(s, match.start())\n valid_date = True\n break\n\n # String date format\n if valid_date:\n self.dateStrFlag = True\n self.dateFlag = 1\n if (m.group('date') != s):\n # capture remaining string\n parseStr = m.group('date')\n chunk1 = s[:m.start('date')]\n chunk2 = s[m.end('date'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Standard date format\n m = self.ptc.CRE_DATE.search(s)\n if m is not None:\n self.dateStdFlag = True\n self.dateFlag = 1\n if (m.group('date') != s):\n # capture remaining string\n parseStr = m.group('date')\n chunk1 = s[:m.start('date')]\n chunk2 = s[m.end('date'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Natural language day strings\n m = self.ptc.CRE_DAY.search(s)\n if m is not None:\n self.dayStrFlag = True\n self.dateFlag = 1\n if (m.group('day') != s):\n # capture remaining string\n parseStr = m.group('day')\n chunk1 = s[:m.start('day')]\n chunk2 = s[m.end('day'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Quantity + Units\n m = self.ptc.CRE_UNITS.search(s)\n if m is not None:\n self.unitsFlag = True\n if (m.group('qty') != s):\n # capture remaining string\n parseStr = 
m.group('qty')\n chunk1 = s[:m.start('qty')].strip()\n chunk2 = s[m.end('qty'):].strip()\n\n if chunk1[-1:] == '-':\n parseStr = '-%s' % parseStr\n chunk1 = chunk1[:-1]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Quantity + Units\n m = self.ptc.CRE_QUNITS.search(s)\n if m is not None:\n self.qunitsFlag = True\n\n if (m.group('qty') != s):\n # capture remaining string\n parseStr = m.group('qty')\n chunk1 = s[:m.start('qty')].strip()\n chunk2 = s[m.end('qty'):].strip()\n\n if chunk1[-1:] == '-':\n parseStr = '-%s' % parseStr\n chunk1 = chunk1[:-1]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s \n\n if parseStr == '':\n # Weekday\n m = self.ptc.CRE_WEEKDAY.search(s)\n if m is not None:\n gv = m.group('weekday')\n if s not in self.ptc.dayOffsets:\n self.weekdyFlag = True\n self.dateFlag = 1\n if (gv != s):\n # capture remaining string\n parseStr = gv\n chunk1 = s[:m.start('weekday')]\n chunk2 = s[m.end('weekday'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Natural language time strings\n m = self.ptc.CRE_TIME.search(s)\n if m is not None:\n self.timeStrFlag = True\n self.timeFlag = 2\n if (m.group('time') != s):\n # capture remaining string\n parseStr = m.group('time')\n chunk1 = s[:m.start('time')]\n chunk2 = s[m.end('time'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # HH:MM(:SS) am/pm time strings\n m = self.ptc.CRE_TIMEHMS2.search(s)\n if m is not None:\n self.meridianFlag = True\n self.timeFlag = 2\n if m.group('minutes') is not None:\n if m.group('seconds') is not None:\n parseStr = '%s:%s:%s %s' % (m.group('hours'),\n m.group('minutes'),\n m.group('seconds'),\n m.group('meridian'))\n else:\n parseStr = '%s:%s %s' % (m.group('hours'),\n m.group('minutes'),\n m.group('meridian'))\n else:\n parseStr = '%s %s' % (m.group('hours'),\n m.group('meridian'))\n\n chunk1 = 
s[:m.start('hours')]\n chunk2 = s[m.end('meridian'):]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n\n if parseStr == '':\n # HH:MM(:SS) time strings\n m = self.ptc.CRE_TIMEHMS.search(s)\n if m is not None:\n self.timeStdFlag = True\n self.timeFlag = 2\n if m.group('seconds') is not None:\n parseStr = '%s:%s:%s' % (m.group('hours'),\n m.group('minutes'),\n m.group('seconds'))\n chunk1 = s[:m.start('hours')]\n chunk2 = s[m.end('seconds'):]\n else:\n parseStr = '%s:%s' % (m.group('hours'),\n m.group('minutes'))\n chunk1 = s[:m.start('hours')]\n chunk2 = s[m.end('minutes'):]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n\n # if string does not match any regex, empty string to\n # come out of the while loop\n if not flag:\n s = ''\n\n if _debug:\n print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)\n print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \\\n (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)\n print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \\\n (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)\n\n # evaluate the matched string\n if parseStr != '':\n if self.modifierFlag == True:\n t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)\n # t is the unparsed part of the chunks.\n # If it is not date/time, return current\n # totalTime as it is; else return the output\n # after parsing t.\n if (t != '') and (t != None):\n tempDateFlag = self.dateFlag\n tempTimeFlag = self.timeFlag\n (totalTime2, flag) = self.parse(t, totalTime)\n\n if flag == 0 and totalTime is not None:\n self.timeFlag = tempTimeFlag\n self.dateFlag = tempDateFlag\n\n return (totalTime, self.dateFlag + self.timeFlag)\n else:\n return (totalTime2, self.dateFlag + self.timeFlag)\n\n elif self.modifier2Flag == True:\n totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)\n\n if 
invalidFlag == True:\n self.dateFlag = 0\n self.timeFlag = 0\n\n else:\n totalTime = self._evalString(parseStr, totalTime)\n parseStr = ''\n\n # String is not parsed at all\n if totalTime is None or totalTime == sourceTime:\n totalTime = time.localtime()\n self.dateFlag = 0\n self.timeFlag = 0\n\n return (totalTime, self.dateFlag + self.timeFlag)\n"
] | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants};
                      when omitted a default instance is created

    @rtype: object
    @return: L{Calendar} instance
    """
    # use the caller-supplied constants object, falling back to the defaults
    if constants is None:
        self.ptc = parsedatetime_consts.Constants()
    else:
        self.ptc = constants

    # per-parse state flags: each records which kind of token the
    # tokenizer in parse() recognized last
    #   weekdyFlag    - monday/tuesday/...
    #   dateStdFlag   - 07/21/06
    #   dateStrFlag   - July 21st, 2006
    #   timeStdFlag   - 5:50
    #   meridianFlag  - am/pm
    #   dayStrFlag    - tomorrow/yesterday/today/..
    #   timeStrFlag   - lunch/noon/breakfast/...
    #   modifierFlag  - after/before/prev/next/..
    #   modifier2Flag - after/before/prev/next/..
    #   unitsFlag     - hrs/weeks/yrs/min/..
    #   qunitsFlag    - h/m/t/d..
    for flagName in ('weekdyFlag', 'dateStdFlag', 'dateStrFlag',
                     'timeStdFlag', 'meridianFlag', 'dayStrFlag',
                     'timeStrFlag', 'modifierFlag', 'modifier2Flag',
                     'unitsFlag', 'qunitsFlag'):
        setattr(self, flagName, False)

    # result-kind accumulators: dateFlag is 1 when a date was parsed,
    # timeFlag is 2 when a time was parsed (their sum is the parse() result flag)
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Convert a spelled-out number into its integer value, for example::

        Five                        = 5
        Twenty Five                 = 25
        Two hundred twenty five     = 225
        Two thousand and twenty five = 2025
        Two thousand twenty five    = 2025

    @type unitText: string
    @param unitText: number text to convert

    @rtype: integer
    @return: numerical value of unitText
    """
    # TODO: implement this - currently a stub that always returns None
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{units} strings, convert them into
    values and apply them to C{source}, returning the adjusted time.

    @type source: time
    @param source: time to use as the base (or source)
    @type quantity: string
    @param quantity: quantity string
    @type modifier: string
    @param modifier: how quantity and units modify the source time
    @type units: string
    @param units: unit of the quantity (i.e. hours, days, months, etc)

    @rtype: struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        print('_buildTime: [%s][%s][%s]' % (quantity, modifier, units))

    if source is None:
        source = time.localtime()

    # normalize the quantity text; an absent/empty quantity means "one"
    if quantity is None:
        quantity = ''
    else:
        quantity = quantity.strip()

    if quantity == '':
        qty = 1
    else:
        try:
            qty = int(quantity)
        except ValueError:
            qty = 0

    # modifiers such as next/last scale or negate the quantity
    if modifier in self.ptc.Modifiers:
        qty = qty * self.ptc.Modifiers[modifier]

    # default to days when no unit is present
    if not units:
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)

    (yr, mth, dy, hr, mn, sec, _, _, _) = source
    anchor = datetime.datetime(yr, mth, dy, hr, mn, sec)
    result = anchor

    if units.startswith('y'):
        # years require calendar-aware arithmetic
        result = self.inc(anchor, year=qty)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        # "month"/"months" are matched by their th/ths suffix
        result = self.inc(anchor, month=qty)
        self.dateFlag = 1
    elif units.startswith('d'):
        result = anchor + datetime.timedelta(days=qty)
        self.dateFlag = 1
    elif units.startswith('h'):
        result = anchor + datetime.timedelta(hours=qty)
        self.timeFlag = 2
    elif units.startswith('m'):
        result = anchor + datetime.timedelta(minutes=qty)
        self.timeFlag = 2
    elif units.startswith('s'):
        result = anchor + datetime.timedelta(seconds=qty)
        self.timeFlag = 2
    elif units.startswith('w'):
        result = anchor + datetime.timedelta(weeks=qty)
        self.dateFlag = 1

    return result.timetuple()
def parseDate(self, dateString):
"""
Parse short-form date strings::
'05/28/2006' or '04.21'
@type dateString: string
@param dateString: text to convert to a C{datetime}
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
# values pulled from regex's will be stored here and later
# assigned to mth, dy, yr based on information from the locale
# -1 is used as the marker value because we want zero values
# to be passed thru so they can be flagged as errors later
v1 = -1
v2 = -1
v3 = -1
s = dateString
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v1 = int(s[:index])
s = s[index + 1:]
m = self.ptc.CRE_DATE2.search(s)
if m is not None:
index = m.start()
v2 = int(s[:index])
v3 = int(s[index + 1:])
else:
v2 = int(s.strip())
v = [ v1, v2, v3 ]
d = { 'm': mth, 'd': dy, 'y': yr }
for i in range(0, 3):
n = v[i]
c = self.ptc.dp_order[i]
if n >= 0:
d[c] = n
# if the year is not specified and the date has already
# passed, increment the year
if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
yr = d['y'] + 1
else:
yr = d['y']
mth = d['m']
dy = d['d']
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
if _debug:
print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)
if (mth > 0 and mth <= 12) and \
(dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime() # return current time if date
# string is invalid
return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type dateString: string
    @param dateString: text to convert to a datetime

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    currentMth = mth
    currentDy = dy

    m = self.ptc.CRE_DATE3.search(dateString.lower())

    # the month name is mapped to its numeric value; CRE_DATE3 matched,
    # so 'mthname' is present
    mth = self.ptc.MonthOffsets[m.group('mthname')]

    # the day defaults to the 1st when absent
    if m.group('day') is not None:
        dy = int(m.group('day'))
    else:
        dy = 1

    if m.group('year') is not None:
        yr = int(m.group('year'))

        # two-digit years are resolved against the birthday epoch
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # no year given and that day/month has already passed this year,
        # so assume the next year was meant
        yr += 1

    if 0 < dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # return current time if date string is invalid
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()

    return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
             (0 = not parsed, 1 = date range, 2 = time range).
             NOTE(review): several branches below fall thru without an
             explicit return when one half of the range fails to parse,
             making the method return C{None} implicitly - callers should
             be aware of this; confirm whether that is intended.
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # pad the range separator with spaces so the two halves tokenize
    # cleanly, then collapse any doubled spaces that introduced
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # try each range pattern in priority order; the first one that
    # matches fixes rangeFlag (the nesting order is significant)
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s

    if m is not None:
        if (m.group() != s):
            # capture remaining string and parse it to establish the
            # base time the range is relative to
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]
            s = '%s %s' % (chunk1, chunk2)
            flag = 1

            sourceTime, flag = self.parse(s, sourceTime)
            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

        # NOTE(review): the rangeFlag 1 and 2 bodies are identical -
        # presumably the two time-range regexes only differ in format
        if rangeFlag == 1:
            m = re.search(self.ptc.rangeSep, parseStr)
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

            if (eflag != 0) and (sflag != 0):
                return (startTime, endTime, 2)
        elif rangeFlag == 2:
            m = re.search(self.ptc.rangeSep, parseStr)
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

            if (eflag != 0) and (sflag != 0):
                return (startTime, endTime, 2)
        elif rangeFlag == 3 or rangeFlag == 7:
            m = re.search(self.ptc.rangeSep, parseStr)
            # capturing the meridian from the end time
            if self.ptc.usesMeridian:
                ampm = re.search(self.ptc.am[0], parseStr)

                # appending the meridian to the start time
                if ampm is not None:
                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
                else:
                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

            endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

            if (eflag != 0) and (sflag != 0):
                return (startTime, endTime, 2)
        elif rangeFlag == 4:
            m = re.search(self.ptc.rangeSep, parseStr)
            startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

            if (eflag != 0) and (sflag != 0):
                return (startDate, endDate, 1)
        elif rangeFlag == 5:
            m = re.search(self.ptc.rangeSep, parseStr)
            endDate = parseStr[(m.start() + 1):]

            # capturing the year from the end date
            date = self.ptc.CRE_DATE3.search(endDate)
            endYear = date.group('year')

            # appending the year to the start date if the start date
            # does not have year information and the end date does.
            # eg : "Aug 21 - Sep 4, 2007"
            if endYear is not None:
                startDate = (parseStr[:m.start()]).strip()
                date = self.ptc.CRE_DATE3.search(startDate)
                startYear = date.group('year')

                if startYear is None:
                    startDate = startDate + ', ' + endYear
            else:
                startDate = parseStr[:m.start()]

            startDate, sflag = self.parse(startDate, sourceTime)
            endDate, eflag = self.parse(endDate, sourceTime)

            if (eflag != 0) and (sflag != 0):
                return (startDate, endDate, 1)
        elif rangeFlag == 6:
            m = re.search(self.ptc.rangeSep, parseStr)
            startDate = parseStr[:m.start()]

            # capturing the month from the start date
            mth = self.ptc.CRE_DATE3.search(startDate)
            mth = mth.group('mthname')

            # appending the month name to the end date
            endDate = mth + parseStr[(m.start() + 1):]

            startDate, sflag = self.parse(startDate, sourceTime)
            endDate, eflag = self.parse(endDate, sourceTime)

            if (eflag != 0) and (sflag != 0):
                return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()
        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type wd: integer
    @param wd: day-of-week value for the current day
    @type wkdy: integer
    @param wkdy: day-of-week value for the parsed day
    @type offset: integer
    @param offset: offset direction for any modifiers (-1, 0, 1)
    @type style: integer
    @param style: normally the value set in C{Constants.DOWParseStyle}
    @type currentDayStyle: integer
    @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}

    @rtype: integer
    @return: calculated day-of-week delta in days
    """
    # the three candidate deltas: the weekday within this week, its
    # first occurrence next week, and its last occurrence the week before
    sameWeek = wkdy - wd
    nextWeek = 7 - wd + wkdy
    lastWeek = wkdy - wd - 7

    if offset == 1:
        # modifier indicates the future week eg: "next"
        diff = nextWeek
    elif offset == -1:
        # modifier indicates the past week eg: "last", "previous"
        diff = lastWeek
    elif offset == 0:
        # modifier indicates the current week eg: "this"
        diff = sameWeek
    elif offset == 2:
        # no modifier present - the string is just a bare day-of-week,
        # so the configured parse style decides which occurrence is meant
        if style == 1:
            # next occurrence; currentDayStyle decides if today qualifies
            if currentDayStyle == True:
                useSameWeek = wkdy >= wd
            else:
                useSameWeek = wkdy > wd

            if useSameWeek:
                diff = sameWeek
            else:
                diff = nextWeek
        elif style == -1:
            # last occurrence; currentDayStyle decides if today qualifies
            if currentDayStyle == True:
                useSameWeek = wkdy <= wd
            else:
                useSameWeek = wkdy < wd

            if useSameWeek:
                diff = sameWeek
            else:
                diff = lastWeek
        else:
            # occurrence of the DOW in the current week
            diff = sameWeek

    if _debug:
        print("wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style))

    return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to sourceTime
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: remaining (unparsed) text and the modified sourceTime
    """
    # offset encodes the modifier's direction/magnitude
    # (e.g. negative for "last", positive for "next")
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''

    # flag becomes True once some branch has consumed the unit
    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> last day of the current month
            # (hour 9 is the hard-coded default start-of-day used throughout)
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            # NOTE(review): mth + 1 may be 13 in December - presumably
            # daysInMonth() tolerates that; confirm
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            # offset whole months from the 1st of the current month
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> end of the work week (Friday 5pm: 4 - wd days ahead)
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "next week" -> exactly one week ahead
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # other offsets delegate to the weekday logic anchored on monday
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> end of today (5pm)
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            # "next day" keeps the current time-of-day
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> last day of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            # "next year" keeps month/day/time
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            # other offsets -> January 1st of the target year
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag = True
        self.dateFlag = 1

    if flag == False:
        # the unit was not a known time unit - try it as a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday, then resolve 'eod'
                # against that day via the locale's source table
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                # plain weekday: compute the day delta per the parse style
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

    if not flag:
        # still unconsumed - try the unit as a natural-language time
        # string (e.g. "lunch", "noon")
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)

            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    # the unconsumed remainder is handed back to parse() for another pass
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to C{sourceTime}
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: the modified sourceTime and an "invalid" flag
             (True means the text could not be fully interpreted -
             note the inverted sense compared to parse()'s result flag)
    """
    offset = self.ptc.Modifiers[modifier]
    digit = r'\d+'

    self.modifier2Flag = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)
        # invert the parse result: flag1 is True when chunk2 did NOT
        # parse as a date/time (parse() returns 0 for "not parsed")
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False
    else:
        flag1 = False
        # NOTE: flag2 is deliberately left unset here; the return below
        # relies on `and` short-circuiting on flag1 == False

    if chunk1 != '':
        if offset < 0:
            # same digit-negation treatment for the text before the modifier
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # remember the flags so they can be restored if chunk1's parse
        # result is discarded below
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag
        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # BUGFIX: this was `and`, which only flagged a time when ALL of
            # hour, minute and second were non-zero (e.g. "10:30:00" was not
            # treated as carrying a time).  Any non-zero component means a
            # time-of-day was present; an all-zero time is exactly the
            # default '00:00:00 GMT' that _parse_date_rfc822 appends for
            # date-only strings, so it correctly stays date-only.
            if (hr != 0) or (mn != 0) or (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    # the regex-driven branches below all matched against lowercased text
    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                # bare hour like "9 am"
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

            # invalid time
            if hr > 24 or mn > 59 or sec > 59:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)

        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # the original had an if/else on (wkdy > wd) whose two branches
        # were token-identical; collapsed to a single call
        qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                      self.ptc.DOWParseStyle,
                                      self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        # hour 9 is the hard-coded default start-of-day used throughout
        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
    def parse(self, datetimeString, sourceTime=None):
        """
        Splits the given C{datetimeString} into tokens, finds the regex
        patterns that match and then calculates a C{struct_time} value from
        the chunks.
        If C{sourceTime} is given then the C{struct_time} value will be
        calculated from that value, otherwise from the current date/time.
        If the C{datetimeString} is parsed and date/time value found then
        the second item of the returned tuple will be a flag to let you know
        what kind of C{struct_time} value is being returned::
            0 = not parsed at all
            1 = parsed as a C{date}
            2 = parsed as a C{time}
            3 = parsed as a C{datetime}
        @type datetimeString: string
        @param datetimeString: date/time text to evaluate
        @type sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base
        @rtype: tuple
        @return: tuple of: modified C{sourceTime} and the result flag
        """
        # Accept a datetime as sourceTime by coercing it; anything else
        # must already be a struct_time (or plain tuple of the same shape).
        if sourceTime:
            if isinstance(sourceTime, datetime.datetime):
                if _debug:
                    print 'coercing datetime to timetuple'
                sourceTime = sourceTime.timetuple()
            else:
                if not isinstance(sourceTime, time.struct_time) and \
                   not isinstance(sourceTime, tuple):
                    raise Exception('sourceTime is not a struct_time')
        s = datetimeString.strip().lower()
        parseStr = ''
        totalTime = sourceTime
        if s == '' :
            if sourceTime is not None:
                return (sourceTime, self.dateFlag + self.timeFlag)
            else:
                return (time.localtime(), 0)
        self.timeFlag = 0
        self.dateFlag = 0
        # Scan the string left-to-right, peeling off one recognized chunk
        # per iteration.  Each regex below claims a chunk into parseStr,
        # sets the matching instance flag, and puts the unmatched text
        # back into s for the next pass.  The order of the checks below
        # is significant (modifiers before dates before times).
        while len(s) > 0:
            flag = False
            chunk1 = ''
            chunk2 = ''
            if _debug:
                print 'parse (top of loop): [%s][%s]' % (s, parseStr)
            if parseStr == '':
                # Modifier like next\prev..
                m = self.ptc.CRE_MODIFIER.search(s)
                if m is not None:
                    self.modifierFlag = True
                    if (m.group('modifier') != s):
                        # capture remaining string
                        parseStr = m.group('modifier')
                        chunk1 = s[:m.start('modifier')].strip()
                        chunk2 = s[m.end('modifier'):].strip()
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # Modifier like from\after\prior..
                m = self.ptc.CRE_MODIFIER2.search(s)
                if m is not None:
                    self.modifier2Flag = True
                    if (m.group('modifier') != s):
                        # capture remaining string
                        parseStr = m.group('modifier')
                        chunk1 = s[:m.start('modifier')].strip()
                        chunk2 = s[m.end('modifier'):].strip()
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                valid_date = False
                for match in self.ptc.CRE_DATE3.finditer(s):
                    # to prevent "HH:MM(:SS) time strings" expressions from
                    # triggering this regex, we check that the month field
                    # exists in the matched expression; if it does not, the
                    # date field is not valid
                    if match.group('mthname'):
                        m = self.ptc.CRE_DATE3.search(s, match.start())
                        valid_date = True
                        break
                # String date format
                if valid_date:
                    self.dateStrFlag = True
                    self.dateFlag = 1
                    if (m.group('date') != s):
                        # capture remaining string
                        parseStr = m.group('date')
                        chunk1 = s[:m.start('date')]
                        chunk2 = s[m.end('date'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # Standard date format
                m = self.ptc.CRE_DATE.search(s)
                if m is not None:
                    self.dateStdFlag = True
                    self.dateFlag = 1
                    if (m.group('date') != s):
                        # capture remaining string
                        parseStr = m.group('date')
                        chunk1 = s[:m.start('date')]
                        chunk2 = s[m.end('date'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # Natural language day strings
                m = self.ptc.CRE_DAY.search(s)
                if m is not None:
                    self.dayStrFlag = True
                    self.dateFlag = 1
                    if (m.group('day') != s):
                        # capture remaining string
                        parseStr = m.group('day')
                        chunk1 = s[:m.start('day')]
                        chunk2 = s[m.end('day'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # Quantity + Units
                m = self.ptc.CRE_UNITS.search(s)
                if m is not None:
                    self.unitsFlag = True
                    if (m.group('qty') != s):
                        # capture remaining string
                        parseStr = m.group('qty')
                        chunk1 = s[:m.start('qty')].strip()
                        chunk2 = s[m.end('qty'):].strip()
                        # a trailing '-' on the preceding text negates the qty
                        if chunk1[-1:] == '-':
                            parseStr = '-%s' % parseStr
                            chunk1 = chunk1[:-1]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # Quantity + Units (single-character units, e.g. "5 h")
                m = self.ptc.CRE_QUNITS.search(s)
                if m is not None:
                    self.qunitsFlag = True
                    if (m.group('qty') != s):
                        # capture remaining string
                        parseStr = m.group('qty')
                        chunk1 = s[:m.start('qty')].strip()
                        chunk2 = s[m.end('qty'):].strip()
                        # a trailing '-' on the preceding text negates the qty
                        if chunk1[-1:] == '-':
                            parseStr = '-%s' % parseStr
                            chunk1 = chunk1[:-1]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # Weekday
                m = self.ptc.CRE_WEEKDAY.search(s)
                if m is not None:
                    gv = m.group('weekday')
                    # skip words that are really day offsets (e.g. "today")
                    if s not in self.ptc.dayOffsets:
                        self.weekdyFlag = True
                        self.dateFlag = 1
                        if (gv != s):
                            # capture remaining string
                            parseStr = gv
                            chunk1 = s[:m.start('weekday')]
                            chunk2 = s[m.end('weekday'):]
                            s = '%s %s' % (chunk1, chunk2)
                            flag = True
                        else:
                            parseStr = s
            if parseStr == '':
                # Natural language time strings
                m = self.ptc.CRE_TIME.search(s)
                if m is not None:
                    self.timeStrFlag = True
                    self.timeFlag = 2
                    if (m.group('time') != s):
                        # capture remaining string
                        parseStr = m.group('time')
                        chunk1 = s[:m.start('time')]
                        chunk2 = s[m.end('time'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
            if parseStr == '':
                # HH:MM(:SS) am/pm time strings
                m = self.ptc.CRE_TIMEHMS2.search(s)
                if m is not None:
                    self.meridianFlag = True
                    self.timeFlag = 2
                    if m.group('minutes') is not None:
                        if m.group('seconds') is not None:
                            parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                        m.group('minutes'),
                                                        m.group('seconds'),
                                                        m.group('meridian'))
                        else:
                            parseStr = '%s:%s %s' % (m.group('hours'),
                                                     m.group('minutes'),
                                                     m.group('meridian'))
                    else:
                        parseStr = '%s %s' % (m.group('hours'),
                                              m.group('meridian'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('meridian'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
            if parseStr == '':
                # HH:MM(:SS) time strings
                m = self.ptc.CRE_TIMEHMS.search(s)
                if m is not None:
                    self.timeStdFlag = True
                    self.timeFlag = 2
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('seconds'))
                        chunk1 = s[:m.start('hours')]
                        chunk2 = s[m.end('seconds'):]
                    else:
                        parseStr = '%s:%s' % (m.group('hours'),
                                              m.group('minutes'))
                        chunk1 = s[:m.start('hours')]
                        chunk2 = s[m.end('minutes'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
            # if string does not match any regex, empty string to
            # come out of the while loop
            if not flag:
                s = ''
            if _debug:
                print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
                print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                      (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
                print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                      (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)
            # evaluate the matched string
            if parseStr != '':
                if self.modifierFlag == True:
                    t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                    # t is the unparsed part of the chunks.
                    # If it is not date/time, return current
                    # totalTime as it is; else return the output
                    # after parsing t.
                    if (t != '') and (t != None):
                        tempDateFlag = self.dateFlag
                        tempTimeFlag = self.timeFlag
                        (totalTime2, flag) = self.parse(t, totalTime)
                        if flag == 0 and totalTime is not None:
                            self.timeFlag = tempTimeFlag
                            self.dateFlag = tempDateFlag
                            return (totalTime, self.dateFlag + self.timeFlag)
                        else:
                            return (totalTime2, self.dateFlag + self.timeFlag)
                elif self.modifier2Flag == True:
                    totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                    if invalidFlag == True:
                        self.dateFlag = 0
                        self.timeFlag = 0
                else:
                    totalTime = self._evalString(parseStr, totalTime)
                    parseStr = ''
        # String is not parsed at all
        if totalTime is None or totalTime == sourceTime:
            totalTime = time.localtime()
            self.dateFlag = 0
            self.timeFlag = 0
        return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
"""
Takes the given C{source} date, or current date if none is
passed, and increments it according to the values passed in
by month and/or year.
This routine is needed because Python's C{timedelta()} function
does not allow for month or year increments.
@type source: struct_time
@param source: C{struct_time} value to increment
@type month: integer
@param month: optional number of months to increment
@type year: integer
@param year: optional number of years to increment
@rtype: datetime
@return: C{source} incremented by the number of months and/or years
"""
yr = source.year
mth = source.month
dy = source.day
if year:
try:
yi = int(year)
except ValueError:
yi = 0
yr += yi
if month:
try:
mi = int(month)
except ValueError:
mi = 0
m = abs(mi)
y = m / 12 # how many years are in month increment
m = m % 12 # get remaining months
if mi < 0:
mth = mth - m # sub months from start month
if mth < 1: # cross start-of-year?
y -= 1 # yes - decrement year
mth += 12 # and fix month
else:
mth = mth + m # add months to start month
if mth > 12: # cross end-of-year?
y += 1 # yes - increment year
mth -= 12 # and fix month
yr += y
# if the day ends up past the last day of
# the new month, set it to the last day
if dy > self.ptc.daysInMonth(mth, yr):
dy = self.ptc.daysInMonth(mth, yr)
d = source.replace(year=yr, month=mth, day=dy)
return source + (d - source)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar._CalculateDOWDelta | python | def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indiacting current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurance of the DOW in the current week is calculated
diff = wkdy - wd
if _debug:
print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
return diff | Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L609-L678 | null | class Calendar:
"""
    A collection of routines to input, parse and manipulate dates and times.
The text can either be 'normal' date values or it can be human readable.
"""
    def __init__(self, constants=None):
        """
        Default constructor for the L{Calendar} class.
        @type constants: object
        @param constants: Instance of the class L{parsedatetime_consts.Constants}
        @rtype: object
        @return: L{Calendar} instance
        """
        # if a constants reference is not included, use default
        if constants is None:
            self.ptc = parsedatetime_consts.Constants()
        else:
            self.ptc = constants
        # One boolean per token category recognized by parse(); the flag
        # for the most recent match tells the evaluation step which
        # handler to apply.
        self.weekdyFlag = False    # monday/tuesday/...
        self.dateStdFlag = False   # 07/21/06
        self.dateStrFlag = False   # July 21st, 2006
        self.timeStdFlag = False   # 5:50
        self.meridianFlag = False  # am/pm
        self.dayStrFlag = False    # tomorrow/yesterday/today/..
        self.timeStrFlag = False   # lunch/noon/breakfast/...
        self.modifierFlag = False  # after/before/prev/next/..
        self.modifier2Flag = False # after/before/prev/next/..
        self.unitsFlag = False     # hrs/weeks/yrs/min/..
        self.qunitsFlag = False    # h/m/t/d..
        # result-kind accumulators returned by parse():
        # dateFlag (0 or 1) + timeFlag (0 or 2) => 0/1/2/3
        self.timeFlag = 0
        self.dateFlag = 0
    def _convertUnitAsWords(self, unitText):
        """
        Converts text units into their number value
        Five = 5
        Twenty Five = 25
        Two hundred twenty five = 225
        Two thousand and twenty five = 2025
        Two thousand twenty five = 2025
        @type unitText: string
        @param unitText: number text to convert
        @rtype: integer
        @return: numerical value of unitText
        """
        # TODO: implement this
        # NOTE: unimplemented stub - currently always returns None,
        # regardless of input.
        pass
    def _buildTime(self, source, quantity, modifier, units):
        """
        Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
        After converting, calcuate the time and return the adjusted sourceTime.
        @type source: time
        @param source: time to use as the base (or source)
        @type quantity: string
        @param quantity: quantity string
        @type modifier: string
        @param modifier: how quantity and units modify the source time
        @type units: string
        @param units: unit of the quantity (i.e. hours, days, months, etc)
        @rtype: struct_time
        @return: C{struct_time} of the calculated time
        """
        if _debug:
            print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
        if source is None:
            source = time.localtime()
        if quantity is None:
            quantity = ''
        else:
            quantity = quantity.strip()
        # empty quantity means "one unit"; non-numeric text falls back to 0
        if len(quantity) == 0:
            qty = 1
        else:
            try:
                qty = int(quantity)
            except ValueError:
                qty = 0
        # modifiers scale/negate the quantity (e.g. "last" => -1)
        if modifier in self.ptc.Modifiers:
            qty = qty * self.ptc.Modifiers[modifier]
        if units is None or units == '':
            units = 'dy'
        # plurals are handled by regex's (could be a bug tho)
        (yr, mth, dy, hr, mn, sec, _, _, _) = source
        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        target = start
        # NOTE: check order matters - the 'th'/'ths' test catches
        # 'month'/'mth' BEFORE startswith('m') can mistake them for minutes
        if units.startswith('y'):
            target = self.inc(start, year=qty)
            self.dateFlag = 1
        elif units.endswith('th') or units.endswith('ths'):
            target = self.inc(start, month=qty)
            self.dateFlag = 1
        else:
            if units.startswith('d'):
                target = start + datetime.timedelta(days=qty)
                self.dateFlag = 1
            elif units.startswith('h'):
                target = start + datetime.timedelta(hours=qty)
                self.timeFlag = 2
            elif units.startswith('m'):
                target = start + datetime.timedelta(minutes=qty)
                self.timeFlag = 2
            elif units.startswith('s'):
                target = start + datetime.timedelta(seconds=qty)
                self.timeFlag = 2
            elif units.startswith('w'):
                target = start + datetime.timedelta(weeks=qty)
                self.dateFlag = 1
        return target.timetuple()
    def parseDate(self, dateString):
        """
        Parse short-form date strings::
            '05/28/2006' or '04.21'
        @type dateString: string
        @param dateString: text to convert to a C{datetime}
        @rtype: struct_time
        @return: calculated C{struct_time} value of dateString
        """
        yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
        # values pulled from regex's will be stored here and later
        # assigned to mth, dy, yr based on information from the locale
        # -1 is used as the marker value because we want zero values
        # to be passed thru so they can be flagged as errors later
        v1 = -1
        v2 = -1
        v3 = -1
        s = dateString
        # split the string on up to two separator matches, yielding
        # one, two or three numeric fields
        m = self.ptc.CRE_DATE2.search(s)
        if m is not None:
            index = m.start()
            v1 = int(s[:index])
            s = s[index + 1:]
            m = self.ptc.CRE_DATE2.search(s)
            if m is not None:
                index = m.start()
                v2 = int(s[:index])
                v3 = int(s[index + 1:])
            else:
                v2 = int(s.strip())
        v = [ v1, v2, v3 ]
        d = { 'm': mth, 'd': dy, 'y': yr }
        # map the positional fields onto month/day/year according to
        # the locale's field ordering (dp_order), skipping -1 markers
        for i in range(0, 3):
            n = v[i]
            c = self.ptc.dp_order[i]
            if n >= 0:
                d[c] = n
        # if the year is not specified and the date has already
        # passed, increment the year
        if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
            yr = d['y'] + 1
        else:
            yr = d['y']
        mth = d['m']
        dy = d['d']
        # birthday epoch constraint: two-digit years below the epoch are
        # treated as 20xx, the rest as 19xx
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
        if _debug:
            print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)
        if (mth > 0 and mth <= 12) and \
           (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            self.dateFlag = 0
            self.timeFlag = 0
            sourceTime = time.localtime() # return current time if date
                                          # string is invalid
        return sourceTime
def parseDateText(self, dateString):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
currentMth = mth
currentDy = dy
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
if m.group('day') != None:
dy = int(m.group('day'))
else:
dy = 1
if m.group('year') != None:
yr = int(m.group('year'))
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += 1
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
# Return current time if date string is invalid
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime()
return sourceTime
    def evalRanges(self, datetimeString, sourceTime=None):
        """
        Evaluate the C{datetimeString} text and determine if
        it represents a date or time range.
        @type datetimeString: string
        @param datetimeString: datetime text to evaluate
        @type sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base
        @rtype: tuple
        @return: tuple of: start datetime, end datetime and the invalid flag
        """
        startTime = ''
        endTime = ''
        startDate = ''
        endDate = ''
        rangeFlag = 0
        s = datetimeString.strip().lower()
        # pad the range separator with spaces so the split regexes below
        # always see it as a distinct token
        if self.ptc.rangeSep in s:
            s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
            s = s.replace('  ', ' ')
        # classify the string with the first range regex that matches;
        # rangeFlag records which pattern won (order matters)
        m = self.ptc.CRE_TIMERNG1.search(s)
        if m is not None:
            rangeFlag = 1
        else:
            m = self.ptc.CRE_TIMERNG2.search(s)
            if m is not None:
                rangeFlag = 2
            else:
                m = self.ptc.CRE_TIMERNG4.search(s)
                if m is not None:
                    rangeFlag = 7
                else:
                    m = self.ptc.CRE_TIMERNG3.search(s)
                    if m is not None:
                        rangeFlag = 3
                    else:
                        m = self.ptc.CRE_DATERNG1.search(s)
                        if m is not None:
                            rangeFlag = 4
                        else:
                            m = self.ptc.CRE_DATERNG2.search(s)
                            if m is not None:
                                rangeFlag = 5
                            else:
                                m = self.ptc.CRE_DATERNG3.search(s)
                                if m is not None:
                                    rangeFlag = 6
        if _debug:
            print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s
        if m is not None:
            if (m.group() != s):
                # capture remaining string and parse it as the base time
                parseStr = m.group()
                chunk1 = s[:m.start()]
                chunk2 = s[m.end():]
                s = '%s %s' % (chunk1, chunk2)
                flag = 1
                sourceTime, flag = self.parse(s, sourceTime)
                if flag == 0:
                    sourceTime = None
            else:
                parseStr = s
        if rangeFlag == 1:
            # HH:MM - HH:MM
            m = re.search(self.ptc.rangeSep, parseStr)
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
            if (eflag != 0) and (sflag != 0):
                return (startTime, endTime, 2)
        elif rangeFlag == 2:
            m = re.search(self.ptc.rangeSep, parseStr)
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
            if (eflag != 0) and (sflag != 0):
                return (startTime, endTime, 2)
        elif rangeFlag == 3 or rangeFlag == 7:
            m = re.search(self.ptc.rangeSep, parseStr)
            # capturing the meridian from the end time
            if self.ptc.usesMeridian:
                ampm = re.search(self.ptc.am[0], parseStr)
                # appending the meridian to the start time
                if ampm is not None:
                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
                else:
                    startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)
            if (eflag != 0) and (sflag != 0):
                return (startTime, endTime, 2)
        elif rangeFlag == 4:
            # date - date
            m = re.search(self.ptc.rangeSep, parseStr)
            startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
            endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
            if (eflag != 0) and (sflag != 0):
                return (startDate, endDate, 1)
        elif rangeFlag == 5:
            m = re.search(self.ptc.rangeSep, parseStr)
            endDate = parseStr[(m.start() + 1):]
            # capturing the year from the end date
            date = self.ptc.CRE_DATE3.search(endDate)
            endYear = date.group('year')
            # appending the year to the start date if the start date
            # does not have year information and the end date does.
            # eg : "Aug 21 - Sep 4, 2007"
            if endYear is not None:
                startDate = (parseStr[:m.start()]).strip()
                date = self.ptc.CRE_DATE3.search(startDate)
                startYear = date.group('year')
                if startYear is None:
                    startDate = startDate + ', ' + endYear
            else:
                startDate = parseStr[:m.start()]
            startDate, sflag = self.parse(startDate, sourceTime)
            endDate, eflag = self.parse(endDate, sourceTime)
            if (eflag != 0) and (sflag != 0):
                return (startDate, endDate, 1)
        elif rangeFlag == 6:
            m = re.search(self.ptc.rangeSep, parseStr)
            startDate = parseStr[:m.start()]
            # capturing the month from the start date
            mth = self.ptc.CRE_DATE3.search(startDate)
            mth = mth.group('mthname')
            # appending the month name to the end date
            endDate = mth + parseStr[(m.start() + 1):]
            startDate, sflag = self.parse(startDate, sourceTime)
            endDate, eflag = self.parse(endDate, sourceTime)
            if (eflag != 0) and (sflag != 0):
                return (startDate, endDate, 1)
        else:
            # if range is not found
            sourceTime = time.localtime()
        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indiacting current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurance of the DOW in the current week is calculated
diff = wkdy - wd
if _debug:
print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
return diff
    def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
        """
        Evaluate the C{modifier} string and following text (passed in
        as C{chunk1} and C{chunk2}) and if they match any known modifiers
        calculate the delta and apply it to C{sourceTime}.
        @type modifier: string
        @param modifier: modifier text to apply to sourceTime
        @type chunk1: string
        @param chunk1: first text chunk that followed modifier (if any)
        @type chunk2: string
        @param chunk2: second text chunk that followed modifier (if any)
        @type sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base
        @rtype: tuple
        @return: tuple of: remaining text and the modified sourceTime
        """
        offset = self.ptc.Modifiers[modifier]
        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
        # capture the units after the modifier and the remaining
        # string after the unit
        m = self.ptc.CRE_REMAINING.search(chunk2)
        if m is not None:
            index = m.start() + 1
            unit = chunk2[:m.start()]
            chunk2 = chunk2[index:]
        else:
            unit = chunk2
            chunk2 = ''
        # flag becomes True once a unit handler below has consumed the
        # modifier; otherwise the fallback paths at the bottom run
        flag = False
        if unit == 'month' or \
           unit == 'mth' or \
           unit == 'm':
            if offset == 0:
                # "this month" => last day of the current month
                dy = self.ptc.daysInMonth(mth, yr)
                sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
            elif offset == 2:
                # if day is the last day of the month, calculate the last day
                # of the next month
                if dy == self.ptc.daysInMonth(mth, yr):
                    dy = self.ptc.daysInMonth(mth + 1, yr)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = self.inc(start, month=1)
                sourceTime = target.timetuple()
            else:
                start = datetime.datetime(yr, mth, 1, 9, 0, 0)
                target = self.inc(start, month=offset)
                sourceTime = target.timetuple()
            flag = True
            self.dateFlag = 1
        if unit == 'week' or \
           unit == 'wk' or \
           unit == 'w':
            if offset == 0:
                # "this week" => end of the work week (Friday 17:00)
                start = datetime.datetime(yr, mth, dy, 17, 0, 0)
                target = start + datetime.timedelta(days=(4 - wd))
                sourceTime = target.timetuple()
            elif offset == 2:
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=7)
                sourceTime = target.timetuple()
            else:
                # delegate other offsets to the weekday path
                return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)
            flag = True
            self.dateFlag = 1
        if unit == 'day' or \
           unit == 'dy' or \
           unit == 'd':
            if offset == 0:
                sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
                self.timeFlag = 2
            elif offset == 2:
                start = datetime.datetime(yr, mth, dy, hr, mn, sec)
                target = start + datetime.timedelta(days=1)
                sourceTime = target.timetuple()
            else:
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=offset)
                sourceTime = target.timetuple()
            flag = True
            self.dateFlag = 1
        if unit == 'hour' or \
           unit == 'hr':
            if offset == 0:
                sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
            else:
                start = datetime.datetime(yr, mth, dy, hr, 0, 0)
                target = start + datetime.timedelta(hours=offset)
                sourceTime = target.timetuple()
            flag = True
            self.timeFlag = 2
        if unit == 'year' or \
           unit == 'yr' or \
           unit == 'y':
            if offset == 0:
                sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
            elif offset == 2:
                sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
            else:
                sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)
            flag = True
            self.dateFlag = 1
        if flag == False:
            # the unit was not a plain time unit - try a weekday name
            m = self.ptc.CRE_WEEKDAY.match(unit)
            if m is not None:
                wkdy = m.group()
                self.dateFlag = 1
                if modifier == 'eod':
                    # Calculate the upcoming weekday
                    self.modifierFlag = False
                    (sourceTime, _) = self.parse(wkdy, sourceTime)
                    sources = self.ptc.buildSources(sourceTime)
                    self.timeFlag = 2
                    if modifier in sources:
                        sourceTime = sources[modifier]
                else:
                    wkdy = self.ptc.WeekdayOffsets[wkdy]
                    diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                                   self.ptc.DOWParseStyle,
                                                   self.ptc.CurrentDOWParseStyle)
                    start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                    target = start + datetime.timedelta(days=diff)
                    sourceTime = target.timetuple()
                flag = True
                self.dateFlag = 1
        if not flag:
            # not a weekday either - try a natural-language time string
            m = self.ptc.CRE_TIME.match(unit)
            if m is not None:
                self.modifierFlag = False
                (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
                start = datetime.datetime(yr, mth, dy, hr, mn, sec)
                target = start + datetime.timedelta(days=offset)
                sourceTime = target.timetuple()
                flag = True
            else:
                self.modifierFlag = False
                # check if the remaining text is parsable and if so,
                # use it as the base time for the modifier source time
                t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)
                if flag2 != 0:
                    sourceTime = t
                sources = self.ptc.buildSources(sourceTime)
                if modifier in sources:
                    sourceTime = sources[modifier]
                    flag = True
                    self.timeFlag = 2
        # if the word after next is a number, the string is more than likely
        # to be "next 4 hrs" which we will have to combine the units with the
        # rest of the string
        if not flag:
            if offset < 0:
                # if offset is negative, the unit has to be made negative
                unit = '-%s' % unit
            chunk2 = '%s %s' % (unit, chunk2)
        self.modifierFlag = False
        #return '%s %s' % (chunk1, chunk2), sourceTime
        return '%s' % chunk2, sourceTime
    def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
        """
        Evaluate the C{modifier} string and following text (passed in
        as C{chunk1} and C{chunk2}) and if they match any known modifiers
        calculate the delta and apply it to C{sourceTime}.
        @type modifier: string
        @param modifier: modifier text to apply to C{sourceTime}
        @type chunk1: string
        @param chunk1: first text chunk that followed modifier (if any)
        @type chunk2: string
        @param chunk2: second text chunk that followed modifier (if any)
        @type sourceTime: struct_time
        @param sourceTime: C{struct_time} value to use as the base
        @rtype: tuple
        @return: tuple of: remaining text and the modified sourceTime
        """
        offset = self.ptc.Modifiers[modifier]
        digit = r'\d+'
        self.modifier2Flag = False
        # If the string after the negative modifier starts with digits,
        # then it is likely that the string is similar to ' before 3 days'
        # or 'evening prior to 3 days'.
        # In this case, the total time is calculated by subtracting '3 days'
        # from the current date.
        # So, we have to identify the quantity and negate it before parsing
        # the string.
        # This is not required for strings not starting with digits since the
        # string is enough to calculate the sourceTime
        if chunk2 != '':
            if offset < 0:
                m = re.match(digit, chunk2.strip())
                if m is not None:
                    qty = int(m.group()) * -1
                    chunk2 = chunk2[m.end():]
                    chunk2 = '%d%s' % (qty, chunk2)
            sourceTime, flag1 = self.parse(chunk2, sourceTime)
            # NOTE: flag1 is inverted here - True means chunk2 did NOT
            # parse as a date/time (parse() returned flag 0)
            if flag1 == 0:
                flag1 = True
            else:
                flag1 = False
            flag2 = False
        else:
            flag1 = False
        if chunk1 != '':
            if offset < 0:
                m = re.search(digit, chunk1.strip())
                if m is not None:
                    qty = int(m.group()) * -1
                    chunk1 = chunk1[m.end():]
                    chunk1 = '%d%s' % (qty, chunk1)
            tempDateFlag = self.dateFlag
            tempTimeFlag = self.timeFlag
            sourceTime2, flag2 = self.parse(chunk1, sourceTime)
        else:
            # no chunk1: short-circuit on flag1 avoids touching flag2,
            # which may be unbound on this path
            return sourceTime, (flag1 and flag2)
        # if chunk1 is not a datetime and chunk2 is then do not use datetime
        # value returned by parsing chunk1
        if not (flag1 == False and flag2 == 0):
            sourceTime = sourceTime2
        else:
            self.timeFlag = tempTimeFlag
            self.dateFlag = tempDateFlag
        return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    Exactly one of the C{self.*Flag} attributes is expected to have been
    set by L{parse()} before this is called; each flag selects one of the
    interpretation branches below, and each branch clears its own flag
    when done.

    @type datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            # rfc822 parse result has a 10th element (tz offset) that is
            # dropped before returning the 9-tuple
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): `and` means timeFlag is only set when hour,
            # minute AND second are all non-zero; a time like 10:30:00
            # leaves timeFlag at 0. Looks like it was meant to be `or` --
            # confirm intent before changing.
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            # text before the meridian; a bare 1-2 digit value is taken
            # as an hour with no minutes/seconds
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

            # invalid time
            if hr > 24 or mn > 59 or sec > 59:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical; the wkdy > wd
        # comparison has no effect. Presumably one branch was meant to
        # pass a different offset -- confirm against upstream history.
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        # day-relative strings resolve to 9:00 on the target day
        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    The body is a scanner loop: each iteration tries the recognizer
    regexes in priority order, pulls the first matching span out of the
    working string C{s} into C{parseStr} (setting the corresponding
    C{self.*Flag}), and then hands C{parseStr} to the appropriate
    evaluator.  The loop ends when no recognizer matches.

    @type datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    # normalize sourceTime: accept a datetime (coerced to timetuple),
    # a struct_time or a plain tuple; anything else is an error
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '' :
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding chunk negates
                    # the quantity (e.g. "-5 days")
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # same trailing '-' negation as CRE_UNITS above
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # a weekday word that is also a day-offset word
                # (locale-dependent) is left for CRE_DAY handling
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                # rebuild a normalized "H:M[:S] am/pm" string from the
                # captured groups for _evalString to consume
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))

                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)
                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)

            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0

            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type source: datetime
    @param source: C{datetime} value to increment (must support
                   C{.year}/C{.month}/C{.day} and C{.replace()})
    @type month: integer
    @param month: optional number of months to increment (may be
                  negative or larger than 12)
    @type year: integer
    @param year: optional number of years to increment

    @rtype: datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr = source.year
    mth = source.month
    dy = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            # non-numeric year increment is ignored
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            # non-numeric month increment is ignored
            mi = 0

        m = abs(mi)
        # Floor division: whole years contained in the month increment.
        # (was "m / 12", which yields a float under Python 3 true
        # division and then breaks source.replace(year=...) below)
        y = m // 12
        m = m % 12      # get remaining months

        if mi < 0:
            mth = mth - m        # sub months from start month
            if mth < 1:          # cross start-of-year?
                y -= 1           #   yes - decrement year
                mth += 12        #   and fix month
        else:
            mth = mth + m        # add months to start month
            if mth > 12:         # cross end-of-year?
                y += 1           #   yes - increment year
                mth -= 12        #   and fix month

        yr += y

        # if the day ends up past the last day of
        # the new month, set it to the last day
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)

    d = source.replace(year=yr, month=mth, day=dy)

    # adding the delta back onto source is equivalent to returning d
    # itself; kept for compatibility with the original return shape
    return source + (d - source)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar._evalModifier | python | def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
"""
Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to sourceTime
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime
"""
offset = self.ptc.Modifiers[modifier]
if sourceTime is not None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
# capture the units after the modifier and the remaining
# string after the unit
m = self.ptc.CRE_REMAINING.search(chunk2)
if m is not None:
index = m.start() + 1
unit = chunk2[:m.start()]
chunk2 = chunk2[index:]
else:
unit = chunk2
chunk2 = ''
flag = False
if unit == 'month' or \
unit == 'mth' or \
unit == 'm':
if offset == 0:
dy = self.ptc.daysInMonth(mth, yr)
sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
elif offset == 2:
# if day is the last day of the month, calculate the last day
# of the next month
if dy == self.ptc.daysInMonth(mth, yr):
dy = self.ptc.daysInMonth(mth + 1, yr)
start = datetime.datetime(yr, mth, dy, 9, 0, 0)
target = self.inc(start, month=1)
sourceTime = target.timetuple()
else:
start = datetime.datetime(yr, mth, 1, 9, 0, 0)
target = self.inc(start, month=offset)
sourceTime = target.timetuple()
flag = True
self.dateFlag = 1
if unit == 'week' or \
unit == 'wk' or \
unit == 'w':
if offset == 0:
start = datetime.datetime(yr, mth, dy, 17, 0, 0)
target = start + datetime.timedelta(days=(4 - wd))
sourceTime = target.timetuple()
elif offset == 2:
start = datetime.datetime(yr, mth, dy, 9, 0, 0)
target = start + datetime.timedelta(days=7)
sourceTime = target.timetuple()
else:
return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)
flag = True
self.dateFlag = 1
if unit == 'day' or \
unit == 'dy' or \
unit == 'd':
if offset == 0:
sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
self.timeFlag = 2
elif offset == 2:
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start + datetime.timedelta(days=1)
sourceTime = target.timetuple()
else:
start = datetime.datetime(yr, mth, dy, 9, 0, 0)
target = start + datetime.timedelta(days=offset)
sourceTime = target.timetuple()
flag = True
self.dateFlag = 1
if unit == 'hour' or \
unit == 'hr':
if offset == 0:
sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
else:
start = datetime.datetime(yr, mth, dy, hr, 0, 0)
target = start + datetime.timedelta(hours=offset)
sourceTime = target.timetuple()
flag = True
self.timeFlag = 2
if unit == 'year' or \
unit == 'yr' or \
unit == 'y':
if offset == 0:
sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
elif offset == 2:
sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)
flag = True
self.dateFlag = 1
if flag == False:
m = self.ptc.CRE_WEEKDAY.match(unit)
if m is not None:
wkdy = m.group()
self.dateFlag = 1
if modifier == 'eod':
# Calculate the upcoming weekday
self.modifierFlag = False
(sourceTime, _) = self.parse(wkdy, sourceTime)
sources = self.ptc.buildSources(sourceTime)
self.timeFlag = 2
if modifier in sources:
sourceTime = sources[modifier]
else:
wkdy = self.ptc.WeekdayOffsets[wkdy]
diff = self._CalculateDOWDelta(wd, wkdy, offset,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
start = datetime.datetime(yr, mth, dy, 9, 0, 0)
target = start + datetime.timedelta(days=diff)
sourceTime = target.timetuple()
flag = True
self.dateFlag = 1
if not flag:
m = self.ptc.CRE_TIME.match(unit)
if m is not None:
self.modifierFlag = False
(yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start + datetime.timedelta(days=offset)
sourceTime = target.timetuple()
flag = True
else:
self.modifierFlag = False
# check if the remaining text is parsable and if so,
# use it as the base time for the modifier source time
t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)
if flag2 != 0:
sourceTime = t
sources = self.ptc.buildSources(sourceTime)
if modifier in sources:
sourceTime = sources[modifier]
flag = True
self.timeFlag = 2
# if the word after next is a number, the string is more than likely
# to be "next 4 hrs" which we will have to combine the units with the
# rest of the string
if not flag:
if offset < 0:
# if offset is negative, the unit has to be made negative
unit = '-%s' % unit
chunk2 = '%s %s' % (unit, chunk2)
self.modifierFlag = False
#return '%s %s' % (chunk1, chunk2), sourceTime
return '%s' % chunk2, sourceTime | Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to sourceTime
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L681-L870 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants}

    @rtype: object
    @return: L{Calendar} instance
    """
    # Fall back to the default constants object when none was supplied.
    if constants is None:
        constants = parsedatetime_consts.Constants()
    self.ptc = constants

    # Parse-state flags, one per token category the scanner recognizes;
    # parse() sets exactly one before dispatching to an evaluator:
    #   weekdy    - monday/tuesday/...
    #   dateStd   - 07/21/06
    #   dateStr   - July 21st, 2006
    #   timeStd   - 5:50
    #   meridian  - am/pm
    #   dayStr    - tomorrow/yesterday/today/..
    #   timeStr   - lunch/noon/breakfast/...
    #   modifier  - after/before/prev/next/..
    #   modifier2 - from/after/prior/..
    #   units     - hrs/weeks/yrs/min/..
    #   qunits    - h/m/t/d..
    for name in ('weekdy', 'dateStd', 'dateStr', 'timeStd',
                 'meridian', 'dayStr', 'timeStr', 'modifier',
                 'modifier2', 'units', 'qunits'):
        setattr(self, name + 'Flag', False)

    # result-kind accumulators: 1 = date parsed, 2 = time parsed
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

    Five = 5
    Twenty Five = 25
    Two hundred twenty five = 225
    Two thousand and twenty five = 2025
    Two thousand twenty five = 2025

    @type unitText: string
    @param unitText: number text to convert

    @rtype: integer
    @return: numerical value of unitText

    NOTE: not yet implemented -- currently always returns C{None}.
    """
    # TODO: implement this
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
    After converting, calcuate the time and return the adjusted sourceTime.

    @type source: time
    @param source: time to use as the base (or source)
    @type quantity: string
    @param quantity: quantity string
    @type modifier: string
    @param modifier: how quantity and units modify the source time
    @type units: string
    @param units: unit of the quantity (i.e. hours, days, months, etc)

    @rtype: struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)

    if source is None:
        source = time.localtime()

    if quantity is None:
        quantity = ''
    else:
        quantity = quantity.strip()

    # empty quantity means "one" (e.g. "a week"); non-numeric text
    # silently becomes zero
    if len(quantity) == 0:
        qty = 1
    else:
        try:
            qty = int(quantity)
        except ValueError:
            qty = 0

    # a recognized modifier (e.g. a negation word) scales/negates qty
    if modifier in self.ptc.Modifiers:
        qty = qty * self.ptc.Modifiers[modifier]

    if units is None or units == '':
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)

    (yr, mth, dy, hr, mn, sec, _, _, _) = source

    start = datetime.datetime(yr, mth, dy, hr, mn, sec)
    target = start

    # Dispatch on the unit's prefix/suffix. Note the ordering matters:
    # 'month'/'months' are caught by the endswith('th'/'ths') test so
    # that the startswith('m') branch below means minutes, not months.
    if units.startswith('y'):
        target = self.inc(start, year=qty)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        target = self.inc(start, month=qty)
        self.dateFlag = 1
    else:
        if units.startswith('d'):
            target = start + datetime.timedelta(days=qty)
            self.dateFlag = 1
        elif units.startswith('h'):
            target = start + datetime.timedelta(hours=qty)
            self.timeFlag = 2
        elif units.startswith('m'):
            target = start + datetime.timedelta(minutes=qty)
            self.timeFlag = 2
        elif units.startswith('s'):
            target = start + datetime.timedelta(seconds=qty)
            self.timeFlag = 2
        elif units.startswith('w'):
            target = start + datetime.timedelta(weeks=qty)
            self.dateFlag = 1

    # unrecognized units leave target == start (no adjustment)
    return target.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type dateString: string
    @param dateString: text to convert to a C{datetime}

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1

    # split the string on the (locale-defined) date separator regex:
    # first separator yields v1, second yields v2 and v3
    s = dateString
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1 = int(s[:index])
        s = s[index + 1:]

    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2 = int(s[:index])
        v3 = int(s[index + 1:])
    else:
        v2 = int(s.strip())

    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }

    # map the captured values onto month/day/year according to the
    # locale's date-part ordering (dp_order, e.g. 'mdy' for en_US)
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n

    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']

    mth = d['m']
    dy = d['d']

    # birthday epoch constraint: two-digit years below the epoch are
    # assumed to be 20xx, other years below 100 are 19xx
    if yr < self.ptc.BirthdayEpoch:
        yr += 2000
    elif yr < 100:
        yr += 1900

    if _debug:
        print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)

    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()  # return current time if date
                                       # string is invalid

    return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type dateString: string
    @param dateString: text to convert to a datetime

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString, or the
             current C{struct_time} (with dateFlag/timeFlag reset)
             if the string cannot be interpreted as a date
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    currentMth = mth
    currentDy = dy

    s = dateString.lower()
    m = self.ptc.CRE_DATE3.search(s)

    # Guard: previously a non-matching string raised AttributeError on
    # m.group(); treat it like any other invalid date string instead,
    # consistent with the invalid-day branch below.
    if m is None:
        self.dateFlag = 0
        self.timeFlag = 0
        return time.localtime()

    mth = m.group('mthname')
    mth = self.ptc.MonthOffsets[mth]

    # day defaults to the 1st when only "Month [Year]" is given
    if m.group('day') != None:
        dy = int(m.group('day'))
    else:
        dy = 1

    if m.group('year') != None:
        yr = int(m.group('year'))

        # birthday epoch constraint: two-digit years below the epoch
        # are assumed to be 20xx, other years below 100 are 19xx
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900

    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += 1

    if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # Return current time if date string is invalid
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()

    return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    The range-kind cascade below tries each range regex in priority
    order and records which one matched in C{rangeFlag}; the second
    half of the routine then splits the matched text on the locale's
    range separator and parses each side.

    NOTE(review): when a range regex matches but one side fails to
    parse (sflag/eflag == 0), the function falls off the end and
    implicitly returns C{None} rather than the documented tuple --
    callers should be prepared for that; confirm intent.

    @type datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    if self.ptc.rangeSep in s:
        # pad the separator with spaces so it splits cleanly
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        # NOTE(review): as written this replace is a no-op (single
        # space -> single space); it looks like it was meant to
        # collapse double spaces ('  ' -> ' ') and the literal was
        # mangled -- confirm against upstream before changing.
        s = s.replace(' ', ' ')

    # try each range pattern in priority order; rangeFlag encodes
    # which kind matched (1-3,7 = time ranges, 4-6 = date ranges)
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s

    if m is not None:
        if (m.group() != s):
            # capture remaining string and parse it as the base time
            # for both ends of the range
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]

            s = '%s %s' % (chunk1, chunk2)
            flag = 1

            sourceTime, flag = self.parse(s, sourceTime)
            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)

        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)

        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type wd: integer
    @param wd: day-of-week value for the current day
    @type wkdy: integer
    @param wkdy: day-of-week value for the parsed day
    @type offset: integer
    @param offset: offset direction for any modifiers (-1, 0, 1)
    @type style: integer
    @param style: normally the value set in C{Constants.DOWParseStyle}
    @type currentDayStyle: integer
    @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}

    @rtype: integer
    @return: calculated day-of-week delta (days to add to the current day)

    NOTE(review): if C{offset} is outside {-1, 0, 1, 2}, C{diff} is
    never assigned and the final return raises C{UnboundLocalError} --
    callers appear to only pass those values, but confirm.
    """
    if offset == 1:
        # modifier is indicating future week eg: "next".
        # DOW is calculated as DOW of next week
        diff = 7 - wd + wkdy

    elif offset == -1:
        # modifier is indicating past week eg: "last","previous"
        # DOW is calculated as DOW of previous week
        diff = wkdy - wd - 7

    elif offset == 0:
        # modifier is indicating current week eg: "this"
        # DOW is calculated as DOW of this week
        diff = wkdy - wd

    elif offset == 2:
        # no modifier is present.
        # i.e. string to be parsed is just DOW
        if style == 1:
            # next occurance of the DOW is calculated
            # (currentDayStyle == True allows "today" to count as a match)
            if currentDayStyle == True:
                if wkdy >= wd:
                    diff = wkdy - wd
                else:
                    diff = 7 - wd + wkdy
            else:
                if wkdy > wd:
                    diff = wkdy - wd
                else:
                    diff = 7 - wd + wkdy

        elif style == -1:
            # last occurance of the DOW is calculated
            if currentDayStyle == True:
                if wkdy <= wd:
                    diff = wkdy - wd
                else:
                    diff = wkdy - wd - 7
            else:
                if wkdy < wd:
                    diff = wkdy - wd
                else:
                    diff = wkdy - wd - 7
        else:
            # occurance of the DOW in the current week is calculated
            diff = wkdy - wd

    if _debug:
        print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)

    return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to sourceTime
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    # offset encodes the modifier direction (e.g. -1 past, 0 current,
    # 1 next, 2 bare) as defined in the locale's Modifiers table
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''

    # flag records whether any of the unit branches below consumed the
    # modifier; note these are independent "if"s (not elif), relying on
    # each unit string matching at most one branch
    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> last day of the current month at 9am
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            # past/future month: anchor on the 1st before incrementing
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> Friday (weekday 4) of the current week at 5pm
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "week" with no modifier -> exactly 7 days ahead
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # next/last week is delegated by rewriting the text as a
            # weekday ("monday ...") and re-evaluating the modifier
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> today at 5pm; marks the result as a time
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            # bare "day" -> tomorrow, keeping the current clock time
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> current hour, minutes/seconds zeroed
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> Dec 31 of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag = True
        self.dateFlag = 1

    if flag == False:
        # not a plain unit: try a weekday name ("next tuesday", ...)
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

    if not flag:
        # still unmatched: try a natural-language time ("lunch", ...)
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            # parse() gives the base date for the named time, then the
            # modifier offset is applied in whole days
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)
            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    # caller re-parses the returned remainder (chunk2 only; chunk1 was
    # consumed above or deliberately dropped)
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to C{sourceTime}
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: modified sourceTime and an "invalid" flag that is
             truthy when the text could not be interpreted
    """
    offset = self.ptc.Modifiers[modifier]
    digit = r'\d+'

    self.modifier2Flag = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)
        # flag1 is deliberately INVERTED here: True means the chunk2
        # parse FAILED (flag 0), so it feeds the invalid-result flag
        # returned to the caller
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False
    else:
        flag1 = False

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # remember the flags so they can be restored if chunk1 turns
        # out not to contain a date/time
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag

        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        # no leading chunk: short-circuit on flag1 (False here avoids
        # touching the possibly-unbound flag2)
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): "and" means timeFlag is only set when hour,
            # minute AND second are all non-zero; "or" may have been
            # intended -- confirm before changing
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            dt = s[:m.start('meridian')].strip()
            # a 1-2 char prefix is a bare hour ("5 pm"); anything longer
            # carries minutes/seconds and goes through _extract_time
            if len(dt) <= 2:
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical; the split
        # looks like a leftover from an earlier distinction -- confirm
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        # day-level results are normalized to 9am
        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]

            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '' :
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # tokenizer loop: each pass matches at most one pattern, stores the
    # matched text in parseStr, removes it from s and evaluates it; the
    # "if parseStr == ''" guards give the patterns a fixed priority order
    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding text negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # skip words that are also day offsets ("today" etc.)
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))

                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]

                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)

            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0

            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date and increments it according to the
    values passed in by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type source: datetime
    @param source: C{datetime} value to increment
    @type month: integer
    @param month: optional number of months to increment
    @type year: integer
    @param year: optional number of years to increment

    @rtype: datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr = source.year
    mth = source.month
    dy = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0

        m = abs(mi)
        # floor division keeps the result an int on both Python 2 and
        # Python 3 ("/" yields a float on Python 3, which would make
        # the year a float and break replace() below)
        y = m // 12     # how many years are in month increment
        m = m % 12      # get remaining months

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y -= 1          #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y += 1          #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

    # if the day ends up past the last day of the target month, clamp it
    # to the last day.  This is done after BOTH adjustments so that pure
    # year increments are also protected (e.g. Feb 29 + 1 year used to
    # raise ValueError because the clamp only ran for month increments)
    if dy > self.ptc.daysInMonth(mth, yr):
        dy = self.ptc.daysInMonth(mth, yr)

    # replace() yields the incremented value directly; the original
    # "source + (d - source)" round-trip produced the identical result
    return source.replace(year=yr, month=mth, day=dy)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar._evalModifier2 | python | def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
"""
Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to C{sourceTime}
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime
"""
offset = self.ptc.Modifiers[modifier]
digit = r'\d+'
self.modifier2Flag = False
# If the string after the negative modifier starts with digits,
# then it is likely that the string is similar to ' before 3 days'
# or 'evening prior to 3 days'.
# In this case, the total time is calculated by subtracting '3 days'
# from the current date.
# So, we have to identify the quantity and negate it before parsing
# the string.
# This is not required for strings not starting with digits since the
# string is enough to calculate the sourceTime
if chunk2 != '':
if offset < 0:
m = re.match(digit, chunk2.strip())
if m is not None:
qty = int(m.group()) * -1
chunk2 = chunk2[m.end():]
chunk2 = '%d%s' % (qty, chunk2)
sourceTime, flag1 = self.parse(chunk2, sourceTime)
if flag1 == 0:
flag1 = True
else:
flag1 = False
flag2 = False
else:
flag1 = False
if chunk1 != '':
if offset < 0:
m = re.search(digit, chunk1.strip())
if m is not None:
qty = int(m.group()) * -1
chunk1 = chunk1[m.end():]
chunk1 = '%d%s' % (qty, chunk1)
tempDateFlag = self.dateFlag
tempTimeFlag = self.timeFlag
sourceTime2, flag2 = self.parse(chunk1, sourceTime)
else:
return sourceTime, (flag1 and flag2)
# if chunk1 is not a datetime and chunk2 is then do not use datetime
# value returned by parsing chunk1
if not (flag1 == False and flag2 == 0):
sourceTime = sourceTime2
else:
self.timeFlag = tempTimeFlag
self.dateFlag = tempDateFlag
return sourceTime, (flag1 and flag2) | Evaluate the C{modifier} string and following text (passed in
as C{chunk1} and C{chunk2}) and if they match any known modifiers
calculate the delta and apply it to C{sourceTime}.
@type modifier: string
@param modifier: modifier text to apply to C{sourceTime}
@type chunk1: string
@param chunk1: first text chunk that followed modifier (if any)
@type chunk2: string
@param chunk2: second text chunk that followed modifier (if any)
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: remaining text and the modified sourceTime | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L872-L943 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
"""
Default constructor for the L{Calendar} class.
@type constants: object
@param constants: Instance of the class L{parsedatetime_consts.Constants}
@rtype: object
@return: L{Calendar} instance
"""
# if a constants reference is not included, use default
if constants is None:
self.ptc = parsedatetime_consts.Constants()
else:
self.ptc = constants
self.weekdyFlag = False # monday/tuesday/...
self.dateStdFlag = False # 07/21/06
self.dateStrFlag = False # July 21st, 2006
self.timeStdFlag = False # 5:50
self.meridianFlag = False # am/pm
self.dayStrFlag = False # tomorrow/yesterday/today/..
self.timeStrFlag = False # lunch/noon/breakfast/...
self.modifierFlag = False # after/before/prev/next/..
self.modifier2Flag = False # after/before/prev/next/..
self.unitsFlag = False # hrs/weeks/yrs/min/..
self.qunitsFlag = False # h/m/t/d..
self.timeFlag = 0
self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

    Five = 5
    Twenty Five = 25
    Two hundred twenty five = 225
    Two thousand and twenty five = 2025
    Two thousand twenty five = 2025

    @type unitText: string
    @param unitText: number text to convert

    @rtype: integer
    @return: numerical value of unitText
    """
    # TODO: implement this
    # Currently an unimplemented stub: always returns None, so callers
    # must not rely on this method yet.
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
    After converting, calcuate the time and return the adjusted sourceTime.

    @type source: time
    @param source: time to use as the base (or source)
    @type quantity: string
    @param quantity: quantity string
    @type modifier: string
    @param modifier: how quantity and units modify the source time
    @type units: string
    @param units: unit of the quantity (i.e. hours, days, months, etc)

    @rtype: struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)

    if source is None:
        source = time.localtime()

    if quantity is None:
        quantity = ''
    else:
        quantity = quantity.strip()

    # an empty quantity means an implicit 1 ("next week"); a quantity
    # that fails int() conversion becomes 0
    if len(quantity) == 0:
        qty = 1
    else:
        try:
            qty = int(quantity)
        except ValueError:
            qty = 0

    # a known modifier scales/negates the quantity via its offset value
    if modifier in self.ptc.Modifiers:
        qty = qty * self.ptc.Modifiers[modifier]

    if units is None or units == '':
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)

    (yr, mth, dy, hr, mn, sec, _, _, _) = source

    start = datetime.datetime(yr, mth, dy, hr, mn, sec)
    target = start

    # dispatch on the unit's leading/trailing characters; "th"/"ths"
    # endings catch month words ("month", "mth")
    if units.startswith('y'):
        target = self.inc(start, year=qty)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        target = self.inc(start, month=qty)
        self.dateFlag = 1
    else:
        if units.startswith('d'):
            target = start + datetime.timedelta(days=qty)
            self.dateFlag = 1
        elif units.startswith('h'):
            target = start + datetime.timedelta(hours=qty)
            self.timeFlag = 2
        elif units.startswith('m'):
            target = start + datetime.timedelta(minutes=qty)
            self.timeFlag = 2
        elif units.startswith('s'):
            target = start + datetime.timedelta(seconds=qty)
            self.timeFlag = 2
        elif units.startswith('w'):
            target = start + datetime.timedelta(weeks=qty)
            self.dateFlag = 1

    return target.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type dateString: string
    @param dateString: text to convert to a C{datetime}

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1

    # CRE_DATE2 matches the date-field separator; split out up to three
    # numeric fields (v3 stays -1 for two-field dates like '04.21')
    s = dateString
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1 = int(s[:index])
        s = s[index + 1:]

    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2 = int(s[:index])
        v3 = int(s[index + 1:])
    else:
        v2 = int(s.strip())

    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }

    # dp_order gives the locale's field order (e.g. m/d/y vs d/m/y)
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n

    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']

    mth = d['m']
    dy = d['d']

    # birthday epoch constraint
    if yr < self.ptc.BirthdayEpoch:
        yr += 2000
    elif yr < 100:
        yr += 1900

    if _debug:
        print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)

    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()  # return current time if date
                                       # string is invalid

    return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type dateString: string
    @param dateString: text to convert to a datetime

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    currentMth = mth
    currentDy = dy

    # CRE_DATE3 supplies named groups: mthname (required by the caller's
    # pre-check in parse()), and optional day / year
    s = dateString.lower()
    m = self.ptc.CRE_DATE3.search(s)
    mth = m.group('mthname')
    mth = self.ptc.MonthOffsets[mth]

    if m.group('day') != None:
        dy = int(m.group('day'))
    else:
        dy = 1

    if m.group('year') != None:
        yr = int(m.group('year'))

        # birthday epoch constraint
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900

    elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
        # if that day and month have already passed in this year,
        # then increment the year by 1
        yr += 1

    if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # Return current time if date string is invalid
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime()

    return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
             (0 == no range found, 1 == date range, 2 == time range)
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # pad the range separator with spaces so it tokenizes cleanly, then
    # collapse the doubled-up whitespace the padding may have produced
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # probe the range regexes in priority order; rangeFlag records
    # which pattern matched
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s

    if m is not None:
        if (m.group() != s):
            # capture remaining string
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]
            s = '%s %s' % (chunk1, chunk2)
            flag = 1

            # parse any surrounding text as the base time for the range
            sourceTime, flag = self.parse(s, sourceTime)
            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    # NOTE(review): in every branch below, when either sub-parse fails
    # the "return" is skipped and the method implicitly returns None --
    # confirm callers handle that
    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)

    elif rangeFlag == 2:
        # NOTE(review): identical body to rangeFlag == 1; the split
        # follows the two distinct matching regexes
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)

    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)

    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)

    elif rangeFlag == 5:
        m = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)

    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)

        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type wd: integer
    @param wd: day-of-week value for the current day
    @type wkdy: integer
    @param wkdy: day-of-week value for the parsed day
    @type offset: integer
    @param offset: offset direction for any modifiers (-1, 0, 1)
    @type style: integer
    @param style: normally the value set in C{Constants.DOWParseStyle}
    @type currentDayStyle: integer
    @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}

    @rtype: integer
    @return: calculated day-of-week
    """
    # the three candidate deltas: same week, the week after, the week before
    sameWeek = wkdy - wd
    nextWeek = sameWeek + 7
    lastWeek = sameWeek - 7

    if offset == 1:
        # modifier indicated a future week (e.g. "next"):
        # DOW is the occurrence in the following week
        diff = nextWeek
    elif offset == -1:
        # modifier indicated a past week (e.g. "last", "previous"):
        # DOW is the occurrence in the prior week
        diff = lastWeek
    elif offset == 0:
        # modifier indicated the current week (e.g. "this")
        diff = sameWeek
    elif offset == 2:
        # no modifier present - the string was a bare DOW,
        # so the parse styles decide which occurrence is meant
        if style == 1:
            # seek forward to the next occurrence of the DOW
            if currentDayStyle == True:
                # today counts as a match
                diff = sameWeek if wkdy >= wd else nextWeek
            else:
                diff = sameWeek if wkdy > wd else nextWeek
        elif style == -1:
            # seek backward to the most recent occurrence of the DOW
            if currentDayStyle == True:
                # today counts as a match
                diff = sameWeek if wkdy <= wd else lastWeek
            else:
                diff = sameWeek if wkdy < wd else lastWeek
        else:
            # occurrence of the DOW within the current week
            diff = sameWeek

    if _debug:
        print("wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style))

    return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to sourceTime
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    # direction value for the modifier from the locale table; the
    # branches below treat 0 as "this", 2 as "next" and other values
    # as signed deltas -- presumably defined in Constants.Modifiers,
    # confirm against the constants module
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''

    # flag becomes True once one of the unit handlers below consumed
    # the text; if it stays False the unit is recombined with chunk2
    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> last day of the current month, 9am
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            # signed month delta, anchored at the 1st of the month
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> day-of-week 4 (Friday, tm_wday convention)
            # of the current week at 5pm
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "next week" -> exactly one week from today, 9am
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # other offsets: re-evaluate as "<modifier> monday <rest>"
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> today at 5pm
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            # "next day" keeps the current clock time
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> December 31st of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag = True
        self.dateFlag = 1

    if flag == False:
        # unit was not a calendar unit -- try it as a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

    if not flag:
        # not a weekday either -- try a natural-language time string
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type modifier: string
    @param modifier: modifier text to apply to C{sourceTime}
    @type chunk1: string
    @param chunk1: first text chunk that followed modifier (if any)
    @type chunk2: string
    @param chunk2: second text chunk that followed modifier (if any)
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    offset = self.ptc.Modifiers[modifier]
    digit = r'\d+'

    self.modifier2Flag = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                # negate the leading quantity so "before 3 days"
                # parses as "-3 days"
                qty = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)
        # NOTE(review): flag1 is deliberately inverted here -- it ends
        # up True when chunk2 did NOT parse; the caller treats the
        # returned (flag1 and flag2) as an "invalid" indicator
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False
    else:
        flag1 = False
        # NOTE(review): flag2 is never initialised on this path; if
        # chunk1 is also empty, "flag1 and flag2" below would raise
        # NameError -- presumably unreachable in practice, confirm

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                # same negation treatment for the leading chunk
                qty = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # remember flags so a failed chunk1 parse can be rolled back
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag
        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): only flags a time when hour, minute AND
            # second are all non-zero; "or" may have been intended --
            # behaviour preserved as-is
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                # bare hour like "5 pm"
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # the original wkdy > wd / else branches invoked
        # _CalculateDOWDelta with identical arguments; collapsed into
        # a single call (behaviour unchanged)
        qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                      self.ptc.DOWParseStyle,
                                      self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        # day-relative strings resolve to 9am of the target day
        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype: tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '' :
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # each pass of the loop extracts at most one token category from s
    # into parseStr (setting the matching self.*Flag), removes it from
    # s, and then evaluates it below; the loop ends when nothing matches
    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding chunk negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding chunk negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # skip words that are also day offsets ("today", ...)
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))
                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)
                    if flag == 0 and totalTime is not None:
                        # recursive parse failed: restore flags and keep
                        # the value computed so far
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)

            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0

            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type source: struct_time
    @param source: C{struct_time} value to increment
    @type month: integer
    @param month: optional number of months to increment
    @type year: integer
    @param year: optional number of years to increment

    @rtype: datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr = source.year
    mth = source.month
    dy = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0

        m = abs(mi)
        # divmod instead of "/" so the year count is always an int;
        # under Python 3 true division would yield a float here and
        # source.replace(year=...) below would raise TypeError
        y, m = divmod(m, 12)    # y: whole years in the increment, m: leftover months

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y -= 1          #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y += 1          #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

        # if the day ends up past the last day of
        # the new month, set it to the last day
        # NOTE(review): this clamp only runs for month increments; a
        # pure year increment of Feb 29th would raise ValueError in
        # replace() -- behaviour preserved as-is
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)

    d = source.replace(year=yr, month=mth, day=dy)

    # equivalent to returning d; kept for parity with the original
    return source + (d - source)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar._evalString | python | def _evalString(self, datetimeString, sourceTime=None):
"""
Calculate the datetime based on flags set by the L{parse()} routine
Examples handled::
RFC822, W3CDTF formatted dates
HH:MM[:SS][ am/pm]
MM/DD/YYYY
DD MMMM YYYY
@type datetimeString: string
@param datetimeString: text to try and parse as more "traditional"
date/time text
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: datetime
@return: calculated C{struct_time} value or current C{struct_time}
if not parsed
"""
s = datetimeString.strip()
now = time.localtime()
# Given string date is a RFC822 date
if sourceTime is None:
sourceTime = _parse_date_rfc822(s)
if sourceTime is not None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
self.dateFlag = 1
if (hr != 0) and (mn != 0) and (sec != 0):
self.timeFlag = 2
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
# Given string date is a W3CDTF date
if sourceTime is None:
sourceTime = _parse_date_w3dtf(s)
if sourceTime is not None:
self.dateFlag = 1
self.timeFlag = 2
if sourceTime is None:
s = s.lower()
# Given string is in the format HH:MM(:SS)(am/pm)
if self.meridianFlag:
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
dt = s[:m.start('meridian')].strip()
if len(dt) <= 2:
hr = int(dt)
mn = 0
sec = 0
else:
hr, mn, sec = _extract_time(m)
if hr == 24:
hr = 0
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
meridian = m.group('meridian').lower()
# if 'am' found and hour is 12 - force hour to 0 (midnight)
if (meridian in self.ptc.am) and hr == 12:
sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)
# if 'pm' found and hour < 12, add 12 to shift to evening
if (meridian in self.ptc.pm) and hr < 12:
sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)
# invalid time
if hr > 24 or mn > 59 or sec > 59:
sourceTime = now
self.dateFlag = 0
self.timeFlag = 0
self.meridianFlag = False
# Given string is in the format HH:MM(:SS)
if self.timeStdFlag:
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
m = self.ptc.CRE_TIMEHMS.search(s)
if m is not None:
hr, mn, sec = _extract_time(m)
if hr == 24:
hr = 0
if hr > 24 or mn > 59 or sec > 59:
# invalid time
sourceTime = now
self.dateFlag = 0
self.timeFlag = 0
else:
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
self.timeStdFlag = False
# Given string is in the format 07/21/2006
if self.dateStdFlag:
sourceTime = self.parseDate(s)
self.dateStdFlag = False
# Given string is in the format "May 23rd, 2005"
if self.dateStrFlag:
sourceTime = self.parseDateText(s)
self.dateStrFlag = False
# Given string is a weekday
if self.weekdyFlag:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
wkdy = self.ptc.WeekdayOffsets[s]
if wkdy > wd:
qty = self._CalculateDOWDelta(wd, wkdy, 2,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
else:
qty = self._CalculateDOWDelta(wd, wkdy, 2,
self.ptc.DOWParseStyle,
self.ptc.CurrentDOWParseStyle)
target = start + datetime.timedelta(days=qty)
wd = wkdy
sourceTime = target.timetuple()
self.weekdyFlag = False
# Given string is a natural language time string like
# lunch, midnight, etc
if self.timeStrFlag:
if s in self.ptc.re_values['now']:
sourceTime = now
else:
sources = self.ptc.buildSources(sourceTime)
if s in sources:
sourceTime = sources[s]
else:
sourceTime = now
self.dateFlag = 0
self.timeFlag = 0
self.timeStrFlag = False
# Given string is a natural language date string like today, tomorrow..
if self.dayStrFlag:
if sourceTime is None:
sourceTime = now
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
if s in self.ptc.dayOffsets:
offset = self.ptc.dayOffsets[s]
else:
offset = 0
start = datetime.datetime(yr, mth, dy, 9, 0, 0)
target = start + datetime.timedelta(days=offset)
sourceTime = target.timetuple()
self.dayStrFlag = False
# Given string is a time string with units like "5 hrs 30 min"
if self.unitsFlag:
modifier = '' # TODO
if sourceTime is None:
sourceTime = now
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
units = m.group('units')
quantity = s[:m.start('units')]
sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
self.unitsFlag = False
# Given string is a time string with single char units like "5 h 30 m"
if self.qunitsFlag:
modifier = '' # TODO
if sourceTime is None:
sourceTime = now
m = self.ptc.CRE_QUNITS.search(s)
if m is not None:
units = m.group('qunits')
quantity = s[:m.start('qunits')]
sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
self.qunitsFlag = False
# Given string does not match anything
if sourceTime is None:
sourceTime = now
self.dateFlag = 0
self.timeFlag = 0
return sourceTime | Calculate the datetime based on flags set by the L{parse()} routine
Examples handled::
RFC822, W3CDTF formatted dates
HH:MM[:SS][ am/pm]
MM/DD/YYYY
DD MMMM YYYY
@type datetimeString: string
@param datetimeString: text to try and parse as more "traditional"
date/time text
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: datetime
@return: calculated C{struct_time} value or current C{struct_time}
if not parsed | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L946-L1158 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants}

    @rtype: object
    @return: L{Calendar} instance
    """
    # use the supplied constants object, or build the locale default
    if constants is None:
        self.ptc = parsedatetime_consts.Constants()
    else:
        self.ptc = constants

    # match-state flags, one per token category recognised by parse():
    #   weekdyFlag    monday/tuesday/...
    #   dateStdFlag   07/21/06
    #   dateStrFlag   July 21st, 2006
    #   timeStdFlag   5:50
    #   meridianFlag  am/pm
    #   dayStrFlag    tomorrow/yesterday/today/..
    #   timeStrFlag   lunch/noon/breakfast/...
    #   modifierFlag  after/before/prev/next/..
    #   modifier2Flag after/before/prev/next/..
    #   unitsFlag     hrs/weeks/yrs/min/..
    #   qunitsFlag    h/m/t/d..
    for flagName in ('weekdyFlag', 'dateStdFlag', 'dateStrFlag',
                     'timeStdFlag', 'meridianFlag', 'dayStrFlag',
                     'timeStrFlag', 'modifierFlag', 'modifier2Flag',
                     'unitsFlag', 'qunitsFlag'):
        setattr(self, flagName, False)

    # result-kind accumulators: 0 = nothing, 1 = date, 2 = time
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

    Five = 5
    Twenty Five = 25
    Two hundred twenty five = 225
    Two thousand and twenty five = 2025
    Two thousand twenty five = 2025

    @type unitText: string
    @param unitText: number text to convert

    @rtype: integer
    @return: numerical value of unitText
    """
    # TODO: implement this
    # NOTE: unimplemented stub -- currently always returns None
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
    After converting, calcuate the time and return the adjusted sourceTime.

    @type source: time
    @param source: time to use as the base (or source)
    @type quantity: string
    @param quantity: quantity string
    @type modifier: string
    @param modifier: how quantity and units modify the source time
    @type units: string
    @param units: unit of the quantity (i.e. hours, days, months, etc)

    @rtype: struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        print('_buildTime: [%s][%s][%s]' % (quantity, modifier, units))

    if source is None:
        source = time.localtime()

    # normalise the quantity text into an integer amount:
    # blank -> 1, unparseable -> 0 (matching historical behaviour)
    quantity = '' if quantity is None else quantity.strip()
    if not quantity:
        amount = 1
    else:
        try:
            amount = int(quantity)
        except ValueError:
            amount = 0

    # a recognised modifier scales/signs the amount
    if modifier in self.ptc.Modifiers:
        amount = amount * self.ptc.Modifiers[modifier]

    if units is None or units == '':
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)
    (yr, mth, dy, hr, mn, sec, _, _, _) = source
    base = datetime.datetime(yr, mth, dy, hr, mn, sec)
    result = base

    if units.startswith('y'):
        # years go through inc() since timedelta has no year unit
        result = self.inc(base, year=amount)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        # "month"/"mth"/"months" all end in th/ths; months need inc() too
        result = self.inc(base, month=amount)
        self.dateFlag = 1
    else:
        # remaining units dispatch on their first letter to a timedelta
        # keyword (same outcome as the original startswith chain);
        # unrecognised units leave the time untouched
        deltas = {'d': 'days', 'h': 'hours', 'm': 'minutes',
                  's': 'seconds', 'w': 'weeks'}
        kw = deltas.get(units[:1])
        if kw is not None:
            result = base + datetime.timedelta(**{kw: amount})
            if kw in ('days', 'weeks'):
                self.dateFlag = 1
            else:
                self.timeFlag = 2

    return result.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type dateString: string
    @param dateString: text to convert to a C{datetime}

    @rtype: struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1

    s = dateString
    # CRE_DATE2 matches the field separator; split off the first field
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1 = int(s[:index])
        s = s[index + 1:]

    # second separator (if any) yields the remaining two fields
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2 = int(s[:index])
        v3 = int(s[index + 1:])
    else:
        v2 = int(s.strip())

    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }

    # dp_order gives the locale's field order (e.g. m/d/y for en_US)
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n

    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']

    mth = d['m']
    dy = d['d']

    # birthday epoch constraint
    # two-digit years below the epoch become 20xx, the rest 19xx --
    # NOTE(review): presumes BirthdayEpoch < 100; confirm in Constants
    if yr < self.ptc.BirthdayEpoch:
        yr += 2000
    elif yr < 100:
        yr += 1900

    if _debug:
        print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)

    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        self.dateFlag = 0
        self.timeFlag = 0

        sourceTime = time.localtime() # return current time if date
                                      # string is invalid

    return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type  dateString: string
    @param dateString: text to convert to a datetime

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString, or the
             current time if the day-of-month is invalid
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # remember today's month/day so we can tell below whether the
    # parsed (year-less) date has already gone by this year
    nowMonth = mth
    nowDay   = dy

    match = self.ptc.CRE_DATE3.search(dateString.lower())
    mth   = self.ptc.MonthOffsets[match.group('mthname')]

    dayText = match.group('day')
    dy      = int(dayText) if dayText is not None else 1

    yearText = match.group('year')
    if yearText is not None:
        yr = int(yearText)

        # birthday epoch constraint: short years below the epoch are
        # treated as 20xx, other short years as 19xx
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth < nowMonth) or (mth == nowMonth and dy < nowDay):
        # no year given and that day/month has already passed this
        # year, so assume next year was meant
        yr += 1

    if 0 < dy <= self.ptc.daysInMonth(mth, yr):
        return (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # invalid day-of-month: fall back to the current time
    self.dateFlag = 0
    self.timeFlag = 0
    return time.localtime()
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type  datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: start datetime, end datetime and the invalid
             flag (0 = no range found, 1 = date range, 2 = time range)
    """
    startTime = ''
    endTime   = ''
    startDate = ''
    endDate   = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # pad the range separator with spaces so the regexes can match it,
    # then collapse any doubled spaces the padding may have introduced
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # probe the range regexes in priority order; the first one that
    # matches determines how the string is split and parsed below
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        # parenthesized so the statement is valid under Python 2 and 3
        print('evalRanges: rangeFlag = %s [%s]' % (rangeFlag, s))

    if m is not None:
        if (m.group() != s):
            # capture remaining string and parse it as the base time
            # for both endpoints of the range
            parseStr = m.group()
            chunk1   = s[:m.start()]
            chunk2   = s[m.end():]
            s        = '%s %s' % (chunk1, chunk2)
            flag     = 1

            sourceTime, flag = self.parse(s, sourceTime)

            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    if rangeFlag in (1, 2):
        # time ranges; flags 1 and 2 differ only in the regex that
        # matched, the parsing of the two halves is identical
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
        endTime,   eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)

    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)

        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)

    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]),       sourceTime)
        endDate,   eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)

    elif rangeFlag == 5:
        m       = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date    = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date      = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate,   eflag = self.parse(endDate,   sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)

    elif rangeFlag == 6:
        m         = re.search(self.ptc.rangeSep, parseStr)
        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate,   eflag = self.parse(endDate,   sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

    return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indiacting current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurance of the DOW in the current week is calculated
diff = wkdy - wd
if _debug:
print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to sourceTime
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: remaining (unparsed) text and the modified sourceTime
    """
    # offset direction per ptc.Modifiers, e.g. -1 = past ("last"),
    # 0 = current ("this"), 1 = future ("next"), 2 = unqualified
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index  = m.start() + 1
        unit   = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit   = chunk2
        chunk2 = ''

    # flag records whether any of the unit branches below consumed
    # the unit; if none do, the unit is folded back into chunk2
    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> last day of the current month at 9am
            dy         = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            # NOTE(review): mth + 1 is not wrapped for December here --
            # presumably daysInMonth tolerates 13; confirm
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target     = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            start      = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target     = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag          = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> Friday (weekday 4) of this week at 5pm
            start      = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target     = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # unqualified "week" -> one week from today at 9am
            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target     = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # "next/last week" -> re-evaluate as "monday" of that week
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag          = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> today at 5pm
            sourceTime    = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target     = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target     = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag          = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start      = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target     = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag          = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> December 31st of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag          = True
        self.dateFlag = 1

    if flag == False:
        # the unit was not a calendar unit; try a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy          = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _)   = self.parse(wkdy, sourceTime)
                sources           = self.ptc.buildSources(sourceTime)
                self.timeFlag     = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy       = self.ptc.WeekdayOffsets[wkdy]
                diff       = self._CalculateDOWDelta(wd, wkdy, offset,
                                                     self.ptc.DOWParseStyle,
                                                     self.ptc.CurrentDOWParseStyle)
                start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target     = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag          = True
            self.dateFlag = 1

    if not flag:
        # not a weekday either; try a natural-language time string
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)

            start      = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target     = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag       = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime    = sources[modifier]
                flag          = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to C{sourceTime}
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: the modified sourceTime and an "invalid" flag
             (True when the chunks could not be parsed as a date/time)
    """
    # offset sign per ptc.Modifiers; negative means "before"/"prior to"
    offset = self.ptc.Modifiers[modifier]
    digit  = r'\d+'

    self.modifier2Flag = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                # negate the leading quantity, e.g. '3 days' -> '-3 days'
                qty    = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)

        # NOTE: flag1 is deliberately INVERTED here -- after this block
        # flag1 is True when chunk2 did NOT parse as a date/time
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False
    else:
        # no chunk2; flag2 is intentionally left unset -- the final
        # "(flag1 and flag2)" short-circuits on flag1 == False when
        # chunk1 is also empty, so flag2 is never read in that case
        flag1 = False

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty    = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # remember the flags so they can be restored if chunk1's
        # parse result ends up being discarded below
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag

        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type  datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s   = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            # NOTE(review): this marks a time only when hour, minute
            # AND second are all non-zero; 'and' looks like it was
            # meant to be 'or' -- confirm before changing
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    if sourceTime is None:
        s = s.lower()

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            # text before the meridian; two chars or less means it is
            # a bare hour value like "5 pm"
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                hr  = int(dt)
                mn  = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian   = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime    = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime    = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime       = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime       = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy  = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical; the if/else
        # appears vestigial but is kept as-is
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd     = wkdy

        sourceTime      = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            # look the phrase up in the locale's named-time sources
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime    = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        start      = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target     = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units      = m.group('units')
            quantity   = s[:m.start('units')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units      = m.group('qunits')
            quantity   = s[:m.start('qunits')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime    = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type  datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    # accept a datetime as the base, but normalize it to the
    # struct_time that the rest of the code expects
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s        = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '' :
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # repeatedly carve a recognized chunk (parseStr) out of s,
    # evaluate it, and continue with the remainder until nothing
    # matches; each regex below sets a flag consumed by _evalString()
    while len(s) > 0:
        flag   = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1   = s[:m.start('modifier')].strip()
                    chunk2   = s[m.end('modifier'):].strip()
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1   = s[:m.start('modifier')].strip()
                    chunk2   = s[m.end('modifier'):].strip()
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we check if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag    = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1   = s[:m.start('date')]
                    chunk2   = s[m.end('date'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag    = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1   = s[:m.start('date')]
                    chunk2   = s[m.end('date'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag   = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1   = s[:m.start('day')]
                    chunk2   = s[m.end('day'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1   = s[:m.start('qty')].strip()
                    chunk2   = s[m.end('qty'):].strip()

                    # a trailing '-' on the left chunk negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1   = chunk1[:-1]

                    s    = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1   = s[:m.start('qty')].strip()
                    chunk2   = s[m.end('qty'):].strip()

                    # a trailing '-' on the left chunk negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1   = chunk1[:-1]

                    s    = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # skip words like "today"/"tomorrow" that the weekday
                # regex can also match but dayOffsets already covers
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag   = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1   = s[:m.start('weekday')]
                        chunk2   = s[m.end('weekday'):]
                        s        = '%s %s' % (chunk1, chunk2)
                        flag     = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag    = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1   = s[:m.start('time')]
                    chunk2   = s[m.end('time'):]
                    s        = '%s %s' % (chunk1, chunk2)
                    flag     = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag     = 2
                # rebuild the matched time from its groups so the
                # meridian is normalized into the parse string
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))

                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]

                s    = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag    = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1   = s[:m.start('hours')]
                    chunk2   = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1   = s[:m.start('hours')]
                    chunk2   = s[m.end('minutes'):]

                s    = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                   (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                   (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)

            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0

            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr  = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime     = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type  source: datetime
    @param source: C{datetime} value to increment (the code uses
                   C{.year}/C{.replace}, so despite the old docs this
                   must be a datetime, not a struct_time)
    @type  month:  integer
    @param month:  optional number of months to increment
    @type  year:   integer
    @param year:   optional number of years to increment

    @rtype:  datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr  = source.year
    mth = source.month
    dy  = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0

        # split the month increment into whole years and leftover
        # months; divmod keeps the arithmetic integral under both
        # Python 2 and 3 (the old "m / 12" becomes a float on py3)
        y, m = divmod(abs(mi), 12)

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y   -= 1        #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y   += 1        #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

        # if the day ends up past the last day of
        # the new month, set it to the last day
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)

    # the original returned "source + (d - source)", which is just d
    return source.replace(year=yr, month=mth, day=dy)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar.parse | python | def parse(self, datetimeString, sourceTime=None):
"""
Splits the given C{datetimeString} into tokens, finds the regex
patterns that match and then calculates a C{struct_time} value from
the chunks.
If C{sourceTime} is given then the C{struct_time} value will be
calculated from that value, otherwise from the current date/time.
If the C{datetimeString} is parsed and date/time value found then
the second item of the returned tuple will be a flag to let you know
what kind of C{struct_time} value is being returned::
0 = not parsed at all
1 = parsed as a C{date}
2 = parsed as a C{time}
3 = parsed as a C{datetime}
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: modified C{sourceTime} and the result flag
"""
if sourceTime:
if isinstance(sourceTime, datetime.datetime):
if _debug:
print 'coercing datetime to timetuple'
sourceTime = sourceTime.timetuple()
else:
if not isinstance(sourceTime, time.struct_time) and \
not isinstance(sourceTime, tuple):
raise Exception('sourceTime is not a struct_time')
s = datetimeString.strip().lower()
parseStr = ''
totalTime = sourceTime
if s == '' :
if sourceTime is not None:
return (sourceTime, self.dateFlag + self.timeFlag)
else:
return (time.localtime(), 0)
self.timeFlag = 0
self.dateFlag = 0
while len(s) > 0:
flag = False
chunk1 = ''
chunk2 = ''
if _debug:
print 'parse (top of loop): [%s][%s]' % (s, parseStr)
if parseStr == '':
# Modifier like next\prev..
m = self.ptc.CRE_MODIFIER.search(s)
if m is not None:
self.modifierFlag = True
if (m.group('modifier') != s):
# capture remaining string
parseStr = m.group('modifier')
chunk1 = s[:m.start('modifier')].strip()
chunk2 = s[m.end('modifier'):].strip()
flag = True
else:
parseStr = s
if parseStr == '':
# Modifier like from\after\prior..
m = self.ptc.CRE_MODIFIER2.search(s)
if m is not None:
self.modifier2Flag = True
if (m.group('modifier') != s):
# capture remaining string
parseStr = m.group('modifier')
chunk1 = s[:m.start('modifier')].strip()
chunk2 = s[m.end('modifier'):].strip()
flag = True
else:
parseStr = s
if parseStr == '':
valid_date = False
for match in self.ptc.CRE_DATE3.finditer(s):
# to prevent "HH:MM(:SS) time strings" expressions from triggering
# this regex, we checks if the month field exists in the searched
# expression, if it doesn't exist, the date field is not valid
if match.group('mthname'):
m = self.ptc.CRE_DATE3.search(s, match.start())
valid_date = True
break
# String date format
if valid_date:
self.dateStrFlag = True
self.dateFlag = 1
if (m.group('date') != s):
# capture remaining string
parseStr = m.group('date')
chunk1 = s[:m.start('date')]
chunk2 = s[m.end('date'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Standard date format
m = self.ptc.CRE_DATE.search(s)
if m is not None:
self.dateStdFlag = True
self.dateFlag = 1
if (m.group('date') != s):
# capture remaining string
parseStr = m.group('date')
chunk1 = s[:m.start('date')]
chunk2 = s[m.end('date'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Natural language day strings
m = self.ptc.CRE_DAY.search(s)
if m is not None:
self.dayStrFlag = True
self.dateFlag = 1
if (m.group('day') != s):
# capture remaining string
parseStr = m.group('day')
chunk1 = s[:m.start('day')]
chunk2 = s[m.end('day'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Quantity + Units
m = self.ptc.CRE_UNITS.search(s)
if m is not None:
self.unitsFlag = True
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Quantity + Units
m = self.ptc.CRE_QUNITS.search(s)
if m is not None:
self.qunitsFlag = True
if (m.group('qty') != s):
# capture remaining string
parseStr = m.group('qty')
chunk1 = s[:m.start('qty')].strip()
chunk2 = s[m.end('qty'):].strip()
if chunk1[-1:] == '-':
parseStr = '-%s' % parseStr
chunk1 = chunk1[:-1]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Weekday
m = self.ptc.CRE_WEEKDAY.search(s)
if m is not None:
gv = m.group('weekday')
if s not in self.ptc.dayOffsets:
self.weekdyFlag = True
self.dateFlag = 1
if (gv != s):
# capture remaining string
parseStr = gv
chunk1 = s[:m.start('weekday')]
chunk2 = s[m.end('weekday'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# Natural language time strings
m = self.ptc.CRE_TIME.search(s)
if m is not None:
self.timeStrFlag = True
self.timeFlag = 2
if (m.group('time') != s):
# capture remaining string
parseStr = m.group('time')
chunk1 = s[:m.start('time')]
chunk2 = s[m.end('time'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
else:
parseStr = s
if parseStr == '':
# HH:MM(:SS) am/pm time strings
m = self.ptc.CRE_TIMEHMS2.search(s)
if m is not None:
self.meridianFlag = True
self.timeFlag = 2
if m.group('minutes') is not None:
if m.group('seconds') is not None:
parseStr = '%s:%s:%s %s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'),
m.group('meridian'))
else:
parseStr = '%s:%s %s' % (m.group('hours'),
m.group('minutes'),
m.group('meridian'))
else:
parseStr = '%s %s' % (m.group('hours'),
m.group('meridian'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('meridian'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
if parseStr == '':
# HH:MM(:SS) time strings
m = self.ptc.CRE_TIMEHMS.search(s)
if m is not None:
self.timeStdFlag = True
self.timeFlag = 2
if m.group('seconds') is not None:
parseStr = '%s:%s:%s' % (m.group('hours'),
m.group('minutes'),
m.group('seconds'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('seconds'):]
else:
parseStr = '%s:%s' % (m.group('hours'),
m.group('minutes'))
chunk1 = s[:m.start('hours')]
chunk2 = s[m.end('minutes'):]
s = '%s %s' % (chunk1, chunk2)
flag = True
# if string does not match any regex, empty string to
# come out of the while loop
if not flag:
s = ''
if _debug:
print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
(self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
(self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)
# evaluate the matched string
if parseStr != '':
if self.modifierFlag == True:
t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
# t is the unparsed part of the chunks.
# If it is not date/time, return current
# totalTime as it is; else return the output
# after parsing t.
if (t != '') and (t != None):
tempDateFlag = self.dateFlag
tempTimeFlag = self.timeFlag
(totalTime2, flag) = self.parse(t, totalTime)
if flag == 0 and totalTime is not None:
self.timeFlag = tempTimeFlag
self.dateFlag = tempDateFlag
return (totalTime, self.dateFlag + self.timeFlag)
else:
return (totalTime2, self.dateFlag + self.timeFlag)
elif self.modifier2Flag == True:
totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
if invalidFlag == True:
self.dateFlag = 0
self.timeFlag = 0
else:
totalTime = self._evalString(parseStr, totalTime)
parseStr = ''
# String is not parsed at all
if totalTime is None or totalTime == sourceTime:
totalTime = time.localtime()
self.dateFlag = 0
self.timeFlag = 0
return (totalTime, self.dateFlag + self.timeFlag) | Splits the given C{datetimeString} into tokens, finds the regex
patterns that match and then calculates a C{struct_time} value from
the chunks.
If C{sourceTime} is given then the C{struct_time} value will be
calculated from that value, otherwise from the current date/time.
If the C{datetimeString} is parsed and date/time value found then
the second item of the returned tuple will be a flag to let you know
what kind of C{struct_time} value is being returned::
0 = not parsed at all
1 = parsed as a C{date}
2 = parsed as a C{time}
3 = parsed as a C{datetime}
@type datetimeString: string
@param datetimeString: date/time text to evaluate
@type sourceTime: struct_time
@param sourceTime: C{struct_time} value to use as the base
@rtype: tuple
@return: tuple of: modified C{sourceTime} and the result flag | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L1161-L1476 | [
"def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):\n \"\"\"\n Evaluate the C{modifier} string and following text (passed in\n as C{chunk1} and C{chunk2}) and if they match any known modifiers\n calculate the delta and apply it to C{sourceTime}.\n\n @type modifier: string\n @param modifier: modifier text to apply to sourceTime\n @type chunk1: string\n @param chunk1: first text chunk that followed modifier (if any)\n @type chunk2: string\n @param chunk2: second text chunk that followed modifier (if any)\n @type sourceTime: struct_time\n @param sourceTime: C{struct_time} value to use as the base\n\n @rtype: tuple\n @return: tuple of: remaining text and the modified sourceTime\n \"\"\"\n offset = self.ptc.Modifiers[modifier]\n\n if sourceTime is not None:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime\n else:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()\n\n # capture the units after the modifier and the remaining\n # string after the unit\n m = self.ptc.CRE_REMAINING.search(chunk2)\n if m is not None:\n index = m.start() + 1\n unit = chunk2[:m.start()]\n chunk2 = chunk2[index:]\n else:\n unit = chunk2\n chunk2 = ''\n\n flag = False\n\n if unit == 'month' or \\\n unit == 'mth' or \\\n unit == 'm':\n if offset == 0:\n dy = self.ptc.daysInMonth(mth, yr)\n sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)\n elif offset == 2:\n # if day is the last day of the month, calculate the last day\n # of the next month\n if dy == self.ptc.daysInMonth(mth, yr):\n dy = self.ptc.daysInMonth(mth + 1, yr)\n\n start = datetime.datetime(yr, mth, dy, 9, 0, 0)\n target = self.inc(start, month=1)\n sourceTime = target.timetuple()\n else:\n start = datetime.datetime(yr, mth, 1, 9, 0, 0)\n target = self.inc(start, month=offset)\n sourceTime = target.timetuple()\n\n flag = True\n self.dateFlag = 1\n\n if unit == 'week' or \\\n unit == 'wk' or \\\n unit == 'w':\n if offset == 0:\n start = datetime.datetime(yr, mth, dy, 17, 0, 0)\n target = start + 
datetime.timedelta(days=(4 - wd))\n sourceTime = target.timetuple()\n elif offset == 2:\n start = datetime.datetime(yr, mth, dy, 9, 0, 0)\n target = start + datetime.timedelta(days=7)\n sourceTime = target.timetuple()\n else:\n return self._evalModifier(modifier, chunk1, \"monday \" + chunk2, sourceTime)\n\n flag = True\n self.dateFlag = 1\n\n if unit == 'day' or \\\n unit == 'dy' or \\\n unit == 'd':\n if offset == 0:\n sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)\n self.timeFlag = 2\n elif offset == 2:\n start = datetime.datetime(yr, mth, dy, hr, mn, sec)\n target = start + datetime.timedelta(days=1)\n sourceTime = target.timetuple()\n else:\n start = datetime.datetime(yr, mth, dy, 9, 0, 0)\n target = start + datetime.timedelta(days=offset)\n sourceTime = target.timetuple()\n\n flag = True\n self.dateFlag = 1\n\n if unit == 'hour' or \\\n unit == 'hr':\n if offset == 0:\n sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)\n else:\n start = datetime.datetime(yr, mth, dy, hr, 0, 0)\n target = start + datetime.timedelta(hours=offset)\n sourceTime = target.timetuple()\n\n flag = True\n self.timeFlag = 2\n\n if unit == 'year' or \\\n unit == 'yr' or \\\n unit == 'y':\n if offset == 0:\n sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)\n elif offset == 2:\n sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)\n else:\n sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)\n\n flag = True\n self.dateFlag = 1\n\n if flag == False:\n m = self.ptc.CRE_WEEKDAY.match(unit)\n if m is not None:\n wkdy = m.group()\n self.dateFlag = 1\n\n if modifier == 'eod':\n # Calculate the upcoming weekday\n self.modifierFlag = False\n (sourceTime, _) = self.parse(wkdy, sourceTime)\n sources = self.ptc.buildSources(sourceTime)\n self.timeFlag = 2\n\n if modifier in sources:\n sourceTime = sources[modifier]\n\n else:\n wkdy = self.ptc.WeekdayOffsets[wkdy]\n diff = self._CalculateDOWDelta(wd, wkdy, offset,\n self.ptc.DOWParseStyle,\n self.ptc.CurrentDOWParseStyle)\n 
start = datetime.datetime(yr, mth, dy, 9, 0, 0)\n target = start + datetime.timedelta(days=diff)\n sourceTime = target.timetuple()\n\n flag = True\n self.dateFlag = 1\n\n if not flag:\n m = self.ptc.CRE_TIME.match(unit)\n if m is not None:\n self.modifierFlag = False\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)\n\n start = datetime.datetime(yr, mth, dy, hr, mn, sec)\n target = start + datetime.timedelta(days=offset)\n sourceTime = target.timetuple()\n flag = True\n else:\n self.modifierFlag = False\n\n # check if the remaining text is parsable and if so,\n # use it as the base time for the modifier source time\n t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)\n\n if flag2 != 0:\n sourceTime = t\n\n sources = self.ptc.buildSources(sourceTime)\n\n if modifier in sources:\n sourceTime = sources[modifier]\n flag = True\n self.timeFlag = 2\n\n # if the word after next is a number, the string is more than likely\n # to be \"next 4 hrs\" which we will have to combine the units with the\n # rest of the string\n if not flag:\n if offset < 0:\n # if offset is negative, the unit has to be made negative\n unit = '-%s' % unit\n\n chunk2 = '%s %s' % (unit, chunk2)\n\n self.modifierFlag = False\n\n #return '%s %s' % (chunk1, chunk2), sourceTime\n return '%s' % chunk2, sourceTime\n",
"def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):\n \"\"\"\n Evaluate the C{modifier} string and following text (passed in\n as C{chunk1} and C{chunk2}) and if they match any known modifiers\n calculate the delta and apply it to C{sourceTime}.\n\n @type modifier: string\n @param modifier: modifier text to apply to C{sourceTime}\n @type chunk1: string\n @param chunk1: first text chunk that followed modifier (if any)\n @type chunk2: string\n @param chunk2: second text chunk that followed modifier (if any)\n @type sourceTime: struct_time\n @param sourceTime: C{struct_time} value to use as the base\n\n @rtype: tuple\n @return: tuple of: remaining text and the modified sourceTime\n \"\"\"\n offset = self.ptc.Modifiers[modifier]\n digit = r'\\d+'\n\n self.modifier2Flag = False\n\n # If the string after the negative modifier starts with digits,\n # then it is likely that the string is similar to ' before 3 days'\n # or 'evening prior to 3 days'.\n # In this case, the total time is calculated by subtracting '3 days'\n # from the current date.\n # So, we have to identify the quantity and negate it before parsing\n # the string.\n # This is not required for strings not starting with digits since the\n # string is enough to calculate the sourceTime\n if chunk2 != '':\n if offset < 0:\n m = re.match(digit, chunk2.strip())\n if m is not None:\n qty = int(m.group()) * -1\n chunk2 = chunk2[m.end():]\n chunk2 = '%d%s' % (qty, chunk2)\n\n sourceTime, flag1 = self.parse(chunk2, sourceTime)\n if flag1 == 0:\n flag1 = True\n else:\n flag1 = False\n flag2 = False\n else:\n flag1 = False\n\n if chunk1 != '':\n if offset < 0:\n m = re.search(digit, chunk1.strip())\n if m is not None:\n qty = int(m.group()) * -1\n chunk1 = chunk1[m.end():]\n chunk1 = '%d%s' % (qty, chunk1)\n\n tempDateFlag = self.dateFlag\n tempTimeFlag = self.timeFlag\n sourceTime2, flag2 = self.parse(chunk1, sourceTime)\n else:\n return sourceTime, (flag1 and flag2)\n\n # if chunk1 is not a datetime and 
chunk2 is then do not use datetime\n # value returned by parsing chunk1\n if not (flag1 == False and flag2 == 0):\n sourceTime = sourceTime2\n else:\n self.timeFlag = tempTimeFlag\n self.dateFlag = tempDateFlag\n\n return sourceTime, (flag1 and flag2)\n",
"def _evalString(self, datetimeString, sourceTime=None):\n \"\"\"\n Calculate the datetime based on flags set by the L{parse()} routine\n\n Examples handled::\n RFC822, W3CDTF formatted dates\n HH:MM[:SS][ am/pm]\n MM/DD/YYYY\n DD MMMM YYYY\n\n @type datetimeString: string\n @param datetimeString: text to try and parse as more \"traditional\"\n date/time text\n @type sourceTime: struct_time\n @param sourceTime: C{struct_time} value to use as the base\n\n @rtype: datetime\n @return: calculated C{struct_time} value or current C{struct_time}\n if not parsed\n \"\"\"\n s = datetimeString.strip()\n now = time.localtime()\n\n # Given string date is a RFC822 date\n if sourceTime is None:\n sourceTime = _parse_date_rfc822(s)\n\n if sourceTime is not None:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime\n self.dateFlag = 1\n\n if (hr != 0) and (mn != 0) and (sec != 0):\n self.timeFlag = 2\n\n sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)\n\n # Given string date is a W3CDTF date\n if sourceTime is None:\n sourceTime = _parse_date_w3dtf(s)\n\n if sourceTime is not None:\n self.dateFlag = 1\n self.timeFlag = 2\n\n if sourceTime is None:\n s = s.lower()\n\n # Given string is in the format HH:MM(:SS)(am/pm)\n if self.meridianFlag:\n if sourceTime is None:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now\n else:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime\n\n m = self.ptc.CRE_TIMEHMS2.search(s)\n if m is not None:\n dt = s[:m.start('meridian')].strip()\n if len(dt) <= 2:\n hr = int(dt)\n mn = 0\n sec = 0\n else:\n hr, mn, sec = _extract_time(m)\n\n if hr == 24:\n hr = 0\n\n sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)\n meridian = m.group('meridian').lower()\n\n # if 'am' found and hour is 12 - force hour to 0 (midnight)\n if (meridian in self.ptc.am) and hr == 12:\n sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)\n\n # if 'pm' found and hour < 12, add 12 to shift to evening\n if (meridian in self.ptc.pm) and hr < 12:\n 
sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)\n\n # invalid time\n if hr > 24 or mn > 59 or sec > 59:\n sourceTime = now\n self.dateFlag = 0\n self.timeFlag = 0\n\n self.meridianFlag = False\n\n # Given string is in the format HH:MM(:SS)\n if self.timeStdFlag:\n if sourceTime is None:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now\n else:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime\n\n m = self.ptc.CRE_TIMEHMS.search(s)\n if m is not None:\n hr, mn, sec = _extract_time(m)\n if hr == 24:\n hr = 0\n\n if hr > 24 or mn > 59 or sec > 59:\n # invalid time\n sourceTime = now\n self.dateFlag = 0\n self.timeFlag = 0\n else:\n sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)\n\n self.timeStdFlag = False\n\n # Given string is in the format 07/21/2006\n if self.dateStdFlag:\n sourceTime = self.parseDate(s)\n self.dateStdFlag = False\n\n # Given string is in the format \"May 23rd, 2005\"\n if self.dateStrFlag:\n sourceTime = self.parseDateText(s)\n self.dateStrFlag = False\n\n # Given string is a weekday\n if self.weekdyFlag:\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now\n\n start = datetime.datetime(yr, mth, dy, hr, mn, sec)\n wkdy = self.ptc.WeekdayOffsets[s]\n\n if wkdy > wd:\n qty = self._CalculateDOWDelta(wd, wkdy, 2,\n self.ptc.DOWParseStyle,\n self.ptc.CurrentDOWParseStyle)\n else:\n qty = self._CalculateDOWDelta(wd, wkdy, 2,\n self.ptc.DOWParseStyle,\n self.ptc.CurrentDOWParseStyle)\n\n target = start + datetime.timedelta(days=qty)\n wd = wkdy\n\n sourceTime = target.timetuple()\n self.weekdyFlag = False\n\n # Given string is a natural language time string like\n # lunch, midnight, etc\n if self.timeStrFlag:\n if s in self.ptc.re_values['now']:\n sourceTime = now\n else:\n sources = self.ptc.buildSources(sourceTime)\n\n if s in sources:\n sourceTime = sources[s]\n else:\n sourceTime = now\n self.dateFlag = 0\n self.timeFlag = 0\n\n self.timeStrFlag = False\n\n # Given string is a natural language date string like today, 
tomorrow..\n if self.dayStrFlag:\n if sourceTime is None:\n sourceTime = now\n\n (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime\n\n if s in self.ptc.dayOffsets:\n offset = self.ptc.dayOffsets[s]\n else:\n offset = 0\n\n start = datetime.datetime(yr, mth, dy, 9, 0, 0)\n target = start + datetime.timedelta(days=offset)\n sourceTime = target.timetuple()\n\n self.dayStrFlag = False\n\n # Given string is a time string with units like \"5 hrs 30 min\"\n if self.unitsFlag:\n modifier = '' # TODO\n\n if sourceTime is None:\n sourceTime = now\n\n m = self.ptc.CRE_UNITS.search(s)\n if m is not None:\n units = m.group('units')\n quantity = s[:m.start('units')]\n\n sourceTime = self._buildTime(sourceTime, quantity, modifier, units)\n self.unitsFlag = False\n\n # Given string is a time string with single char units like \"5 h 30 m\"\n if self.qunitsFlag:\n modifier = '' # TODO\n\n if sourceTime is None:\n sourceTime = now\n\n m = self.ptc.CRE_QUNITS.search(s)\n if m is not None:\n units = m.group('qunits')\n quantity = s[:m.start('qunits')]\n\n sourceTime = self._buildTime(sourceTime, quantity, modifier, units)\n self.qunitsFlag = False\n\n # Given string does not match anything\n if sourceTime is None:\n sourceTime = now\n self.dateFlag = 0\n self.timeFlag = 0\n\n return sourceTime\n",
"def parse(self, datetimeString, sourceTime=None):\n \"\"\"\n Splits the given C{datetimeString} into tokens, finds the regex\n patterns that match and then calculates a C{struct_time} value from\n the chunks.\n\n If C{sourceTime} is given then the C{struct_time} value will be\n calculated from that value, otherwise from the current date/time.\n\n If the C{datetimeString} is parsed and date/time value found then\n the second item of the returned tuple will be a flag to let you know\n what kind of C{struct_time} value is being returned::\n\n 0 = not parsed at all\n 1 = parsed as a C{date}\n 2 = parsed as a C{time}\n 3 = parsed as a C{datetime}\n\n @type datetimeString: string\n @param datetimeString: date/time text to evaluate\n @type sourceTime: struct_time\n @param sourceTime: C{struct_time} value to use as the base\n\n @rtype: tuple\n @return: tuple of: modified C{sourceTime} and the result flag\n \"\"\"\n\n if sourceTime:\n if isinstance(sourceTime, datetime.datetime):\n if _debug:\n print 'coercing datetime to timetuple'\n sourceTime = sourceTime.timetuple()\n else:\n if not isinstance(sourceTime, time.struct_time) and \\\n not isinstance(sourceTime, tuple):\n raise Exception('sourceTime is not a struct_time')\n\n s = datetimeString.strip().lower()\n parseStr = ''\n totalTime = sourceTime\n\n if s == '' :\n if sourceTime is not None:\n return (sourceTime, self.dateFlag + self.timeFlag)\n else:\n return (time.localtime(), 0)\n\n self.timeFlag = 0\n self.dateFlag = 0\n\n while len(s) > 0:\n flag = False\n chunk1 = ''\n chunk2 = ''\n\n if _debug:\n print 'parse (top of loop): [%s][%s]' % (s, parseStr)\n\n if parseStr == '':\n # Modifier like next\\prev..\n m = self.ptc.CRE_MODIFIER.search(s)\n if m is not None:\n self.modifierFlag = True\n if (m.group('modifier') != s):\n # capture remaining string\n parseStr = m.group('modifier')\n chunk1 = s[:m.start('modifier')].strip()\n chunk2 = s[m.end('modifier'):].strip()\n flag = True\n else:\n parseStr = s\n\n if 
parseStr == '':\n # Modifier like from\\after\\prior..\n m = self.ptc.CRE_MODIFIER2.search(s)\n if m is not None:\n self.modifier2Flag = True\n if (m.group('modifier') != s):\n # capture remaining string\n parseStr = m.group('modifier')\n chunk1 = s[:m.start('modifier')].strip()\n chunk2 = s[m.end('modifier'):].strip()\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n valid_date = False\n for match in self.ptc.CRE_DATE3.finditer(s):\n # to prevent \"HH:MM(:SS) time strings\" expressions from triggering\n # this regex, we checks if the month field exists in the searched \n # expression, if it doesn't exist, the date field is not valid\n if match.group('mthname'):\n m = self.ptc.CRE_DATE3.search(s, match.start())\n valid_date = True\n break\n\n # String date format\n if valid_date:\n self.dateStrFlag = True\n self.dateFlag = 1\n if (m.group('date') != s):\n # capture remaining string\n parseStr = m.group('date')\n chunk1 = s[:m.start('date')]\n chunk2 = s[m.end('date'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Standard date format\n m = self.ptc.CRE_DATE.search(s)\n if m is not None:\n self.dateStdFlag = True\n self.dateFlag = 1\n if (m.group('date') != s):\n # capture remaining string\n parseStr = m.group('date')\n chunk1 = s[:m.start('date')]\n chunk2 = s[m.end('date'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Natural language day strings\n m = self.ptc.CRE_DAY.search(s)\n if m is not None:\n self.dayStrFlag = True\n self.dateFlag = 1\n if (m.group('day') != s):\n # capture remaining string\n parseStr = m.group('day')\n chunk1 = s[:m.start('day')]\n chunk2 = s[m.end('day'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Quantity + Units\n m = self.ptc.CRE_UNITS.search(s)\n if m is not None:\n self.unitsFlag = True\n if (m.group('qty') != s):\n # capture remaining string\n parseStr = 
m.group('qty')\n chunk1 = s[:m.start('qty')].strip()\n chunk2 = s[m.end('qty'):].strip()\n\n if chunk1[-1:] == '-':\n parseStr = '-%s' % parseStr\n chunk1 = chunk1[:-1]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Quantity + Units\n m = self.ptc.CRE_QUNITS.search(s)\n if m is not None:\n self.qunitsFlag = True\n\n if (m.group('qty') != s):\n # capture remaining string\n parseStr = m.group('qty')\n chunk1 = s[:m.start('qty')].strip()\n chunk2 = s[m.end('qty'):].strip()\n\n if chunk1[-1:] == '-':\n parseStr = '-%s' % parseStr\n chunk1 = chunk1[:-1]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s \n\n if parseStr == '':\n # Weekday\n m = self.ptc.CRE_WEEKDAY.search(s)\n if m is not None:\n gv = m.group('weekday')\n if s not in self.ptc.dayOffsets:\n self.weekdyFlag = True\n self.dateFlag = 1\n if (gv != s):\n # capture remaining string\n parseStr = gv\n chunk1 = s[:m.start('weekday')]\n chunk2 = s[m.end('weekday'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # Natural language time strings\n m = self.ptc.CRE_TIME.search(s)\n if m is not None:\n self.timeStrFlag = True\n self.timeFlag = 2\n if (m.group('time') != s):\n # capture remaining string\n parseStr = m.group('time')\n chunk1 = s[:m.start('time')]\n chunk2 = s[m.end('time'):]\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n else:\n parseStr = s\n\n if parseStr == '':\n # HH:MM(:SS) am/pm time strings\n m = self.ptc.CRE_TIMEHMS2.search(s)\n if m is not None:\n self.meridianFlag = True\n self.timeFlag = 2\n if m.group('minutes') is not None:\n if m.group('seconds') is not None:\n parseStr = '%s:%s:%s %s' % (m.group('hours'),\n m.group('minutes'),\n m.group('seconds'),\n m.group('meridian'))\n else:\n parseStr = '%s:%s %s' % (m.group('hours'),\n m.group('minutes'),\n m.group('meridian'))\n else:\n parseStr = '%s %s' % (m.group('hours'),\n m.group('meridian'))\n\n chunk1 = 
s[:m.start('hours')]\n chunk2 = s[m.end('meridian'):]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n\n if parseStr == '':\n # HH:MM(:SS) time strings\n m = self.ptc.CRE_TIMEHMS.search(s)\n if m is not None:\n self.timeStdFlag = True\n self.timeFlag = 2\n if m.group('seconds') is not None:\n parseStr = '%s:%s:%s' % (m.group('hours'),\n m.group('minutes'),\n m.group('seconds'))\n chunk1 = s[:m.start('hours')]\n chunk2 = s[m.end('seconds'):]\n else:\n parseStr = '%s:%s' % (m.group('hours'),\n m.group('minutes'))\n chunk1 = s[:m.start('hours')]\n chunk2 = s[m.end('minutes'):]\n\n s = '%s %s' % (chunk1, chunk2)\n flag = True\n\n # if string does not match any regex, empty string to\n # come out of the while loop\n if not flag:\n s = ''\n\n if _debug:\n print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)\n print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \\\n (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)\n print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \\\n (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)\n\n # evaluate the matched string\n if parseStr != '':\n if self.modifierFlag == True:\n t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)\n # t is the unparsed part of the chunks.\n # If it is not date/time, return current\n # totalTime as it is; else return the output\n # after parsing t.\n if (t != '') and (t != None):\n tempDateFlag = self.dateFlag\n tempTimeFlag = self.timeFlag\n (totalTime2, flag) = self.parse(t, totalTime)\n\n if flag == 0 and totalTime is not None:\n self.timeFlag = tempTimeFlag\n self.dateFlag = tempDateFlag\n\n return (totalTime, self.dateFlag + self.timeFlag)\n else:\n return (totalTime2, self.dateFlag + self.timeFlag)\n\n elif self.modifier2Flag == True:\n totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)\n\n if 
invalidFlag == True:\n self.dateFlag = 0\n self.timeFlag = 0\n\n else:\n totalTime = self._evalString(parseStr, totalTime)\n parseStr = ''\n\n # String is not parsed at all\n if totalTime is None or totalTime == sourceTime:\n totalTime = time.localtime()\n self.dateFlag = 0\n self.timeFlag = 0\n\n return (totalTime, self.dateFlag + self.timeFlag)\n"
] | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
"""
Default constructor for the L{Calendar} class.
@type constants: object
@param constants: Instance of the class L{parsedatetime_consts.Constants}
@rtype: object
@return: L{Calendar} instance
"""
# if a constants reference is not included, use default
if constants is None:
self.ptc = parsedatetime_consts.Constants()
else:
self.ptc = constants
self.weekdyFlag = False # monday/tuesday/...
self.dateStdFlag = False # 07/21/06
self.dateStrFlag = False # July 21st, 2006
self.timeStdFlag = False # 5:50
self.meridianFlag = False # am/pm
self.dayStrFlag = False # tomorrow/yesterday/today/..
self.timeStrFlag = False # lunch/noon/breakfast/...
self.modifierFlag = False # after/before/prev/next/..
self.modifier2Flag = False # after/before/prev/next/..
self.unitsFlag = False # hrs/weeks/yrs/min/..
self.qunitsFlag = False # h/m/t/d..
self.timeFlag = 0
self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
"""
Converts text units into their number value
Five = 5
Twenty Five = 25
Two hundred twenty five = 225
Two thousand and twenty five = 2025
Two thousand twenty five = 2025
@type unitText: string
@param unitText: number text to convert
@rtype: integer
@return: numerical value of unitText
"""
# TODO: implement this
pass
def _buildTime(self, source, quantity, modifier, units):
"""
Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
After converting, calcuate the time and return the adjusted sourceTime.
@type source: time
@param source: time to use as the base (or source)
@type quantity: string
@param quantity: quantity string
@type modifier: string
@param modifier: how quantity and units modify the source time
@type units: string
@param units: unit of the quantity (i.e. hours, days, months, etc)
@rtype: struct_time
@return: C{struct_time} of the calculated time
"""
if _debug:
print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)
if source is None:
source = time.localtime()
if quantity is None:
quantity = ''
else:
quantity = quantity.strip()
if len(quantity) == 0:
qty = 1
else:
try:
qty = int(quantity)
except ValueError:
qty = 0
if modifier in self.ptc.Modifiers:
qty = qty * self.ptc.Modifiers[modifier]
if units is None or units == '':
units = 'dy'
# plurals are handled by regex's (could be a bug tho)
(yr, mth, dy, hr, mn, sec, _, _, _) = source
start = datetime.datetime(yr, mth, dy, hr, mn, sec)
target = start
if units.startswith('y'):
target = self.inc(start, year=qty)
self.dateFlag = 1
elif units.endswith('th') or units.endswith('ths'):
target = self.inc(start, month=qty)
self.dateFlag = 1
else:
if units.startswith('d'):
target = start + datetime.timedelta(days=qty)
self.dateFlag = 1
elif units.startswith('h'):
target = start + datetime.timedelta(hours=qty)
self.timeFlag = 2
elif units.startswith('m'):
target = start + datetime.timedelta(minutes=qty)
self.timeFlag = 2
elif units.startswith('s'):
target = start + datetime.timedelta(seconds=qty)
self.timeFlag = 2
elif units.startswith('w'):
target = start + datetime.timedelta(weeks=qty)
self.dateFlag = 1
return target.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type  dateString: string
    @param dateString: text to convert to a C{datetime}
    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1
    s = dateString
    # split on the locale's date separator (CRE_DATE2), peeling off up
    # to three numeric fields: v1/v2(/v3)
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1 = int(s[:index])
        s = s[index + 1:]
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2 = int(s[:index])
        v3 = int(s[index + 1:])
    else:
        v2 = int(s.strip())
    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }
    # map the extracted numbers onto month/day/year according to the
    # locale's preferred field order (self.ptc.dp_order, e.g. 'mdy')
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n
    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']
    mth = d['m']
    dy = d['d']
    # birthday epoch constraint: expand two-digit years into the
    # 19xx/20xx century based on the configured epoch cutoff
    if yr < self.ptc.BirthdayEpoch:
        yr += 2000
    elif yr < 100:
        yr += 1900
    if _debug:
        print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)
    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        # invalid month/day combination: reset flags and return "now"
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime() # return current time if date
                                      # string is invalid
    return sourceTime
def parseDateText(self, dateString):
    """
    Parse long-form date strings::

        'May 31st, 2006'
        'Jan 1st'
        'July 2006'

    @type  dateString: string
    @param dateString: text to convert to a datetime
    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, nowMth, nowDy, hr, mn, sec, wd, yd, isdst = time.localtime()

    match = self.ptc.CRE_DATE3.search(dateString.lower())
    mth = self.ptc.MonthOffsets[match.group('mthname')]

    # an absent day field defaults to the first of the month
    dayText = match.group('day')
    dy = int(dayText) if dayText is not None else 1

    yearText = match.group('year')
    if yearText is not None:
        yr = int(yearText)
        # expand two-digit years using the birthday epoch cutoff
        if yr < self.ptc.BirthdayEpoch:
            yr += 2000
        elif yr < 100:
            yr += 1900
    elif (mth, dy) < (nowMth, nowDy):
        # no year given and that month/day already passed this year,
        # so assume the next occurrence
        yr += 1

    if 0 < dy <= self.ptc.daysInMonth(mth, yr):
        return (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # invalid day for that month: reset flags and fall back to "now"
    self.dateFlag = 0
    self.timeFlag = 0
    return time.localtime()
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type  datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base
    @rtype:  tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
             (0 = not a range, 1 = date range, 2 = time range)
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0
    s = datetimeString.strip().lower()
    # pad the range separator with spaces so it tokenizes cleanly
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        # NOTE(review): this replace is a no-op as written; it looks
        # like it was meant to collapse doubled spaces ('  ' -> ' ')
        # and the literal may have been mangled -- confirm upstream
        s = s.replace(' ', ' ')
    # probe the range regexes from most to least specific; the first
    # match wins and records which form matched in rangeFlag
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6
    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s
    if m is not None:
        if (m.group() != s):
            # capture remaining string and parse it as the base time
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]
            s = '%s %s' % (chunk1, chunk2)
            flag = 1
            sourceTime, flag = self.parse(s, sourceTime)
            if flag == 0:
                sourceTime = None
        else:
            parseStr = s
    if rangeFlag == 1:
        # "HH:MM - HH:MM"
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)
            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)
        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)
        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]
        # capturing the year from the end date
        date = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')
        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')
            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]
        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)
        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate = parseStr[:m.start()]
        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')
        # appending the month name to the end date, eg "Aug 21 - 24"
        endDate = mth + parseStr[(m.start() + 1):]
        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)
        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()
        return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
"""
Based on the C{style} and C{currentDayStyle} determine what
day-of-week value is to be returned.
@type wd: integer
@param wd: day-of-week value for the current day
@type wkdy: integer
@param wkdy: day-of-week value for the parsed day
@type offset: integer
@param offset: offset direction for any modifiers (-1, 0, 1)
@type style: integer
@param style: normally the value set in C{Constants.DOWParseStyle}
@type currentDayStyle: integer
@param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}
@rtype: integer
@return: calculated day-of-week
"""
if offset == 1:
# modifier is indicating future week eg: "next".
# DOW is calculated as DOW of next week
diff = 7 - wd + wkdy
elif offset == -1:
# modifier is indicating past week eg: "last","previous"
# DOW is calculated as DOW of previous week
diff = wkdy - wd - 7
elif offset == 0:
# modifier is indiacting current week eg: "this"
# DOW is calculated as DOW of this week
diff = wkdy - wd
elif offset == 2:
# no modifier is present.
# i.e. string to be parsed is just DOW
if style == 1:
# next occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy >= wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
else:
if wkdy > wd:
diff = wkdy - wd
else:
diff = 7 - wd + wkdy
elif style == -1:
# last occurance of the DOW is calculated
if currentDayStyle == True:
if wkdy <= wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
if wkdy < wd:
diff = wkdy - wd
else:
diff = wkdy - wd - 7
else:
# occurance of the DOW in the current week is calculated
diff = wkdy - wd
if _debug:
print "wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style)
return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to sourceTime
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base
    @rtype:  tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    offset = self.ptc.Modifiers[modifier]
    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''
    flag = False
    # the unit tests below are deliberately sequential `if`s (not
    # elif): each sets flag when it consumes the unit
    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> end of current month, 9am
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()
        flag = True
        self.dateFlag = 1
    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> upcoming Friday, 5pm
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "next week" -> 7 days ahead, 9am
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # other modifiers recurse with an explicit weekday anchor
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)
        flag = True
        self.dateFlag = 1
    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> end of day (5pm)
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
        flag = True
        self.dateFlag = 1
    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()
        flag = True
        self.timeFlag = 2
    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> Dec 31 of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)
        flag = True
        self.dateFlag = 1
    if flag == False:
        # not a plain unit -- try a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1
            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2
                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()
            flag = True
            self.dateFlag = 1
    if not flag:
        # not a weekday either -- try a time expression, then fall back
        # to re-parsing the surrounding text as the base time
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False
            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)
            if flag2 != 0:
                sourceTime = t
            sources = self.ptc.buildSources(sourceTime)
            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2
    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit
        chunk2 = '%s %s' % (unit, chunk2)
    self.modifierFlag = False
    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to C{sourceTime}
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base
    @rtype:  tuple
    @return: tuple of: the modified sourceTime and the invalid flag
    """
    offset = self.ptc.Modifiers[modifier]
    digit = r'\d+'
    self.modifier2Flag = False
    # FIX: both flags start out defined.  Previously flag2 was only
    # bound inside the chunk2/chunk1 branches, so the early return below
    # raised NameError when both chunks were empty.
    # flag1 is inverted: True means chunk2 did NOT parse.
    flag1 = False
    flag2 = False
    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)
        sourceTime, parsedFlag = self.parse(chunk2, sourceTime)
        flag1 = (parsedFlag == 0)
    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)
        # remember the flags so they can be restored if chunk1 turns out
        # not to be a date/time after all
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag
        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
        # if chunk1 is not a datetime and chunk2 is then do not use datetime
        # value returned by parsing chunk1
        if not (flag1 == False and flag2 == 0):
            sourceTime = sourceTime2
        else:
            self.timeFlag = tempTimeFlag
            self.dateFlag = tempDateFlag
    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type  datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base
    @rtype:  datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()
    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)
        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1
            # NOTE(review): this 'and' means a parsed time with ANY zero
            # component (e.g. 10:00:00) will not set timeFlag; 'or' looks
            # like the intent -- confirm against upstream before changing
            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)
        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2
    if sourceTime is None:
        s = s.lower()
    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            dt = s[:m.start('meridian')].strip()
            # a bare one- or two-digit prefix is just the hour
            if len(dt) <= 2:
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)
            if hr == 24:
                hr = 0
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()
            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)
            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)
        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        self.meridianFlag = False
    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0
        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
        self.timeStdFlag = False
    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False
    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False
    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]
        # NOTE(review): both branches below are identical, so the
        # wkdy > wd test is currently redundant -- confirm intent
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        target = start + datetime.timedelta(days=qty)
        wd = wkdy
        sourceTime = target.timetuple()
        self.weekdyFlag = False
    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)
            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0
        self.timeStrFlag = False
    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0
        # day-level phrases resolve to 9am of the target day
        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()
        self.dayStrFlag = False
    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = '' # TODO
        if sourceTime is None:
            sourceTime = now
        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
        self.unitsFlag = False
    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = '' # TODO
        if sourceTime is None:
            sourceTime = now
        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)
        self.qunitsFlag = False
    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0
    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type  datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base
    @rtype:  tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')
    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime
    if s == '':
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)
    self.timeFlag = 0
    self.dateFlag = 0
    # each pass through the loop carves one recognized chunk out of s,
    # evaluates it, and re-enters with whatever text remains.  The
    # probe order below (modifier, modifier2, long date, short date,
    # day words, units, qunits, weekday, time words, meridian time,
    # HH:MM time) determines precedence and must not be rearranged.
    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''
        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)
        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break
            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()
                    # a trailing '-' before the quantity negates it
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s
        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s
        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))
                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True
        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True
        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''
        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                  (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                  (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)
        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)
                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag
                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)
            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0
            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''
    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0
    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type  source: struct_time
    @param source: C{struct_time} value to increment
    @type  month:  integer
    @param month:  optional number of months to increment
    @type  year:   integer
    @param year:   optional number of years to increment
    @rtype:  datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr = source.year
    mth = source.month
    dy = source.day
    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0
        yr += yi
    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0
        m = abs(mi)
        # FIX: use explicit floor division -- identical to py2's int '/'
        # here, but avoids a float (and TypeError below) under py3
        y = m // 12        # how many whole years are in the increment
        m = m % 12         # remaining months
        if mi < 0:
            mth = mth - m          # sub months from start month
            if mth < 1:            # cross start-of-year?
                y -= 1             # yes - decrement year
                mth += 12          # and fix month
        else:
            mth = mth + m          # add months to start month
            if mth > 12:           # cross end-of-year?
                y += 1             # yes - increment year
                mth -= 12          # and fix month
        yr += y
        # if the day ends up past the last day of
        # the new month, set it to the last day
        # NOTE(review): this clamp only runs on month increments, so a
        # pure year increment from Feb 29 can still produce an invalid
        # date in replace() below -- confirm desired behavior upstream
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)
    d = source.replace(year=yr, month=mth, day=dy)
    return source + (d - source)
|
vilmibm/done | parsedatetime/parsedatetime.py | Calendar.inc | python | def inc(self, source, month=None, year=None):
"""
Takes the given C{source} date, or current date if none is
passed, and increments it according to the values passed in
by month and/or year.
This routine is needed because Python's C{timedelta()} function
does not allow for month or year increments.
@type source: struct_time
@param source: C{struct_time} value to increment
@type month: integer
@param month: optional number of months to increment
@type year: integer
@param year: optional number of years to increment
@rtype: datetime
@return: C{source} incremented by the number of months and/or years
"""
yr = source.year
mth = source.month
dy = source.day
if year:
try:
yi = int(year)
except ValueError:
yi = 0
yr += yi
if month:
try:
mi = int(month)
except ValueError:
mi = 0
m = abs(mi)
y = m / 12 # how many years are in month increment
m = m % 12 # get remaining months
if mi < 0:
mth = mth - m # sub months from start month
if mth < 1: # cross start-of-year?
y -= 1 # yes - decrement year
mth += 12 # and fix month
else:
mth = mth + m # add months to start month
if mth > 12: # cross end-of-year?
y += 1 # yes - increment year
mth -= 12 # and fix month
yr += y
# if the day ends up past the last day of
# the new month, set it to the last day
if dy > self.ptc.daysInMonth(mth, yr):
dy = self.ptc.daysInMonth(mth, yr)
d = source.replace(year=yr, month=mth, day=dy)
return source + (d - source) | Takes the given C{source} date, or current date if none is
passed, and increments it according to the values passed in
by month and/or year.
This routine is needed because Python's C{timedelta()} function
does not allow for month or year increments.
@type source: struct_time
@param source: C{struct_time} value to increment
@type month: integer
@param month: optional number of months to increment
@type year: integer
@param year: optional number of years to increment
@rtype: datetime
@return: C{source} incremented by the number of months and/or years | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L1479-L1540 | null | class Calendar:
"""
A collection of routines to input, parse and manipulate date and times.
The text can either be 'normal' date values or it can be human readable.
"""
def __init__(self, constants=None):
    """
    Default constructor for the L{Calendar} class.

    @type  constants: object
    @param constants: Instance of the class L{parsedatetime_consts.Constants};
                      if omitted, a default C{Constants} instance is created
    @rtype:  object
    @return: L{Calendar} instance
    """
    # if a constants reference is not included, use default
    if constants is None:
        self.ptc = parsedatetime_consts.Constants()
    else:
        self.ptc = constants
    # per-parse state flags recording which token kind was last matched;
    # parse() sets them and _evalString() consumes/clears them
    self.weekdyFlag = False    # monday/tuesday/...
    self.dateStdFlag = False    # 07/21/06
    self.dateStrFlag = False    # July 21st, 2006
    self.timeStdFlag = False    # 5:50
    self.meridianFlag = False    # am/pm
    self.dayStrFlag = False    # tomorrow/yesterday/today/..
    self.timeStrFlag = False    # lunch/noon/breakfast/...
    self.modifierFlag = False    # after/before/prev/next/..
    self.modifier2Flag = False    # after/before/prev/next/..
    self.unitsFlag = False    # hrs/weeks/yrs/min/..
    self.qunitsFlag = False    # h/m/t/d..
    # result-kind accumulators: dateFlag 0/1, timeFlag 0/2; their sum is
    # the flag returned by parse() (0=none, 1=date, 2=time, 3=datetime)
    self.timeFlag = 0
    self.dateFlag = 0
def _convertUnitAsWords(self, unitText):
    """
    Converts text units into their number value

    Five = 5
    Twenty Five = 25
    Two hundred twenty five = 225
    Two thousand and twenty five = 2025
    Two thousand twenty five = 2025

    @type  unitText: string
    @param unitText: number text to convert

    @rtype:  integer
    @return: numerical value of unitText
    """
    # TODO: implement this
    # NOTE(review): currently an unimplemented stub - every call
    # returns None regardless of input
    pass
def _buildTime(self, source, quantity, modifier, units):
    """
    Take C{quantity}, C{modifier} and C{unit} strings and convert them into values.
    After converting, calcuate the time and return the adjusted sourceTime.

    @type  source:   time
    @param source:   time to use as the base (or source)
    @type  quantity: string
    @param quantity: quantity string
    @type  modifier: string
    @param modifier: how quantity and units modify the source time
    @type  units:    string
    @param units:    unit of the quantity (i.e. hours, days, months, etc)

    @rtype:  struct_time
    @return: C{struct_time} of the calculated time
    """
    if _debug:
        print '_buildTime: [%s][%s][%s]' % (quantity, modifier, units)

    if source is None:
        source = time.localtime()

    if quantity is None:
        quantity = ''
    else:
        quantity = quantity.strip()

    # empty quantity means an implicit 1 (e.g. "next week");
    # a non-numeric quantity collapses to 0
    if len(quantity) == 0:
        qty = 1
    else:
        try:
            qty = int(quantity)
        except ValueError:
            qty = 0

    # modifiers such as "next"/"last" scale or negate the quantity
    if modifier in self.ptc.Modifiers:
        qty = qty * self.ptc.Modifiers[modifier]

    if units is None or units == '':
        units = 'dy'

    # plurals are handled by regex's (could be a bug tho)

    (yr, mth, dy, hr, mn, sec, _, _, _) = source

    start = datetime.datetime(yr, mth, dy, hr, mn, sec)
    target = start

    # years and months go through inc() since timedelta has no
    # month/year units; the 'th'/'ths' suffix test matches
    # "month"/"months" before the generic 'm' (minutes) prefix below
    if units.startswith('y'):
        target = self.inc(start, year=qty)
        self.dateFlag = 1
    elif units.endswith('th') or units.endswith('ths'):
        target = self.inc(start, month=qty)
        self.dateFlag = 1
    else:
        if units.startswith('d'):
            target = start + datetime.timedelta(days=qty)
            self.dateFlag = 1
        elif units.startswith('h'):
            target = start + datetime.timedelta(hours=qty)
            self.timeFlag = 2
        elif units.startswith('m'):
            target = start + datetime.timedelta(minutes=qty)
            self.timeFlag = 2
        elif units.startswith('s'):
            target = start + datetime.timedelta(seconds=qty)
            self.timeFlag = 2
        elif units.startswith('w'):
            target = start + datetime.timedelta(weeks=qty)
            self.dateFlag = 1

    return target.timetuple()
def parseDate(self, dateString):
    """
    Parse short-form date strings::

        '05/28/2006' or '04.21'

    @type  dateString: string
    @param dateString: text to convert to a C{datetime}

    @rtype:  struct_time
    @return: calculated C{struct_time} value of dateString
    """
    yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()

    # values pulled from regex's will be stored here and later
    # assigned to mth, dy, yr based on information from the locale
    # -1 is used as the marker value because we want zero values
    # to be passed thru so they can be flagged as errors later
    v1 = -1
    v2 = -1
    v3 = -1

    # CRE_DATE2 locates the date-field separator; split the string
    # into up to three numeric fields
    s = dateString
    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v1 = int(s[:index])
        s = s[index + 1:]

    m = self.ptc.CRE_DATE2.search(s)
    if m is not None:
        index = m.start()
        v2 = int(s[:index])
        v3 = int(s[index + 1:])
    else:
        # only two fields present (e.g. '04.21') - no year given
        v2 = int(s.strip())

    v = [ v1, v2, v3 ]
    d = { 'm': mth, 'd': dy, 'y': yr }

    # dp_order gives the locale's field order (e.g. m, d, y for en_US);
    # unset fields (-1) keep their current-date defaults
    for i in range(0, 3):
        n = v[i]
        c = self.ptc.dp_order[i]
        if n >= 0:
            d[c] = n

    # if the year is not specified and the date has already
    # passed, increment the year
    if v3 == -1 and ((mth > d['m']) or (mth == d['m'] and dy > d['d'])):
        yr = d['y'] + 1
    else:
        yr = d['y']

    mth = d['m']
    dy = d['d']

    # birthday epoch constraint
    if yr < self.ptc.BirthdayEpoch:    # two-digit years pivot on the epoch
        yr += 2000
    elif yr < 100:
        yr += 1900

    if _debug:
        print 'parseDate: ', yr, mth, dy, self.ptc.daysInMonth(mth, yr)

    if (mth > 0 and mth <= 12) and \
       (dy > 0 and dy <= self.ptc.daysInMonth(mth, yr)):
        sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
    else:
        self.dateFlag = 0
        self.timeFlag = 0
        sourceTime = time.localtime() # return current time if date
                                      # string is invalid

    return sourceTime
def parseDateText(self, dateString):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
currentMth = mth
currentDy = dy
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
if m.group('day') != None:
dy = int(m.group('day'))
else:
dy = 1
if m.group('year') != None:
yr = int(m.group('year'))
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += 1
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
# Return current time if date string is invalid
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime()
return sourceTime
def evalRanges(self, datetimeString, sourceTime=None):
    """
    Evaluate the C{datetimeString} text and determine if
    it represents a date or time range.

    @type  datetimeString: string
    @param datetimeString: datetime text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: start datetime, end datetime and the invalid flag
             (invalid flag: 0 = no range found, 1 = date range, 2 = time range)
    """
    startTime = ''
    endTime = ''
    startDate = ''
    endDate = ''
    rangeFlag = 0

    s = datetimeString.strip().lower()

    # make sure the range separator is surrounded by single spaces so
    # the chunks on either side split cleanly
    if self.ptc.rangeSep in s:
        s = s.replace(self.ptc.rangeSep, ' %s ' % self.ptc.rangeSep)
        s = s.replace('  ', ' ')

    # try each range regex in priority order; the first that matches
    # determines how the two halves will be parsed below
    m = self.ptc.CRE_TIMERNG1.search(s)
    if m is not None:
        rangeFlag = 1
    else:
        m = self.ptc.CRE_TIMERNG2.search(s)
        if m is not None:
            rangeFlag = 2
        else:
            m = self.ptc.CRE_TIMERNG4.search(s)
            if m is not None:
                rangeFlag = 7
            else:
                m = self.ptc.CRE_TIMERNG3.search(s)
                if m is not None:
                    rangeFlag = 3
                else:
                    m = self.ptc.CRE_DATERNG1.search(s)
                    if m is not None:
                        rangeFlag = 4
                    else:
                        m = self.ptc.CRE_DATERNG2.search(s)
                        if m is not None:
                            rangeFlag = 5
                        else:
                            m = self.ptc.CRE_DATERNG3.search(s)
                            if m is not None:
                                rangeFlag = 6

    if _debug:
        print 'evalRanges: rangeFlag =', rangeFlag, '[%s]' % s

    if m is not None:
        if (m.group() != s):
            # capture remaining string and parse it to establish the
            # base time for both range endpoints
            parseStr = m.group()
            chunk1 = s[:m.start()]
            chunk2 = s[m.end():]
            s = '%s %s' % (chunk1, chunk2)
            flag = 1

            sourceTime, flag = self.parse(s, sourceTime)

            if flag == 0:
                sourceTime = None
        else:
            parseStr = s

    if rangeFlag == 1:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 2:
        m = re.search(self.ptc.rangeSep, parseStr)
        startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endTime, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 3 or rangeFlag == 7:
        m = re.search(self.ptc.rangeSep, parseStr)
        # capturing the meridian from the end time
        if self.ptc.usesMeridian:
            ampm = re.search(self.ptc.am[0], parseStr)

            # appending the meridian to the start time
            if ampm is not None:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[0]), sourceTime)
            else:
                startTime, sflag = self.parse((parseStr[:m.start()] + self.ptc.meridian[1]), sourceTime)
        else:
            startTime, sflag = self.parse((parseStr[:m.start()]), sourceTime)

        endTime, eflag = self.parse(parseStr[(m.start() + 1):], sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startTime, endTime, 2)
    elif rangeFlag == 4:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate, sflag = self.parse((parseStr[:m.start()]), sourceTime)
        endDate, eflag = self.parse((parseStr[(m.start() + 1):]), sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 5:
        m = re.search(self.ptc.rangeSep, parseStr)
        endDate = parseStr[(m.start() + 1):]

        # capturing the year from the end date
        date = self.ptc.CRE_DATE3.search(endDate)
        endYear = date.group('year')

        # appending the year to the start date if the start date
        # does not have year information and the end date does.
        # eg : "Aug 21 - Sep 4, 2007"
        if endYear is not None:
            startDate = (parseStr[:m.start()]).strip()
            date = self.ptc.CRE_DATE3.search(startDate)
            startYear = date.group('year')

            if startYear is None:
                startDate = startDate + ', ' + endYear
        else:
            startDate = parseStr[:m.start()]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    elif rangeFlag == 6:
        m = re.search(self.ptc.rangeSep, parseStr)
        startDate = parseStr[:m.start()]

        # capturing the month from the start date
        mth = self.ptc.CRE_DATE3.search(startDate)
        mth = mth.group('mthname')

        # appending the month name to the end date
        endDate = mth + parseStr[(m.start() + 1):]

        startDate, sflag = self.parse(startDate, sourceTime)
        endDate, eflag = self.parse(endDate, sourceTime)

        if (eflag != 0) and (sflag != 0):
            return (startDate, endDate, 1)
    else:
        # if range is not found
        sourceTime = time.localtime()

    return (sourceTime, sourceTime, 0)
def _CalculateDOWDelta(self, wd, wkdy, offset, style, currentDayStyle):
    """
    Based on the C{style} and C{currentDayStyle} determine what
    day-of-week value is to be returned.

    @type  wd:              integer
    @param wd:              day-of-week value for the current day
    @type  wkdy:            integer
    @param wkdy:            day-of-week value for the parsed day
    @type  offset:          integer
    @param offset:          offset direction for any modifiers (-1, 0, 1)
    @type  style:           integer
    @param style:           normally the value set in C{Constants.DOWParseStyle}
    @type  currentDayStyle: integer
    @param currentDayStyle: normally the value set in C{Constants.CurrentDOWParseStyle}

    @rtype:  integer
    @return: calculated day-of-week
    """
    # the three possible day deltas, precomputed once
    nextWeek = 7 - wd + wkdy      # wkdy in the following week
    prevWeek = wkdy - wd - 7      # wkdy in the preceding week
    thisWeek = wkdy - wd          # wkdy within the current week

    if offset == 1:
        # modifier indicates a future week (eg: "next")
        diff = nextWeek
    elif offset == -1:
        # modifier indicates a past week (eg: "last", "previous")
        diff = prevWeek
    elif offset == 0:
        # modifier indicates the current week (eg: "this")
        diff = thisWeek
    elif offset == 2:
        # bare weekday name with no modifier - consult the parse style
        if style == 1:
            # seek the next occurrence of the weekday;
            # currentDayStyle decides whether today itself counts
            hit = (wkdy >= wd) if currentDayStyle == True else (wkdy > wd)
            diff = thisWeek if hit else nextWeek
        elif style == -1:
            # seek the most recent occurrence of the weekday
            hit = (wkdy <= wd) if currentDayStyle == True else (wkdy < wd)
            diff = thisWeek if hit else prevWeek
        else:
            # use the occurrence within the current week
            diff = thisWeek

    if _debug:
        print("wd %s, wkdy %s, offset %d, style %d\n" % (wd, wkdy, offset, style))

    return diff
def _evalModifier(self, modifier, chunk1, chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to sourceTime
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    # offset encodes the modifier's direction/meaning
    # (e.g. -1 past, 0 current, 1/2 future - per ptc.Modifiers)
    offset = self.ptc.Modifiers[modifier]

    if sourceTime is not None:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
    else:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()

    # capture the units after the modifier and the remaining
    # string after the unit
    m = self.ptc.CRE_REMAINING.search(chunk2)
    if m is not None:
        index = m.start() + 1
        unit = chunk2[:m.start()]
        chunk2 = chunk2[index:]
    else:
        unit = chunk2
        chunk2 = ''

    flag = False

    if unit == 'month' or \
       unit == 'mth' or \
       unit == 'm':
        if offset == 0:
            # "this month" -> end of the current month at 09:00
            dy = self.ptc.daysInMonth(mth, yr)
            sourceTime = (yr, mth, dy, 9, 0, 0, wd, yd, isdst)
        elif offset == 2:
            # if day is the last day of the month, calculate the last day
            # of the next month
            if dy == self.ptc.daysInMonth(mth, yr):
                dy = self.ptc.daysInMonth(mth + 1, yr)

            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = self.inc(start, month=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, 1, 9, 0, 0)
            target = self.inc(start, month=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'week' or \
       unit == 'wk' or \
       unit == 'w':
        if offset == 0:
            # "this week" -> Friday (wd 4) of the current week at 17:00
            start = datetime.datetime(yr, mth, dy, 17, 0, 0)
            target = start + datetime.timedelta(days=(4 - wd))
            sourceTime = target.timetuple()
        elif offset == 2:
            # "next week" -> seven days ahead at 09:00
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=7)
            sourceTime = target.timetuple()
        else:
            # delegate other offsets to the weekday logic below
            return self._evalModifier(modifier, chunk1, "monday " + chunk2, sourceTime)

        flag = True
        self.dateFlag = 1

    if unit == 'day' or \
       unit == 'dy' or \
       unit == 'd':
        if offset == 0:
            # "this day" -> end of today (17:00)
            sourceTime = (yr, mth, dy, 17, 0, 0, wd, yd, isdst)
            self.timeFlag = 2
        elif offset == 2:
            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=1)
            sourceTime = target.timetuple()
        else:
            start = datetime.datetime(yr, mth, dy, 9, 0, 0)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()

        flag = True
        self.dateFlag = 1

    if unit == 'hour' or \
       unit == 'hr':
        if offset == 0:
            # "this hour" -> top of the current hour
            sourceTime = (yr, mth, dy, hr, 0, 0, wd, yd, isdst)
        else:
            start = datetime.datetime(yr, mth, dy, hr, 0, 0)
            target = start + datetime.timedelta(hours=offset)
            sourceTime = target.timetuple()

        flag = True
        self.timeFlag = 2

    if unit == 'year' or \
       unit == 'yr' or \
       unit == 'y':
        if offset == 0:
            # "this year" -> end of the current year
            sourceTime = (yr, 12, 31, hr, mn, sec, wd, yd, isdst)
        elif offset == 2:
            sourceTime = (yr + 1, mth, dy, hr, mn, sec, wd, yd, isdst)
        else:
            sourceTime = (yr + offset, 1, 1, 9, 0, 0, wd, yd, isdst)

        flag = True
        self.dateFlag = 1

    if flag == False:
        # the unit wasn't a known time/date unit - try a weekday name
        m = self.ptc.CRE_WEEKDAY.match(unit)
        if m is not None:
            wkdy = m.group()
            self.dateFlag = 1

            if modifier == 'eod':
                # Calculate the upcoming weekday
                self.modifierFlag = False
                (sourceTime, _) = self.parse(wkdy, sourceTime)
                sources = self.ptc.buildSources(sourceTime)
                self.timeFlag = 2

                if modifier in sources:
                    sourceTime = sources[modifier]
            else:
                wkdy = self.ptc.WeekdayOffsets[wkdy]
                diff = self._CalculateDOWDelta(wd, wkdy, offset,
                                               self.ptc.DOWParseStyle,
                                               self.ptc.CurrentDOWParseStyle)
                start = datetime.datetime(yr, mth, dy, 9, 0, 0)
                target = start + datetime.timedelta(days=diff)
                sourceTime = target.timetuple()

            flag = True
            self.dateFlag = 1

    if not flag:
        # still nothing - try a natural-language time string
        m = self.ptc.CRE_TIME.match(unit)
        if m is not None:
            self.modifierFlag = False
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst), _ = self.parse(unit)

            start = datetime.datetime(yr, mth, dy, hr, mn, sec)
            target = start + datetime.timedelta(days=offset)
            sourceTime = target.timetuple()
            flag = True
        else:
            self.modifierFlag = False

            # check if the remaining text is parsable and if so,
            # use it as the base time for the modifier source time
            t, flag2 = self.parse('%s %s' % (chunk1, unit), sourceTime)

            if flag2 != 0:
                sourceTime = t

            sources = self.ptc.buildSources(sourceTime)

            if modifier in sources:
                sourceTime = sources[modifier]
                flag = True
                self.timeFlag = 2

    # if the word after next is a number, the string is more than likely
    # to be "next 4 hrs" which we will have to combine the units with the
    # rest of the string
    if not flag:
        if offset < 0:
            # if offset is negative, the unit has to be made negative
            unit = '-%s' % unit

        chunk2 = '%s %s' % (unit, chunk2)

    self.modifierFlag = False

    #return '%s %s' % (chunk1, chunk2), sourceTime
    return '%s' % chunk2, sourceTime
def _evalModifier2(self, modifier, chunk1 , chunk2, sourceTime):
    """
    Evaluate the C{modifier} string and following text (passed in
    as C{chunk1} and C{chunk2}) and if they match any known modifiers
    calculate the delta and apply it to C{sourceTime}.

    @type  modifier:   string
    @param modifier:   modifier text to apply to C{sourceTime}
    @type  chunk1:     string
    @param chunk1:     first text chunk that followed modifier (if any)
    @type  chunk2:     string
    @param chunk2:     second text chunk that followed modifier (if any)
    @type  sourceTime: struct_time
    @param sourceTime: C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: remaining text and the modified sourceTime
    """
    offset = self.ptc.Modifiers[modifier]
    digit = r'\d+'

    self.modifier2Flag = False

    # If the string after the negative modifier starts with digits,
    # then it is likely that the string is similar to ' before 3 days'
    # or 'evening prior to 3 days'.
    # In this case, the total time is calculated by subtracting '3 days'
    # from the current date.
    # So, we have to identify the quantity and negate it before parsing
    # the string.
    # This is not required for strings not starting with digits since the
    # string is enough to calculate the sourceTime
    if chunk2 != '':
        if offset < 0:
            m = re.match(digit, chunk2.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk2 = chunk2[m.end():]
                chunk2 = '%d%s' % (qty, chunk2)

        sourceTime, flag1 = self.parse(chunk2, sourceTime)
        # flag1 True here means chunk2 did NOT parse (parse() returned 0)
        if flag1 == 0:
            flag1 = True
        else:
            flag1 = False
        flag2 = False
    else:
        flag1 = False

    if chunk1 != '':
        if offset < 0:
            m = re.search(digit, chunk1.strip())
            if m is not None:
                qty = int(m.group()) * -1
                chunk1 = chunk1[m.end():]
                chunk1 = '%d%s' % (qty, chunk1)

        # save the flags so they can be restored if chunk1's parse
        # result is discarded below
        tempDateFlag = self.dateFlag
        tempTimeFlag = self.timeFlag

        sourceTime2, flag2 = self.parse(chunk1, sourceTime)
    else:
        # NOTE(review): if both chunks are empty, flag2 is unbound
        # here and this raises NameError - confirm callers always
        # supply at least one chunk
        return sourceTime, (flag1 and flag2)

    # if chunk1 is not a datetime and chunk2 is then do not use datetime
    # value returned by parsing chunk1
    if not (flag1 == False and flag2 == 0):
        sourceTime = sourceTime2
    else:
        self.timeFlag = tempTimeFlag
        self.dateFlag = tempDateFlag

    return sourceTime, (flag1 and flag2)
def _evalString(self, datetimeString, sourceTime=None):
    """
    Calculate the datetime based on flags set by the L{parse()} routine

    Examples handled::
        RFC822, W3CDTF formatted dates
        HH:MM[:SS][ am/pm]
        MM/DD/YYYY
        DD MMMM YYYY

    @type  datetimeString: string
    @param datetimeString: text to try and parse as more "traditional"
                           date/time text
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  datetime
    @return: calculated C{struct_time} value or current C{struct_time}
             if not parsed
    """
    s = datetimeString.strip()
    now = time.localtime()

    # Given string date is a RFC822 date
    if sourceTime is None:
        sourceTime = _parse_date_rfc822(s)

        if sourceTime is not None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst, _) = sourceTime
            self.dateFlag = 1

            if (hr != 0) and (mn != 0) and (sec != 0):
                self.timeFlag = 2

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

    # Given string date is a W3CDTF date
    if sourceTime is None:
        sourceTime = _parse_date_w3dtf(s)

        if sourceTime is not None:
            self.dateFlag = 1
            self.timeFlag = 2

    if sourceTime is None:
        s = s.lower()

    # The flags below were set by parse() to say which token type the
    # string matched; each section consumes and clears its own flag.

    # Given string is in the format HH:MM(:SS)(am/pm)
    if self.meridianFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS2.search(s)
        if m is not None:
            dt = s[:m.start('meridian')].strip()
            if len(dt) <= 2:
                # bare hour like "5 pm"
                hr = int(dt)
                mn = 0
                sec = 0
            else:
                hr, mn, sec = _extract_time(m)

            if hr == 24:
                hr = 0

            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
            meridian = m.group('meridian').lower()

            # if 'am' found and hour is 12 - force hour to 0 (midnight)
            if (meridian in self.ptc.am) and hr == 12:
                sourceTime = (yr, mth, dy, 0, mn, sec, wd, yd, isdst)

            # if 'pm' found and hour < 12, add 12 to shift to evening
            if (meridian in self.ptc.pm) and hr < 12:
                sourceTime = (yr, mth, dy, hr + 12, mn, sec, wd, yd, isdst)

        # invalid time
        if hr > 24 or mn > 59 or sec > 59:
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0

        self.meridianFlag = False

    # Given string is in the format HH:MM(:SS)
    if self.timeStdFlag:
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        m = self.ptc.CRE_TIMEHMS.search(s)
        if m is not None:
            hr, mn, sec = _extract_time(m)
        if hr == 24:
            hr = 0

        if hr > 24 or mn > 59 or sec > 59:
            # invalid time
            sourceTime = now
            self.dateFlag = 0
            self.timeFlag = 0
        else:
            sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)

        self.timeStdFlag = False

    # Given string is in the format 07/21/2006
    if self.dateStdFlag:
        sourceTime = self.parseDate(s)
        self.dateStdFlag = False

    # Given string is in the format "May 23rd, 2005"
    if self.dateStrFlag:
        sourceTime = self.parseDateText(s)
        self.dateStrFlag = False

    # Given string is a weekday
    if self.weekdyFlag:
        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = now

        start = datetime.datetime(yr, mth, dy, hr, mn, sec)
        wkdy = self.ptc.WeekdayOffsets[s]

        # NOTE(review): both branches below are identical - the
        # wkdy > wd split looks vestigial and could be collapsed
        if wkdy > wd:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)
        else:
            qty = self._CalculateDOWDelta(wd, wkdy, 2,
                                          self.ptc.DOWParseStyle,
                                          self.ptc.CurrentDOWParseStyle)

        target = start + datetime.timedelta(days=qty)
        wd = wkdy

        sourceTime = target.timetuple()
        self.weekdyFlag = False

    # Given string is a natural language time string like
    # lunch, midnight, etc
    if self.timeStrFlag:
        if s in self.ptc.re_values['now']:
            sourceTime = now
        else:
            sources = self.ptc.buildSources(sourceTime)

            if s in sources:
                sourceTime = sources[s]
            else:
                sourceTime = now
                self.dateFlag = 0
                self.timeFlag = 0

        self.timeStrFlag = False

    # Given string is a natural language date string like today, tomorrow..
    if self.dayStrFlag:
        if sourceTime is None:
            sourceTime = now

        (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        if s in self.ptc.dayOffsets:
            offset = self.ptc.dayOffsets[s]
        else:
            offset = 0

        start = datetime.datetime(yr, mth, dy, 9, 0, 0)
        target = start + datetime.timedelta(days=offset)
        sourceTime = target.timetuple()

        self.dayStrFlag = False

    # Given string is a time string with units like "5 hrs 30 min"
    if self.unitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_UNITS.search(s)
        if m is not None:
            units = m.group('units')
            quantity = s[:m.start('units')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.unitsFlag = False

    # Given string is a time string with single char units like "5 h 30 m"
    if self.qunitsFlag:
        modifier = ''  # TODO

        if sourceTime is None:
            sourceTime = now

        m = self.ptc.CRE_QUNITS.search(s)
        if m is not None:
            units = m.group('qunits')
            quantity = s[:m.start('qunits')]
            sourceTime = self._buildTime(sourceTime, quantity, modifier, units)

        self.qunitsFlag = False

    # Given string does not match anything
    if sourceTime is None:
        sourceTime = now
        self.dateFlag = 0
        self.timeFlag = 0

    return sourceTime
def parse(self, datetimeString, sourceTime=None):
    """
    Splits the given C{datetimeString} into tokens, finds the regex
    patterns that match and then calculates a C{struct_time} value from
    the chunks.

    If C{sourceTime} is given then the C{struct_time} value will be
    calculated from that value, otherwise from the current date/time.

    If the C{datetimeString} is parsed and date/time value found then
    the second item of the returned tuple will be a flag to let you know
    what kind of C{struct_time} value is being returned::

        0 = not parsed at all
        1 = parsed as a C{date}
        2 = parsed as a C{time}
        3 = parsed as a C{datetime}

    @type  datetimeString: string
    @param datetimeString: date/time text to evaluate
    @type  sourceTime:     struct_time
    @param sourceTime:     C{struct_time} value to use as the base

    @rtype:  tuple
    @return: tuple of: modified C{sourceTime} and the result flag
    """
    # accept a datetime for convenience; anything else must already be
    # a struct_time (or plain tuple)
    if sourceTime:
        if isinstance(sourceTime, datetime.datetime):
            if _debug:
                print 'coercing datetime to timetuple'
            sourceTime = sourceTime.timetuple()
        else:
            if not isinstance(sourceTime, time.struct_time) and \
               not isinstance(sourceTime, tuple):
                raise Exception('sourceTime is not a struct_time')

    s = datetimeString.strip().lower()
    parseStr = ''
    totalTime = sourceTime

    if s == '' :
        if sourceTime is not None:
            return (sourceTime, self.dateFlag + self.timeFlag)
        else:
            return (time.localtime(), 0)

    self.timeFlag = 0
    self.dateFlag = 0

    # Each iteration matches one token at the front of the remaining
    # string: the first regex section that fires sets parseStr (and a
    # flag for _evalString), removes the token from s, and the bottom
    # of the loop evaluates it.
    while len(s) > 0:
        flag = False
        chunk1 = ''
        chunk2 = ''

        if _debug:
            print 'parse (top of loop): [%s][%s]' % (s, parseStr)

        if parseStr == '':
            # Modifier like next\prev..
            m = self.ptc.CRE_MODIFIER.search(s)
            if m is not None:
                self.modifierFlag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Modifier like from\after\prior..
            m = self.ptc.CRE_MODIFIER2.search(s)
            if m is not None:
                self.modifier2Flag = True
                if (m.group('modifier') != s):
                    # capture remaining string
                    parseStr = m.group('modifier')
                    chunk1 = s[:m.start('modifier')].strip()
                    chunk2 = s[m.end('modifier'):].strip()
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            valid_date = False
            for match in self.ptc.CRE_DATE3.finditer(s):
                # to prevent "HH:MM(:SS) time strings" expressions from triggering
                # this regex, we checks if the month field exists in the searched
                # expression, if it doesn't exist, the date field is not valid
                if match.group('mthname'):
                    m = self.ptc.CRE_DATE3.search(s, match.start())
                    valid_date = True
                    break

            # String date format
            if valid_date:
                self.dateStrFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Standard date format
            m = self.ptc.CRE_DATE.search(s)
            if m is not None:
                self.dateStdFlag = True
                self.dateFlag = 1
                if (m.group('date') != s):
                    # capture remaining string
                    parseStr = m.group('date')
                    chunk1 = s[:m.start('date')]
                    chunk2 = s[m.end('date'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Natural language day strings
            m = self.ptc.CRE_DAY.search(s)
            if m is not None:
                self.dayStrFlag = True
                self.dateFlag = 1
                if (m.group('day') != s):
                    # capture remaining string
                    parseStr = m.group('day')
                    chunk1 = s[:m.start('day')]
                    chunk2 = s[m.end('day'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_UNITS.search(s)
            if m is not None:
                self.unitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding chunk negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Quantity + Units
            m = self.ptc.CRE_QUNITS.search(s)
            if m is not None:
                self.qunitsFlag = True
                if (m.group('qty') != s):
                    # capture remaining string
                    parseStr = m.group('qty')
                    chunk1 = s[:m.start('qty')].strip()
                    chunk2 = s[m.end('qty'):].strip()

                    # a trailing '-' on the preceding chunk negates the qty
                    if chunk1[-1:] == '-':
                        parseStr = '-%s' % parseStr
                        chunk1 = chunk1[:-1]

                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # Weekday
            m = self.ptc.CRE_WEEKDAY.search(s)
            if m is not None:
                gv = m.group('weekday')
                # skip words that are also day-offset strings
                # (e.g. locale strings handled by CRE_DAY)
                if s not in self.ptc.dayOffsets:
                    self.weekdyFlag = True
                    self.dateFlag = 1
                    if (gv != s):
                        # capture remaining string
                        parseStr = gv
                        chunk1 = s[:m.start('weekday')]
                        chunk2 = s[m.end('weekday'):]
                        s = '%s %s' % (chunk1, chunk2)
                        flag = True
                    else:
                        parseStr = s

        if parseStr == '':
            # Natural language time strings
            m = self.ptc.CRE_TIME.search(s)
            if m is not None:
                self.timeStrFlag = True
                self.timeFlag = 2
                if (m.group('time') != s):
                    # capture remaining string
                    parseStr = m.group('time')
                    chunk1 = s[:m.start('time')]
                    chunk2 = s[m.end('time'):]
                    s = '%s %s' % (chunk1, chunk2)
                    flag = True
                else:
                    parseStr = s

        if parseStr == '':
            # HH:MM(:SS) am/pm time strings
            m = self.ptc.CRE_TIMEHMS2.search(s)
            if m is not None:
                self.meridianFlag = True
                self.timeFlag = 2
                if m.group('minutes') is not None:
                    if m.group('seconds') is not None:
                        parseStr = '%s:%s:%s %s' % (m.group('hours'),
                                                    m.group('minutes'),
                                                    m.group('seconds'),
                                                    m.group('meridian'))
                    else:
                        parseStr = '%s:%s %s' % (m.group('hours'),
                                                 m.group('minutes'),
                                                 m.group('meridian'))
                else:
                    parseStr = '%s %s' % (m.group('hours'),
                                          m.group('meridian'))
                chunk1 = s[:m.start('hours')]
                chunk2 = s[m.end('meridian'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True

        if parseStr == '':
            # HH:MM(:SS) time strings
            m = self.ptc.CRE_TIMEHMS.search(s)
            if m is not None:
                self.timeStdFlag = True
                self.timeFlag = 2
                if m.group('seconds') is not None:
                    parseStr = '%s:%s:%s' % (m.group('hours'),
                                             m.group('minutes'),
                                             m.group('seconds'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('seconds'):]
                else:
                    parseStr = '%s:%s' % (m.group('hours'),
                                          m.group('minutes'))
                    chunk1 = s[:m.start('hours')]
                    chunk2 = s[m.end('minutes'):]
                s = '%s %s' % (chunk1, chunk2)
                flag = True

        # if string does not match any regex, empty string to
        # come out of the while loop
        if not flag:
            s = ''

        if _debug:
            print 'parse (bottom) [%s][%s][%s][%s]' % (s, parseStr, chunk1, chunk2)
            print 'weekday %s, dateStd %s, dateStr %s, time %s, timeStr %s, meridian %s' % \
                   (self.weekdyFlag, self.dateStdFlag, self.dateStrFlag, self.timeStdFlag, self.timeStrFlag, self.meridianFlag)
            print 'dayStr %s, modifier %s, modifier2 %s, units %s, qunits %s' % \
                   (self.dayStrFlag, self.modifierFlag, self.modifier2Flag, self.unitsFlag, self.qunitsFlag)

        # evaluate the matched string
        if parseStr != '':
            if self.modifierFlag == True:
                t, totalTime = self._evalModifier(parseStr, chunk1, chunk2, totalTime)
                # t is the unparsed part of the chunks.
                # If it is not date/time, return current
                # totalTime as it is; else return the output
                # after parsing t.
                if (t != '') and (t != None):
                    tempDateFlag = self.dateFlag
                    tempTimeFlag = self.timeFlag
                    (totalTime2, flag) = self.parse(t, totalTime)

                    if flag == 0 and totalTime is not None:
                        self.timeFlag = tempTimeFlag
                        self.dateFlag = tempDateFlag

                        return (totalTime, self.dateFlag + self.timeFlag)
                    else:
                        return (totalTime2, self.dateFlag + self.timeFlag)
            elif self.modifier2Flag == True:
                totalTime, invalidFlag = self._evalModifier2(parseStr, chunk1, chunk2, totalTime)
                if invalidFlag == True:
                    self.dateFlag = 0
                    self.timeFlag = 0
            else:
                totalTime = self._evalString(parseStr, totalTime)
                parseStr = ''

    # String is not parsed at all
    if totalTime is None or totalTime == sourceTime:
        totalTime = time.localtime()
        self.dateFlag = 0
        self.timeFlag = 0

    return (totalTime, self.dateFlag + self.timeFlag)
def inc(self, source, month=None, year=None):
    """
    Takes the given C{source} date, or current date if none is
    passed, and increments it according to the values passed in
    by month and/or year.

    This routine is needed because Python's C{timedelta()} function
    does not allow for month or year increments.

    @type  source: struct_time
    @param source: C{struct_time} value to increment
    @type  month:  integer
    @param month:  optional number of months to increment
    @type  year:   integer
    @param year:   optional number of years to increment

    @rtype:  datetime
    @return: C{source} incremented by the number of months and/or years
    """
    yr  = source.year
    mth = source.month
    dy  = source.day

    if year:
        try:
            yi = int(year)
        except ValueError:
            yi = 0

        yr += yi

    if month:
        try:
            mi = int(month)
        except ValueError:
            mi = 0

        m = abs(mi)
        # floor division keeps the arithmetic integral: with true
        # division (Python 3, or "from __future__ import division")
        # "m / 12" yields a float, and the float year would make
        # source.replace(year=...) below raise TypeError
        y = m // 12     # how many whole years are in month increment
        m = m % 12      # get remaining months

        if mi < 0:
            mth = mth - m       # sub months from start month
            if mth < 1:         # cross start-of-year?
                y   -= 1        #   yes - decrement year
                mth += 12       #   and fix month
        else:
            mth = mth + m       # add months to start month
            if mth > 12:        # cross end-of-year?
                y   += 1        #   yes - increment year
                mth -= 12       #   and fix month

        yr += y

        # if the day ends up past the last day of
        # the new month, set it to the last day
        if dy > self.ptc.daysInMonth(mth, yr):
            dy = self.ptc.daysInMonth(mth, yr)

    d = source.replace(year=yr, month=mth, day=dy)

    # d - source is an exact timedelta, so this is equivalent to
    # returning d; kept in the original form for safety
    return source + (d - source)
|
vilmibm/done | parsedatetime/parsedatetime_consts.py | _initLocale | python | def _initLocale(ptc):
def lcase(x):
return x.lower()
if pyicu and ptc.usePyICU:
ptc.icuLocale = None
if ptc.localeID is not None:
ptc.icuLocale = pyicu.Locale(ptc.localeID)
if ptc.icuLocale is None:
for id in range(0, len(ptc.fallbackLocales)):
ptc.localeID = ptc.fallbackLocales[id]
ptc.icuLocale = pyicu.Locale(ptc.localeID)
if ptc.icuLocale is not None:
break
ptc.icuSymbols = pyicu.DateFormatSymbols(ptc.icuLocale)
# grab ICU list of weekdays, skipping first entry which
# is always blank
wd = map(lcase, ptc.icuSymbols.getWeekdays()[1:])
swd = map(lcase, ptc.icuSymbols.getShortWeekdays()[1:])
# store them in our list with Monday first (ICU puts Sunday first)
ptc.Weekdays = wd[1:] + wd[0:1]
ptc.shortWeekdays = swd[1:] + swd[0:1]
ptc.Months = map(lcase, ptc.icuSymbols.getMonths())
ptc.shortMonths = map(lcase, ptc.icuSymbols.getShortMonths())
# not quite sure how to init this so for now
# set it to none so it will be set to the en_US defaults for now
ptc.re_consts = None
ptc.icu_df = { 'full': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull, ptc.icuLocale),
'long': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong, ptc.icuLocale),
'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
'short': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort, ptc.icuLocale),
}
ptc.icu_tf = { 'full': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull, ptc.icuLocale),
'long': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong, ptc.icuLocale),
'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
'short': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort, ptc.icuLocale),
}
ptc.dateFormats = { 'full': ptc.icu_df['full'].toPattern(),
'long': ptc.icu_df['long'].toPattern(),
'medium': ptc.icu_df['medium'].toPattern(),
'short': ptc.icu_df['short'].toPattern(),
}
ptc.timeFormats = { 'full': ptc.icu_tf['full'].toPattern(),
'long': ptc.icu_tf['long'].toPattern(),
'medium': ptc.icu_tf['medium'].toPattern(),
'short': ptc.icu_tf['short'].toPattern(),
}
else:
if not ptc.localeID in pdtLocales:
for id in range(0, len(ptc.fallbackLocales)):
ptc.localeID = ptc.fallbackLocales[id]
if ptc.localeID in pdtLocales:
break
ptc.locale = pdtLocales[ptc.localeID]
ptc.usePyICU = False
ptc.Weekdays = ptc.locale.Weekdays
ptc.shortWeekdays = ptc.locale.shortWeekdays
ptc.Months = ptc.locale.Months
ptc.shortMonths = ptc.locale.shortMonths
ptc.dateFormats = ptc.locale.dateFormats
ptc.timeFormats = ptc.locale.timeFormats
# these values are used to setup the various bits
# of the regex values used to parse
#
# check if a local set of constants has been
# provided, if not use en_US as the default
if ptc.localeID in pdtLocales:
ptc.re_sources = pdtLocales[ptc.localeID].re_sources
ptc.re_values = pdtLocales[ptc.localeID].re_consts
units = pdtLocales[ptc.localeID].units
ptc.Modifiers = pdtLocales[ptc.localeID].modifiers
ptc.dayOffsets = pdtLocales[ptc.localeID].dayoffsets
# for now, pull over any missing keys from the US set
for key in pdtLocales['en_US'].re_consts:
if not key in ptc.re_values:
ptc.re_values[key] = pdtLocales['en_US'].re_consts[key]
else:
ptc.re_sources = pdtLocales['en_US'].re_sources
ptc.re_values = pdtLocales['en_US'].re_consts
ptc.Modifiers = pdtLocales['en_US'].modifiers
ptc.dayOffsets = pdtLocales['en_US'].dayoffsets
units = pdtLocales['en_US'].units
# escape any regex special characters that may be found
wd = tuple(map(re.escape, ptc.Weekdays))
swd = tuple(map(re.escape, ptc.shortWeekdays))
mth = tuple(map(re.escape, ptc.Months))
smth = tuple(map(re.escape, ptc.shortMonths))
ptc.re_values['months'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % mth
ptc.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % smth
ptc.re_values['days'] = '%s|%s|%s|%s|%s|%s|%s' % wd
ptc.re_values['shortdays'] = '%s|%s|%s|%s|%s|%s|%s' % swd
l = []
for unit in units:
l.append('|'.join(units[unit]))
ptc.re_values['units'] = '|'.join(l)
ptc.Units = ptc.re_values['units'].split('|') | Helper function to initialize the different lists and strings
from either PyICU or one of the internal pdt Locales and store
them into ptc. | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L479-L598 | null | #!/usr/bin/env python
"""
parsedatetime constants and helper functions to determine
regex values from Locale information if present.
Also contains the internal Locale classes to give some sane
defaults if PyICU is not found.
"""
__license__ = """
Copyright (c) 2004-2008 Mike Taylor
Copyright (c) 2006-2008 Darshana Chhajed
Copyright (c) 2007 Bernd Zeimetz <bzed@debian.org>
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import PyICU as pyicu
except:
pyicu = None
import datetime
import calendar
import time
import re
class pdtLocale_en:
"""
en_US Locale constants
This class will be used to initialize L{Constants} if PyICU is not located.
Defined as class variables are the lists and strings needed by parsedatetime
to evaluate strings for USA
"""
localeID = 'en_US' # don't use a unicode string
dateSep = [ u'/', u'.' ]
timeSep = [ u':' ]
meridian = [ u'AM', u'PM' ]
usesMeridian = True
uses24 = False
Weekdays = [ u'monday', u'tuesday', u'wednesday',
u'thursday', u'friday', u'saturday', u'sunday',
]
shortWeekdays = [ u'mon', u'tues', u'wed',
u'thu', u'fri', u'sat', u'sun',
]
Months = [ u'january', u'february', u'march',
u'april', u'may', u'june',
u'july', u'august', u'september',
u'october', u'november', u'december',
]
shortMonths = [ u'jan', u'feb', u'mar',
u'apr', u'may', u'jun',
u'jul', u'aug', u'sep',
u'oct', u'nov', u'dec',
]
dateFormats = { 'full': 'EEEE, MMMM d, yyyy',
'long': 'MMMM d, yyyy',
'medium': 'MMM d, yyyy',
'short': 'M/d/yy',
}
timeFormats = { 'full': 'h:mm:ss a z',
'long': 'h:mm:ss a z',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
dp_order = [ u'm', u'd', u'y' ]
# this will be added to re_consts later
units = { 'seconds': [ 'second', 'sec' ],
'minutes': [ 'minute', 'min' ],
'hours': [ 'hour', 'hr' ],
'days': [ 'day', 'dy' ],
'weeks': [ 'week', 'wk' ],
'months': [ 'month', 'mth' ],
'years': [ 'year', 'yr' ],
}
# text constants to be used by regex's later
re_consts = { 'specials': 'in|on|of|at',
'timeseperator': ':',
'rangeseperator': '-',
'daysuffix': 'rd|st|nd|th',
'meridian': 'am|pm|a.m.|p.m.|a|p',
'qunits': 'h|m|s|d|w|m|y',
'now': [ 'now' ],
}
# Used to adjust the returned date before/after the source
modifiers = { 'from': 1,
'before': -1,
'after': 1,
'ago': -1,
'prior': -1,
'prev': -1,
'last': -1,
'next': 1,
'previous': -1,
'in a': 2,
'end of': 0,
'eod': 0,
'eo': 0
}
dayoffsets = { 'tomorrow': 1,
'today': 0,
'yesterday': -1,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = { 'noon': { 'hr': 12, 'mn': 0, 'sec': 0 },
'lunch': { 'hr': 12, 'mn': 0, 'sec': 0 },
'morning': { 'hr': 6, 'mn': 0, 'sec': 0 },
'breakfast': { 'hr': 8, 'mn': 0, 'sec': 0 },
'dinner': { 'hr': 19, 'mn': 0, 'sec': 0 },
'evening': { 'hr': 18, 'mn': 0, 'sec': 0 },
'midnight': { 'hr': 0, 'mn': 0, 'sec': 0 },
'night': { 'hr': 21, 'mn': 0, 'sec': 0 },
'tonight': { 'hr': 21, 'mn': 0, 'sec': 0 },
'eod': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
class pdtLocale_au:
"""
en_AU Locale constants
This class will be used to initialize L{Constants} if PyICU is not located.
Defined as class variables are the lists and strings needed by parsedatetime
to evaluate strings for Australia
"""
localeID = 'en_AU' # don't use a unicode string
dateSep = [ u'-', u'/' ]
timeSep = [ u':' ]
meridian = [ u'AM', u'PM' ]
usesMeridian = True
uses24 = False
Weekdays = [ u'monday', u'tuesday', u'wednesday',
u'thursday', u'friday', u'saturday', u'sunday',
]
shortWeekdays = [ u'mon', u'tues', u'wed',
u'thu', u'fri', u'sat', u'sun',
]
Months = [ u'january', u'february', u'march',
u'april', u'may', u'june',
u'july', u'august', u'september',
u'october', u'november', u'december',
]
shortMonths = [ u'jan', u'feb', u'mar',
u'apr', u'may', u'jun',
u'jul', u'aug', u'sep',
u'oct', u'nov', u'dec',
]
dateFormats = { 'full': 'EEEE, d MMMM yyyy',
'long': 'd MMMM yyyy',
'medium': 'dd/MM/yyyy',
'short': 'd/MM/yy',
}
timeFormats = { 'full': 'h:mm:ss a z',
'long': 'h:mm:ss a',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
dp_order = [ u'd', u'm', u'y' ]
# this will be added to re_consts later
units = { 'seconds': [ 'second', 'sec' ],
'minutes': [ 'minute', 'min' ],
'hours': [ 'hour', 'hr' ],
'days': [ 'day', 'dy' ],
'weeks': [ 'week', 'wk' ],
'months': [ 'month', 'mth' ],
'years': [ 'year', 'yr' ],
}
# text constants to be used by regex's later
re_consts = { 'specials': 'in|on|of|at',
'timeseperator': ':',
'rangeseperator': '-',
'daysuffix': 'rd|st|nd|th',
'meridian': 'am|pm|a.m.|p.m.|a|p',
'qunits': 'h|m|s|d|w|m|y',
'now': [ 'now' ],
}
# Used to adjust the returned date before/after the source
modifiers = { 'from': 1,
'before': -1,
'after': 1,
'ago': 1,
'prior': -1,
'prev': -1,
'last': -1,
'next': 1,
'previous': -1,
'in a': 2,
'end of': 0,
'eo': 0,
}
dayoffsets = { 'tomorrow': 1,
'today': 0,
'yesterday': -1,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = { 'noon': { 'hr': 12, 'mn': 0, 'sec': 0 },
'lunch': { 'hr': 12, 'mn': 0, 'sec': 0 },
'morning': { 'hr': 6, 'mn': 0, 'sec': 0 },
'breakfast': { 'hr': 8, 'mn': 0, 'sec': 0 },
'dinner': { 'hr': 19, 'mn': 0, 'sec': 0 },
'evening': { 'hr': 18, 'mn': 0, 'sec': 0 },
'midnight': { 'hr': 0, 'mn': 0, 'sec': 0 },
'night': { 'hr': 21, 'mn': 0, 'sec': 0 },
'tonight': { 'hr': 21, 'mn': 0, 'sec': 0 },
'eod': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
class pdtLocale_es:
    """
    es Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings in Spanish

    Note that I don't speak Spanish so many of the items below are still in English
    """
    localeID = 'es'   # don't use a unicode string
    dateSep  = [ u'/' ]
    timeSep  = [ u':' ]
    meridian = []

    usesMeridian = False
    uses24       = True

    Weekdays      = [ u'lunes', u'martes', u'mi\xe9rcoles',
                      u'jueves', u'viernes', u's\xe1bado', u'domingo',
                    ]
    shortWeekdays = [ u'lun', u'mar', u'mi\xe9',
                      u'jue', u'vie', u's\xe1b', u'dom',
                    ]
    Months        = [ u'enero', u'febrero', u'marzo',
                      u'abril', u'mayo', u'junio',
                      u'julio', u'agosto', u'septiembre',
                      u'octubre', u'noviembre', u'diciembre'
                    ]
    shortMonths   = [ u'ene', u'feb', u'mar',
                      u'abr', u'may', u'jun',
                      u'jul', u'ago', u'sep',
                      u'oct', u'nov', u'dic'
                    ]
    dateFormats = { 'full':   "EEEE d' de 'MMMM' de 'yyyy",
                    'long':   "d' de 'MMMM' de 'yyyy",
                    'medium': "dd-MMM-yy",
                    'short':  "d/MM/yy",
                  }
    timeFormats = { 'full':   "HH'H'mm' 'ss z",
                    'long':   "HH:mm:ss z",
                    'medium': "HH:mm:ss",
                    'short':  "HH:mm",
                  }

    dp_order = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr'  ],
              'days':    [ 'day',    'dy'  ],
              'weeks':   [ 'week',   'wk'  ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr'  ],
            }

    # text constants to be used by regex's later.
    # These values are spliced into regex templates via
    # '%(timeseperator)s'-style interpolation in _initPatterns(), so
    # they must be plain strings (like every other locale), not the
    # timeSep/dateSep *lists*: interpolating the lists produced
    # patterns containing "[u':']" instead of ":".
    re_consts = { 'specials':       'in|on|of|at',
                  'timeseperator':  ':',
                  'dateseperator':  '/',
                  'rangeseperator': '-',
                  'daysuffix':      'rd|st|nd|th',
                  'qunits':         'h|m|s|d|w|m|y',
                  'now':            [ 'now' ],
                }

    # Used to adjust the returned date before/after the source
    modifiers = { 'from':     1,
                  'before':  -1,
                  'after':    1,
                  'ago':      1,
                  'prior':   -1,
                  'prev':    -1,
                  'last':    -1,
                  'next':     1,
                  'previous':-1,
                  'in a':     2,
                  'end of':   0,
                  'eo':       0,
                }

    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }

    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
class pdtLocale_de:
    """
    de_DE Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Contributed by Debian parsedatetime package maintainer Bernd Zeimetz <bzed@debian.org>

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for German
    """
    localeID = 'de_DE'   # don't use a unicode string
    dateSep  = [ u'.' ]
    timeSep  = [ u':' ]
    meridian = [ ]

    usesMeridian = False
    uses24       = True

    Weekdays      = [ u'montag', u'dienstag', u'mittwoch',
                      u'donnerstag', u'freitag', u'samstag', u'sonntag',
                    ]
    shortWeekdays = [ u'mo', u'di', u'mi',
                      u'do', u'fr', u'sa', u'so',
                    ]
    Months        = [ u'januar',  u'februar', u'm\xe4rz',
                      u'april',   u'mai',     u'juni',
                      u'juli',    u'august',  u'september',
                      u'oktober', u'november', u'dezember',
                    ]
    shortMonths   = [ u'jan', u'feb', u'mrz',
                      u'apr', u'mai', u'jun',
                      u'jul', u'aug', u'sep',
                      u'okt', u'nov', u'dez',
                    ]
    dateFormats = { 'full':   u'EEEE, d. MMMM yyyy',
                    'long':   u'd. MMMM yyyy',
                    'medium': u'dd.MM.yyyy',
                    'short':  u'dd.MM.yy'
                  }
    timeFormats = { 'full':   u'HH:mm:ss v',
                    'long':   u'HH:mm:ss z',
                    'medium': u'HH:mm:ss',
                    'short':  u'HH:mm'
                  }

    dp_order = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'sekunden', 'sek', 's' ],
              'minutes': [ 'minuten',  'min', 'm' ],
              'hours':   [ 'stunden',  'std', 'h' ],
              'days':    [ 'tage',     't' ],
              'weeks':   [ 'wochen',   'w' ],
              'months':  [ 'monate' ],  # the short version would be a capital M,
                                        # as I understand it we can't distinguish
                                        # between m for minutes and M for months.
              'years':   [ 'jahre',    'j' ],
            }

    # text constants to be used by regex's later
    re_consts = { 'specials':       'am|dem|der|im|in|den|zum',
                  'timeseperator':  ':',
                  'rangeseperator': '-',
                  'daysuffix':      '',
                  'qunits':         'h|m|s|t|w|m|j',
                  'now':            [ 'jetzt' ],
                }

    # Used to adjust the returned date before/after the source
    # still looking for insight on how to translate all of them to german.
    modifiers = { u'from':         1,
                  u'before':      -1,
                  u'after':        1,
                  u'vergangener': -1,
                  u'vorheriger':  -1,
                  u'prev':        -1,
                  u'letzter':     -1,
                  u'n\xe4chster':  1,
                  u'dieser':       0,
                  u'previous':    -1,
                  u'in a':         2,
                  u'end of':       0,
                  u'eod':          0,
                  u'eo':           0,
                }

    # morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
    dayoffsets = { u'morgen':        1,
                   u'heute':         0,
                   u'gestern':      -1,
                   u'vorgestern':   -2,
                   u'\xfcbermorgen': 2,
                 }

    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { u'mittag':       { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'mittags':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'mittagessen':  { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'morgen':       { 'hr':  6, 'mn': 0, 'sec': 0 },
                   u'morgens':      { 'hr':  6, 'mn': 0, 'sec': 0 },
                   # "breakfast" at 08:00; was garbled as u'fr\e4hst\xe4ck'
                   # (an accidental backslash-e escape), now the intended
                   # spelling u'fr\xfchst\xfcck' ("fruehstueck")
                   u'fr\xfchst\xfcck': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   u'abendessen':   { 'hr': 19, 'mn': 0, 'sec': 0 },
                   u'abend':        { 'hr': 18, 'mn': 0, 'sec': 0 },
                   u'abends':       { 'hr': 18, 'mn': 0, 'sec': 0 },
                   u'mitternacht':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   u'nacht':        { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'nachts':       { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'heute abend':  { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'heute nacht':  { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'feierabend':   { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
# Registry mapping locale identifiers to the fallback locale classes above.
# _initLocale() consults this when PyICU is unavailable (or usePyICU=False).
# NOTE(review): pdtLocale_es.localeID is 'es' but it is registered here under
# 'es_ES' -- a lookup keyed by the class's own localeID would miss; confirm
# which identifier is intended.
pdtLocales = { 'en_US': pdtLocale_en,
               'en_AU': pdtLocale_au,
               'es_ES': pdtLocale_es,
               'de_DE': pdtLocale_de,
             }
def _initSymbols(ptc):
    """
    Helper function to initialize the single character constants
    and other symbols needed: the time/date separators, the meridian
    (AM/PM) text, and the date-part ordering used when parsing numeric
    dates.

    When PyICU is available the values are probed by formatting known
    datetimes with the locale's short formats and inspecting the text
    that comes back; otherwise they are copied from the internal
    locale class selected by _initLocale().
    """
    # en_US-style defaults; overwritten by both branches below
    ptc.timeSep  = [ u':' ]
    ptc.dateSep  = [ u'/' ]
    ptc.meridian = [ u'AM', u'PM' ]

    ptc.usesMeridian = True
    ptc.uses24       = False

    if pyicu and ptc.usePyICU:
        am = u''
        pm = u''
        ts = ''

        # ICU doesn't seem to provide directly the
        # date or time seperator - so we have to
        # figure it out
        o = ptc.icu_tf['short']
        s = ptc.timeFormats['short']

        # pattern letters: 'a' marks a meridian field, 'H' a 24h hour
        ptc.usesMeridian = u'a' in s
        ptc.uses24       = u'H' in s

        # '11:45 AM' or '11:45'
        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))

        # strip the digits, leaving ': AM' or ':'
        s = s.replace('11', '').replace('45', '')

        if len(s) > 0:
            ts = s[0]

        if ptc.usesMeridian:
            # what remains after the separator is the AM text
            am = s[1:].strip()

            # format an afternoon time to capture the PM text
            # '23:45 PM' or '23:45'
            s = o.format(datetime.datetime(2003, 10, 30, 23, 45))

            if ptc.uses24:
                s = s.replace('23', '')
            else:
                s = s.replace('11', '')

            # 'PM' or ''
            pm = s.replace('45', '').replace(ts, '').strip()

        ptc.timeSep  = [ ts ]
        ptc.meridian = [ am, pm ]

        # same probing trick for the date separator: format a known
        # date and strip every digit group it could contain
        o = ptc.icu_df['short']
        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
        s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')

        if len(s) > 0:
            ds = s[0]
        else:
            ds = '/'

        ptc.dateSep = [ ds ]

        # derive the day/month/year ordering (e.g. ['m','d','y'] for
        # 'M/d/yy') from the short date pattern's field letters
        s = ptc.dateFormats['short']
        l = s.lower().split(ds)

        dp_order = []

        for s in l:
            if len(s) > 0:
                dp_order.append(s[:1])

        ptc.dp_order = dp_order
    else:
        # no PyICU: take everything straight from the fallback locale
        ptc.timeSep      = ptc.locale.timeSep
        ptc.dateSep      = ptc.locale.dateSep
        ptc.meridian     = ptc.locale.meridian
        ptc.usesMeridian = ptc.locale.usesMeridian
        ptc.uses24       = ptc.locale.uses24
        ptc.dp_order     = ptc.locale.dp_order

    # build am and pm lists to contain
    # original case, lowercase and first-char
    # versions of the meridian text
    if len(ptc.meridian) > 0:
        am     = ptc.meridian[0]
        ptc.am = [ am ]

        if len(am) > 0:
            ptc.am.append(am[0])
            am = am.lower()
            ptc.am.append(am)
            ptc.am.append(am[0])
    else:
        am     = ''
        ptc.am = [ '', '' ]

    if len(ptc.meridian) > 1:
        pm     = ptc.meridian[1]
        ptc.pm = [ pm ]

        if len(pm) > 0:
            ptc.pm.append(pm[0])
            pm = pm.lower()
            ptc.pm.append(pm)
            ptc.pm.append(pm[0])
    else:
        pm     = ''
        ptc.pm = [ '', '' ]
def _initPatterns(ptc):
"""
Helper function to take the different localized bits from ptc and
create the regex strings.
"""
# TODO add code to parse the date formats and build the regexes up from sub-parts
# TODO find all hard-coded uses of date/time seperators
ptc.RE_DATE4 = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?)
(?P<mthname>(%(months)s|%(shortmonths)s))\s?
(?P<year>\d\d(\d\d)?)?
)
)''' % ptc.re_values
# I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16
# I suspect the final line was for a trailing time - but testing shows it's not needed
# ptc.RE_DATE3 = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?
# ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)?
# (,\s?(?P<year>\d\d(\d\d)?))?))
# (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_DATE3 = r'''(?P<date>(
(((?P<mthname>(%(months)s|%(shortmonths)s))|
((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2}
((,)?(\s)?(?P<year>\d\d(\d\d)?))?
)
)''' % ptc.re_values
ptc.RE_MONTH = r'''(\s?|^)
(?P<month>(
(?P<mthname>(%(months)s|%(shortmonths)s))
(\s?(?P<year>(\d\d\d\d)))?
))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_WEEKDAY = r'''(\s?|^)
(?P<weekday>(%(days)s|%(shortdays)s))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_SPECIAL = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values
ptc.RE_UNITS = r'''(?P<qty>(-?\d+\s*
(?P<units>((%(units)s)s?))
))''' % ptc.re_values
ptc.RE_QUNITS = r'''(?P<qty>(-?\d+\s?
(?P<qunits>%(qunits)s)
(\s?|,|$)
))''' % ptc.re_values
ptc.RE_MODIFIER = r'''(\s?|^)
(?P<modifier>
(previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % ptc.re_values
ptc.RE_MODIFIER2 = r'''(\s?|^)
(?P<modifier>
(from|before|after|ago|prior))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_TIMEHMS = r'''(\s?|^)
(?P<hours>\d\d?)
(?P<tsep>%(timeseperator)s|)
(?P<minutes>\d\d)
(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % ptc.re_values
ptc.RE_TIMEHMS2 = r'''(?P<hours>(\d\d?))
((?P<tsep>%(timeseperator)s|)
(?P<minutes>(\d\d?))
(?:(?P=tsep)
(?P<seconds>\d\d?
(?:[.,]\d+)?))?)?''' % ptc.re_values
if 'meridian' in ptc.re_values:
ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values
dateSeps = ''.join(ptc.dateSep) + '.'
ptc.RE_DATE = r'''(\s?|^)
(?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?))
(\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps)
ptc.RE_DATE2 = r'[%s]' % dateSeps
ptc.RE_DAY = r'''(\s?|^)
(?P<day>(today|tomorrow|yesterday))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_DAY2 = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s)
''' % ptc.re_values
ptc.RE_TIME = r'''(\s?|^)
(?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_REMAINING = r'\s+'
# Regex for date/time ranges
ptc.RE_RTIMEHMS = r'''(\s?|^)
(\d\d?)%(timeseperator)s
(\d\d)
(%(timeseperator)s(\d\d))?
(\s?|$)''' % ptc.re_values
ptc.RE_RTIMEHMS2 = r'''(\s?|^)
(\d\d?)
(%(timeseperator)s(\d\d?))?
(%(timeseperator)s(\d\d?))?''' % ptc.re_values
if 'meridian' in ptc.re_values:
ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values
ptc.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps
ptc.RE_RDATE3 = r'''((((%(months)s))\s?
((\d\d?)
(\s?|%(daysuffix)s|$)+)?
(,\s?\d\d\d\d)?))''' % ptc.re_values
# "06/07/06 - 08/09/06"
ptc.DATERNG1 = ptc.RE_RDATE + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE
ptc.DATERNG1 = ptc.DATERNG1 % ptc.re_values
# "march 31 - june 1st, 2006"
ptc.DATERNG2 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE3
ptc.DATERNG2 = ptc.DATERNG2 % ptc.re_values
# "march 1rd -13th"
ptc.DATERNG3 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?'
ptc.DATERNG3 = ptc.DATERNG3 % ptc.re_values
# "4:00:55 pm - 5:90:44 am", '4p-5p'
ptc.TIMERNG1 = ptc.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG1 = ptc.TIMERNG1 % ptc.re_values
# "4:00 - 5:90 ", "4:55:55-3:44:55"
ptc.TIMERNG2 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS
ptc.TIMERNG2 = ptc.TIMERNG2 % ptc.re_values
# "4-5pm "
ptc.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG3 = ptc.TIMERNG3 % ptc.re_values
# "4:30-5pm "
ptc.TIMERNG4 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG4 = ptc.TIMERNG4 % ptc.re_values
def _initConstants(ptc):
"""
Create localized versions of the units, week and month names
"""
# build weekday offsets - yes, it assumes the Weekday and shortWeekday
# lists are in the same order and Mon..Sun (Python style)
ptc.WeekdayOffsets = {}
o = 0
for key in ptc.Weekdays:
ptc.WeekdayOffsets[key] = o
o += 1
o = 0
for key in ptc.shortWeekdays:
ptc.WeekdayOffsets[key] = o
o += 1
# build month offsets - yes, it assumes the Months and shortMonths
# lists are in the same order and Jan..Dec
ptc.MonthOffsets = {}
o = 1
for key in ptc.Months:
ptc.MonthOffsets[key] = o
o += 1
o = 1
for key in ptc.shortMonths:
ptc.MonthOffsets[key] = o
o += 1
# ptc.DaySuffixes = ptc.re_consts['daysuffix'].split('|')
class Constants:
"""
Default set of constants for parsedatetime.
If PyICU is present, then the class will first try to get PyICU
to return a locale specified by C{localeID}. If either C{localeID} is
None or if the locale does not exist within PyICU, then each of the
locales defined in C{fallbackLocales} is tried in order.
If PyICU is not present or none of the specified locales can be used,
then the class will initialize itself to the en_US locale.
if PyICU is not present or not requested, only the locales defined by
C{pdtLocales} will be searched.
"""
def __init__(self, localeID=None, usePyICU=True, fallbackLocales=['en_US']):
self.localeID = localeID
self.fallbackLocales = fallbackLocales
if 'en_US' not in self.fallbackLocales:
self.fallbackLocales.append('en_US')
# define non-locale specific constants
self.locale = None
self.usePyICU = usePyICU
# starting cache of leap years
# daysInMonth will add to this if during
# runtime it gets a request for a year not found
self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
2080, 2084, 2088, 2092, 2096 ]
self.Second = 1
self.Minute = 60 * self.Second
self.Hour = 60 * self.Minute
self.Day = 24 * self.Hour
self.Week = 7 * self.Day
self.Month = 30 * self.Day
self.Year = 365 * self.Day
self.rangeSep = u'-'
self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
self.BirthdayEpoch = 50
# DOWParseStyle controls how we parse "Tuesday"
# If the current day was Thursday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Current day marked as ***
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current -1,0 ***
# week +1 +1
#
# If the current day was Monday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1 -1
# current *** 0,+1
# week +1
self.DOWParseStyle = 1
# CurrentDOWParseStyle controls how we parse "Friday"
# If the current day was Friday and the text to parse is "Friday"
# then the following table shows how each style would be returned
# True/False. This also depends on DOWParseStyle.
#
# Current day marked as ***
#
# DOWParseStyle = 0
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T,F
# week +1
#
# DOWParseStyle = -1
# Sun Mon Tue Wed Thu Fri Sat
# week -1 F
# current T
# week +1
#
# DOWParseStyle = +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T
# week +1 F
self.CurrentDOWParseStyle = False
# initalize attributes to empty values to ensure
# they are defined
self.re_sources = None
self.re_values = None
self.Modifiers = None
self.dayOffsets = None
self.WeekdayOffsets = None
self.MonthOffsets = None
self.dateSep = None
self.timeSep = None
self.am = None
self.pm = None
self.meridian = None
self.usesMeridian = None
self.uses24 = None
self.dp_order = None
self.RE_DATE4 = r''
self.RE_DATE3 = r''
self.RE_MONTH = r''
self.RE_WEEKDAY = r''
self.RE_SPECIAL = r''
self.RE_UNITS = r''
self.RE_QUNITS = r''
self.RE_MODIFIER = r''
self.RE_MODIFIER2 = r''
self.RE_TIMEHMS = r''
self.RE_TIMEHMS2 = r''
self.RE_DATE = r''
self.RE_DATE2 = r''
self.RE_DAY = r''
self.RE_DAY2 = r''
self.RE_TIME = r''
self.RE_REMAINING = r''
self.RE_RTIMEHMS = r''
self.RE_RTIMEHMS2 = r''
self.RE_RDATE = r''
self.RE_RDATE3 = r''
self.DATERNG1 = r''
self.DATERNG2 = r''
self.DATERNG3 = r''
self.TIMERNG1 = r''
self.TIMERNG2 = r''
self.TIMERNG3 = r''
self.TIMERNG4 = r''
_initLocale(self)
_initConstants(self)
_initSymbols(self)
_initPatterns(self)
self.re_option = re.IGNORECASE + re.VERBOSE
self.cre_source = { 'CRE_SPECIAL': self.RE_SPECIAL,
'CRE_UNITS': self.RE_UNITS,
'CRE_QUNITS': self.RE_QUNITS,
'CRE_MODIFIER': self.RE_MODIFIER,
'CRE_MODIFIER2': self.RE_MODIFIER2,
'CRE_TIMEHMS': self.RE_TIMEHMS,
'CRE_TIMEHMS2': self.RE_TIMEHMS2,
'CRE_DATE': self.RE_DATE,
'CRE_DATE2': self.RE_DATE2,
'CRE_DATE3': self.RE_DATE3,
'CRE_DATE4': self.RE_DATE4,
'CRE_MONTH': self.RE_MONTH,
'CRE_WEEKDAY': self.RE_WEEKDAY,
'CRE_DAY': self.RE_DAY,
'CRE_DAY2': self.RE_DAY2,
'CRE_TIME': self.RE_TIME,
'CRE_REMAINING': self.RE_REMAINING,
'CRE_RTIMEHMS': self.RE_RTIMEHMS,
'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
'CRE_RDATE': self.RE_RDATE,
'CRE_RDATE3': self.RE_RDATE3,
'CRE_TIMERNG1': self.TIMERNG1,
'CRE_TIMERNG2': self.TIMERNG2,
'CRE_TIMERNG3': self.TIMERNG3,
'CRE_TIMERNG4': self.TIMERNG4,
'CRE_DATERNG1': self.DATERNG1,
'CRE_DATERNG2': self.DATERNG2,
'CRE_DATERNG3': self.DATERNG3,
}
self.cre_keys = self.cre_source.keys()
def __getattr__(self, name):
if name in self.cre_keys:
value = re.compile(self.cre_source[name], self.re_option)
setattr(self, name, value)
return value
else:
raise AttributeError, name
def daysInMonth(self, month, year):
"""
Take the given month (1-12) and a given year (4 digit) return
the number of days in the month adjusting for leap year as needed
"""
result = None
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result
def buildSources(self, sourceTime=None):
"""
Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned.
"""
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
sources = {}
defaults = { 'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec, }
for item in self.re_sources:
values = {}
source = self.re_sources[item]
for key in defaults.keys():
if key in source:
values[key] = source[key]
else:
values[key] = defaults[key]
sources[item] = ( values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'], wd, yd, isdst )
return sources
|
vilmibm/done | parsedatetime/parsedatetime_consts.py | _initSymbols | python | def _initSymbols(ptc):
ptc.timeSep = [ u':' ]
ptc.dateSep = [ u'/' ]
ptc.meridian = [ u'AM', u'PM' ]
ptc.usesMeridian = True
ptc.uses24 = False
if pyicu and ptc.usePyICU:
am = u''
pm = u''
ts = ''
# ICU doesn't seem to provide directly the
# date or time seperator - so we have to
# figure it out
o = ptc.icu_tf['short']
s = ptc.timeFormats['short']
ptc.usesMeridian = u'a' in s
ptc.uses24 = u'H' in s
# '11:45 AM' or '11:45'
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
# ': AM' or ':'
s = s.replace('11', '').replace('45', '')
if len(s) > 0:
ts = s[0]
if ptc.usesMeridian:
# '23:45 AM' or '23:45'
am = s[1:].strip()
s = o.format(datetime.datetime(2003, 10, 30, 23, 45))
if ptc.uses24:
s = s.replace('23', '')
else:
s = s.replace('11', '')
# 'PM' or ''
pm = s.replace('45', '').replace(ts, '').strip()
ptc.timeSep = [ ts ]
ptc.meridian = [ am, pm ]
o = ptc.icu_df['short']
s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')
if len(s) > 0:
ds = s[0]
else:
ds = '/'
ptc.dateSep = [ ds ]
s = ptc.dateFormats['short']
l = s.lower().split(ds)
dp_order = []
for s in l:
if len(s) > 0:
dp_order.append(s[:1])
ptc.dp_order = dp_order
else:
ptc.timeSep = ptc.locale.timeSep
ptc.dateSep = ptc.locale.dateSep
ptc.meridian = ptc.locale.meridian
ptc.usesMeridian = ptc.locale.usesMeridian
ptc.uses24 = ptc.locale.uses24
ptc.dp_order = ptc.locale.dp_order
# build am and pm lists to contain
# original case, lowercase and first-char
# versions of the meridian text
if len(ptc.meridian) > 0:
am = ptc.meridian[0]
ptc.am = [ am ]
if len(am) > 0:
ptc.am.append(am[0])
am = am.lower()
ptc.am.append(am)
ptc.am.append(am[0])
else:
am = ''
ptc.am = [ '', '' ]
if len(ptc.meridian) > 1:
pm = ptc.meridian[1]
ptc.pm = [ pm ]
if len(pm) > 0:
ptc.pm.append(pm[0])
pm = pm.lower()
ptc.pm.append(pm)
ptc.pm.append(pm[0])
else:
pm = ''
ptc.pm = [ '', '' ] | Helper function to initialize the single character constants
and other symbols needed. | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L601-L707 | null | #!/usr/bin/env python
"""
parsedatetime constants and helper functions to determine
regex values from Locale information if present.
Also contains the internal Locale classes to give some sane
defaults if PyICU is not found.
"""
__license__ = """
Copyright (c) 2004-2008 Mike Taylor
Copyright (c) 2006-2008 Darshana Chhajed
Copyright (c) 2007 Bernd Zeimetz <bzed@debian.org>
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import PyICU as pyicu
except:
pyicu = None
import datetime
import calendar
import time
import re
class pdtLocale_en:
"""
en_US Locale constants
This class will be used to initialize L{Constants} if PyICU is not located.
Defined as class variables are the lists and strings needed by parsedatetime
to evaluate strings for USA
"""
localeID = 'en_US' # don't use a unicode string
dateSep = [ u'/', u'.' ]
timeSep = [ u':' ]
meridian = [ u'AM', u'PM' ]
usesMeridian = True
uses24 = False
Weekdays = [ u'monday', u'tuesday', u'wednesday',
u'thursday', u'friday', u'saturday', u'sunday',
]
shortWeekdays = [ u'mon', u'tues', u'wed',
u'thu', u'fri', u'sat', u'sun',
]
Months = [ u'january', u'february', u'march',
u'april', u'may', u'june',
u'july', u'august', u'september',
u'october', u'november', u'december',
]
shortMonths = [ u'jan', u'feb', u'mar',
u'apr', u'may', u'jun',
u'jul', u'aug', u'sep',
u'oct', u'nov', u'dec',
]
dateFormats = { 'full': 'EEEE, MMMM d, yyyy',
'long': 'MMMM d, yyyy',
'medium': 'MMM d, yyyy',
'short': 'M/d/yy',
}
timeFormats = { 'full': 'h:mm:ss a z',
'long': 'h:mm:ss a z',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
dp_order = [ u'm', u'd', u'y' ]
# this will be added to re_consts later
units = { 'seconds': [ 'second', 'sec' ],
'minutes': [ 'minute', 'min' ],
'hours': [ 'hour', 'hr' ],
'days': [ 'day', 'dy' ],
'weeks': [ 'week', 'wk' ],
'months': [ 'month', 'mth' ],
'years': [ 'year', 'yr' ],
}
# text constants to be used by regex's later
re_consts = { 'specials': 'in|on|of|at',
'timeseperator': ':',
'rangeseperator': '-',
'daysuffix': 'rd|st|nd|th',
'meridian': 'am|pm|a.m.|p.m.|a|p',
'qunits': 'h|m|s|d|w|m|y',
'now': [ 'now' ],
}
# Used to adjust the returned date before/after the source
modifiers = { 'from': 1,
'before': -1,
'after': 1,
'ago': -1,
'prior': -1,
'prev': -1,
'last': -1,
'next': 1,
'previous': -1,
'in a': 2,
'end of': 0,
'eod': 0,
'eo': 0
}
dayoffsets = { 'tomorrow': 1,
'today': 0,
'yesterday': -1,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = { 'noon': { 'hr': 12, 'mn': 0, 'sec': 0 },
'lunch': { 'hr': 12, 'mn': 0, 'sec': 0 },
'morning': { 'hr': 6, 'mn': 0, 'sec': 0 },
'breakfast': { 'hr': 8, 'mn': 0, 'sec': 0 },
'dinner': { 'hr': 19, 'mn': 0, 'sec': 0 },
'evening': { 'hr': 18, 'mn': 0, 'sec': 0 },
'midnight': { 'hr': 0, 'mn': 0, 'sec': 0 },
'night': { 'hr': 21, 'mn': 0, 'sec': 0 },
'tonight': { 'hr': 21, 'mn': 0, 'sec': 0 },
'eod': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
class pdtLocale_au:
"""
en_AU Locale constants
This class will be used to initialize L{Constants} if PyICU is not located.
Defined as class variables are the lists and strings needed by parsedatetime
to evaluate strings for Australia
"""
localeID = 'en_AU' # don't use a unicode string
dateSep = [ u'-', u'/' ]
timeSep = [ u':' ]
meridian = [ u'AM', u'PM' ]
usesMeridian = True
uses24 = False
Weekdays = [ u'monday', u'tuesday', u'wednesday',
u'thursday', u'friday', u'saturday', u'sunday',
]
shortWeekdays = [ u'mon', u'tues', u'wed',
u'thu', u'fri', u'sat', u'sun',
]
Months = [ u'january', u'february', u'march',
u'april', u'may', u'june',
u'july', u'august', u'september',
u'october', u'november', u'december',
]
shortMonths = [ u'jan', u'feb', u'mar',
u'apr', u'may', u'jun',
u'jul', u'aug', u'sep',
u'oct', u'nov', u'dec',
]
dateFormats = { 'full': 'EEEE, d MMMM yyyy',
'long': 'd MMMM yyyy',
'medium': 'dd/MM/yyyy',
'short': 'd/MM/yy',
}
timeFormats = { 'full': 'h:mm:ss a z',
'long': 'h:mm:ss a',
'medium': 'h:mm:ss a',
'short': 'h:mm a',
}
dp_order = [ u'd', u'm', u'y' ]
# this will be added to re_consts later
units = { 'seconds': [ 'second', 'sec' ],
'minutes': [ 'minute', 'min' ],
'hours': [ 'hour', 'hr' ],
'days': [ 'day', 'dy' ],
'weeks': [ 'week', 'wk' ],
'months': [ 'month', 'mth' ],
'years': [ 'year', 'yr' ],
}
# text constants to be used by regex's later
re_consts = { 'specials': 'in|on|of|at',
'timeseperator': ':',
'rangeseperator': '-',
'daysuffix': 'rd|st|nd|th',
'meridian': 'am|pm|a.m.|p.m.|a|p',
'qunits': 'h|m|s|d|w|m|y',
'now': [ 'now' ],
}
# Used to adjust the returned date before/after the source
modifiers = { 'from': 1,
'before': -1,
'after': 1,
'ago': 1,
'prior': -1,
'prev': -1,
'last': -1,
'next': 1,
'previous': -1,
'in a': 2,
'end of': 0,
'eo': 0,
}
dayoffsets = { 'tomorrow': 1,
'today': 0,
'yesterday': -1,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = { 'noon': { 'hr': 12, 'mn': 0, 'sec': 0 },
'lunch': { 'hr': 12, 'mn': 0, 'sec': 0 },
'morning': { 'hr': 6, 'mn': 0, 'sec': 0 },
'breakfast': { 'hr': 8, 'mn': 0, 'sec': 0 },
'dinner': { 'hr': 19, 'mn': 0, 'sec': 0 },
'evening': { 'hr': 18, 'mn': 0, 'sec': 0 },
'midnight': { 'hr': 0, 'mn': 0, 'sec': 0 },
'night': { 'hr': 21, 'mn': 0, 'sec': 0 },
'tonight': { 'hr': 21, 'mn': 0, 'sec': 0 },
'eod': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
class pdtLocale_es:
"""
es Locale constants
This class will be used to initialize L{Constants} if PyICU is not located.
Defined as class variables are the lists and strings needed by parsedatetime
to evaluate strings in Spanish
Note that I don't speak Spanish so many of the items below are still in English
"""
localeID = 'es' # don't use a unicode string
dateSep = [ u'/' ]
timeSep = [ u':' ]
meridian = []
usesMeridian = False
uses24 = True
Weekdays = [ u'lunes', u'martes', u'mi\xe9rcoles',
u'jueves', u'viernes', u's\xe1bado', u'domingo',
]
shortWeekdays = [ u'lun', u'mar', u'mi\xe9',
u'jue', u'vie', u's\xe1b', u'dom',
]
Months = [ u'enero', u'febrero', u'marzo',
u'abril', u'mayo', u'junio',
u'julio', u'agosto', u'septiembre',
u'octubre', u'noviembre', u'diciembre'
]
shortMonths = [ u'ene', u'feb', u'mar',
u'abr', u'may', u'jun',
u'jul', u'ago', u'sep',
u'oct', u'nov', u'dic'
]
dateFormats = { 'full': "EEEE d' de 'MMMM' de 'yyyy",
'long': "d' de 'MMMM' de 'yyyy",
'medium': "dd-MMM-yy",
'short': "d/MM/yy",
}
timeFormats = { 'full': "HH'H'mm' 'ss z",
'long': "HH:mm:ss z",
'medium': "HH:mm:ss",
'short': "HH:mm",
}
dp_order = [ u'd', u'm', u'y' ]
# this will be added to re_consts later
units = { 'seconds': [ 'second', 'sec' ],
'minutes': [ 'minute', 'min' ],
'hours': [ 'hour', 'hr' ],
'days': [ 'day', 'dy' ],
'weeks': [ 'week', 'wk' ],
'months': [ 'month', 'mth' ],
'years': [ 'year', 'yr' ],
}
# text constants to be used by regex's later
re_consts = { 'specials': 'in|on|of|at',
'timeseperator': timeSep,
'dateseperator': dateSep,
'rangeseperator': '-',
'daysuffix': 'rd|st|nd|th',
'qunits': 'h|m|s|d|w|m|y',
'now': [ 'now' ],
}
# Used to adjust the returned date before/after the source
modifiers = { 'from': 1,
'before': -1,
'after': 1,
'ago': 1,
'prior': -1,
'prev': -1,
'last': -1,
'next': 1,
'previous': -1,
'in a': 2,
'end of': 0,
'eo': 0,
}
dayoffsets = { 'tomorrow': 1,
'today': 0,
'yesterday': -1,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = { 'noon': { 'hr': 12, 'mn': 0, 'sec': 0 },
'lunch': { 'hr': 12, 'mn': 0, 'sec': 0 },
'morning': { 'hr': 6, 'mn': 0, 'sec': 0 },
'breakfast': { 'hr': 8, 'mn': 0, 'sec': 0 },
'dinner': { 'hr': 19, 'mn': 0, 'sec': 0 },
'evening': { 'hr': 18, 'mn': 0, 'sec': 0 },
'midnight': { 'hr': 0, 'mn': 0, 'sec': 0 },
'night': { 'hr': 21, 'mn': 0, 'sec': 0 },
'tonight': { 'hr': 21, 'mn': 0, 'sec': 0 },
'eod': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
class pdtLocale_de:
"""
de_DE Locale constants
This class will be used to initialize L{Constants} if PyICU is not located.
Contributed by Debian parsedatetime package maintainer Bernd Zeimetz <bzed@debian.org>
Defined as class variables are the lists and strings needed by parsedatetime
to evaluate strings for German
"""
localeID = 'de_DE' # don't use a unicode string
dateSep = [ u'.' ]
timeSep = [ u':' ]
meridian = [ ]
usesMeridian = False
uses24 = True
Weekdays = [ u'montag', u'dienstag', u'mittwoch',
u'donnerstag', u'freitag', u'samstag', u'sonntag',
]
shortWeekdays = [ u'mo', u'di', u'mi',
u'do', u'fr', u'sa', u'so',
]
Months = [ u'januar', u'februar', u'm\xe4rz',
u'april', u'mai', u'juni',
u'juli', u'august', u'september',
u'oktober', u'november', u'dezember',
]
shortMonths = [ u'jan', u'feb', u'mrz',
u'apr', u'mai', u'jun',
u'jul', u'aug', u'sep',
u'okt', u'nov', u'dez',
]
dateFormats = { 'full': u'EEEE, d. MMMM yyyy',
'long': u'd. MMMM yyyy',
'medium': u'dd.MM.yyyy',
'short': u'dd.MM.yy'
}
timeFormats = { 'full': u'HH:mm:ss v',
'long': u'HH:mm:ss z',
'medium': u'HH:mm:ss',
'short': u'HH:mm'
}
dp_order = [ u'd', u'm', u'y' ]
# this will be added to re_consts later
units = { 'seconds': [ 'sekunden', 'sek', 's' ],
'minutes': [ 'minuten', 'min' , 'm' ],
'hours': [ 'stunden', 'std', 'h' ],
'days': [ 'tage', 't' ],
'weeks': [ 'wochen', 'w' ],
'months': [ 'monate' ], #the short version would be a capital M,
#as I understand it we can't distinguis
#between m for minutes and M for months.
'years': [ 'jahre', 'j' ],
}
# text constants to be used by regex's later
re_consts = { 'specials': 'am|dem|der|im|in|den|zum',
'timeseperator': ':',
'rangeseperator': '-',
'daysuffix': '',
'qunits': 'h|m|s|t|w|m|j',
'now': [ 'jetzt' ],
}
# Used to adjust the returned date before/after the source
#still looking for insight on how to translate all of them to german.
modifiers = { u'from': 1,
u'before': -1,
u'after': 1,
u'vergangener': -1,
u'vorheriger': -1,
u'prev': -1,
u'letzter': -1,
u'n\xe4chster': 1,
u'dieser': 0,
u'previous': -1,
u'in a': 2,
u'end of': 0,
u'eod': 0,
u'eo': 0,
}
#morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
dayoffsets = { u'morgen': 1,
u'heute': 0,
u'gestern': -1,
u'vorgestern': -2,
u'\xfcbermorgen': 2,
}
# special day and/or times, i.e. lunch, noon, evening
# each element in the dictionary is a dictionary that is used
# to fill in any value to be replace - the current date/time will
# already have been populated by the method buildSources
re_sources = { u'mittag': { 'hr': 12, 'mn': 0, 'sec': 0 },
u'mittags': { 'hr': 12, 'mn': 0, 'sec': 0 },
u'mittagessen': { 'hr': 12, 'mn': 0, 'sec': 0 },
u'morgen': { 'hr': 6, 'mn': 0, 'sec': 0 },
u'morgens': { 'hr': 6, 'mn': 0, 'sec': 0 },
u'fr\e4hst\xe4ck': { 'hr': 8, 'mn': 0, 'sec': 0 },
u'abendessen': { 'hr': 19, 'mn': 0, 'sec': 0 },
u'abend': { 'hr': 18, 'mn': 0, 'sec': 0 },
u'abends': { 'hr': 18, 'mn': 0, 'sec': 0 },
u'mitternacht': { 'hr': 0, 'mn': 0, 'sec': 0 },
u'nacht': { 'hr': 21, 'mn': 0, 'sec': 0 },
u'nachts': { 'hr': 21, 'mn': 0, 'sec': 0 },
u'heute abend': { 'hr': 21, 'mn': 0, 'sec': 0 },
u'heute nacht': { 'hr': 21, 'mn': 0, 'sec': 0 },
u'feierabend': { 'hr': 17, 'mn': 0, 'sec': 0 },
}
pdtLocales = { 'en_US': pdtLocale_en,
'en_AU': pdtLocale_au,
'es_ES': pdtLocale_es,
'de_DE': pdtLocale_de,
}
def _initLocale(ptc):
"""
Helper function to initialize the different lists and strings
from either PyICU or one of the internal pdt Locales and store
them into ptc.
"""
def lcase(x):
return x.lower()
if pyicu and ptc.usePyICU:
ptc.icuLocale = None
if ptc.localeID is not None:
ptc.icuLocale = pyicu.Locale(ptc.localeID)
if ptc.icuLocale is None:
for id in range(0, len(ptc.fallbackLocales)):
ptc.localeID = ptc.fallbackLocales[id]
ptc.icuLocale = pyicu.Locale(ptc.localeID)
if ptc.icuLocale is not None:
break
ptc.icuSymbols = pyicu.DateFormatSymbols(ptc.icuLocale)
# grab ICU list of weekdays, skipping first entry which
# is always blank
wd = map(lcase, ptc.icuSymbols.getWeekdays()[1:])
swd = map(lcase, ptc.icuSymbols.getShortWeekdays()[1:])
# store them in our list with Monday first (ICU puts Sunday first)
ptc.Weekdays = wd[1:] + wd[0:1]
ptc.shortWeekdays = swd[1:] + swd[0:1]
ptc.Months = map(lcase, ptc.icuSymbols.getMonths())
ptc.shortMonths = map(lcase, ptc.icuSymbols.getShortMonths())
# not quite sure how to init this so for now
# set it to none so it will be set to the en_US defaults for now
ptc.re_consts = None
ptc.icu_df = { 'full': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull, ptc.icuLocale),
'long': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong, ptc.icuLocale),
'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
'short': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort, ptc.icuLocale),
}
ptc.icu_tf = { 'full': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull, ptc.icuLocale),
'long': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong, ptc.icuLocale),
'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
'short': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort, ptc.icuLocale),
}
ptc.dateFormats = { 'full': ptc.icu_df['full'].toPattern(),
'long': ptc.icu_df['long'].toPattern(),
'medium': ptc.icu_df['medium'].toPattern(),
'short': ptc.icu_df['short'].toPattern(),
}
ptc.timeFormats = { 'full': ptc.icu_tf['full'].toPattern(),
'long': ptc.icu_tf['long'].toPattern(),
'medium': ptc.icu_tf['medium'].toPattern(),
'short': ptc.icu_tf['short'].toPattern(),
}
else:
if not ptc.localeID in pdtLocales:
for id in range(0, len(ptc.fallbackLocales)):
ptc.localeID = ptc.fallbackLocales[id]
if ptc.localeID in pdtLocales:
break
ptc.locale = pdtLocales[ptc.localeID]
ptc.usePyICU = False
ptc.Weekdays = ptc.locale.Weekdays
ptc.shortWeekdays = ptc.locale.shortWeekdays
ptc.Months = ptc.locale.Months
ptc.shortMonths = ptc.locale.shortMonths
ptc.dateFormats = ptc.locale.dateFormats
ptc.timeFormats = ptc.locale.timeFormats
# these values are used to setup the various bits
# of the regex values used to parse
#
# check if a local set of constants has been
# provided, if not use en_US as the default
if ptc.localeID in pdtLocales:
ptc.re_sources = pdtLocales[ptc.localeID].re_sources
ptc.re_values = pdtLocales[ptc.localeID].re_consts
units = pdtLocales[ptc.localeID].units
ptc.Modifiers = pdtLocales[ptc.localeID].modifiers
ptc.dayOffsets = pdtLocales[ptc.localeID].dayoffsets
# for now, pull over any missing keys from the US set
for key in pdtLocales['en_US'].re_consts:
if not key in ptc.re_values:
ptc.re_values[key] = pdtLocales['en_US'].re_consts[key]
else:
ptc.re_sources = pdtLocales['en_US'].re_sources
ptc.re_values = pdtLocales['en_US'].re_consts
ptc.Modifiers = pdtLocales['en_US'].modifiers
ptc.dayOffsets = pdtLocales['en_US'].dayoffsets
units = pdtLocales['en_US'].units
# escape any regex special characters that may be found
wd = tuple(map(re.escape, ptc.Weekdays))
swd = tuple(map(re.escape, ptc.shortWeekdays))
mth = tuple(map(re.escape, ptc.Months))
smth = tuple(map(re.escape, ptc.shortMonths))
ptc.re_values['months'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % mth
ptc.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % smth
ptc.re_values['days'] = '%s|%s|%s|%s|%s|%s|%s' % wd
ptc.re_values['shortdays'] = '%s|%s|%s|%s|%s|%s|%s' % swd
l = []
for unit in units:
l.append('|'.join(units[unit]))
ptc.re_values['units'] = '|'.join(l)
ptc.Units = ptc.re_values['units'].split('|')
def _initPatterns(ptc):
"""
Helper function to take the different localized bits from ptc and
create the regex strings.
"""
# TODO add code to parse the date formats and build the regexes up from sub-parts
# TODO find all hard-coded uses of date/time seperators
ptc.RE_DATE4 = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?)
(?P<mthname>(%(months)s|%(shortmonths)s))\s?
(?P<year>\d\d(\d\d)?)?
)
)''' % ptc.re_values
# I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16
# I suspect the final line was for a trailing time - but testing shows it's not needed
# ptc.RE_DATE3 = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?
# ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)?
# (,\s?(?P<year>\d\d(\d\d)?))?))
# (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_DATE3 = r'''(?P<date>(
(((?P<mthname>(%(months)s|%(shortmonths)s))|
((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2}
((,)?(\s)?(?P<year>\d\d(\d\d)?))?
)
)''' % ptc.re_values
ptc.RE_MONTH = r'''(\s?|^)
(?P<month>(
(?P<mthname>(%(months)s|%(shortmonths)s))
(\s?(?P<year>(\d\d\d\d)))?
))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_WEEKDAY = r'''(\s?|^)
(?P<weekday>(%(days)s|%(shortdays)s))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_SPECIAL = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values
ptc.RE_UNITS = r'''(?P<qty>(-?\d+\s*
(?P<units>((%(units)s)s?))
))''' % ptc.re_values
ptc.RE_QUNITS = r'''(?P<qty>(-?\d+\s?
(?P<qunits>%(qunits)s)
(\s?|,|$)
))''' % ptc.re_values
ptc.RE_MODIFIER = r'''(\s?|^)
(?P<modifier>
(previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % ptc.re_values
ptc.RE_MODIFIER2 = r'''(\s?|^)
(?P<modifier>
(from|before|after|ago|prior))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_TIMEHMS = r'''(\s?|^)
(?P<hours>\d\d?)
(?P<tsep>%(timeseperator)s|)
(?P<minutes>\d\d)
(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % ptc.re_values
ptc.RE_TIMEHMS2 = r'''(?P<hours>(\d\d?))
((?P<tsep>%(timeseperator)s|)
(?P<minutes>(\d\d?))
(?:(?P=tsep)
(?P<seconds>\d\d?
(?:[.,]\d+)?))?)?''' % ptc.re_values
if 'meridian' in ptc.re_values:
ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values
dateSeps = ''.join(ptc.dateSep) + '.'
ptc.RE_DATE = r'''(\s?|^)
(?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?))
(\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps)
ptc.RE_DATE2 = r'[%s]' % dateSeps
ptc.RE_DAY = r'''(\s?|^)
(?P<day>(today|tomorrow|yesterday))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_DAY2 = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s)
''' % ptc.re_values
ptc.RE_TIME = r'''(\s?|^)
(?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_REMAINING = r'\s+'
# Regex for date/time ranges
ptc.RE_RTIMEHMS = r'''(\s?|^)
(\d\d?)%(timeseperator)s
(\d\d)
(%(timeseperator)s(\d\d))?
(\s?|$)''' % ptc.re_values
ptc.RE_RTIMEHMS2 = r'''(\s?|^)
(\d\d?)
(%(timeseperator)s(\d\d?))?
(%(timeseperator)s(\d\d?))?''' % ptc.re_values
if 'meridian' in ptc.re_values:
ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values
ptc.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps
ptc.RE_RDATE3 = r'''((((%(months)s))\s?
((\d\d?)
(\s?|%(daysuffix)s|$)+)?
(,\s?\d\d\d\d)?))''' % ptc.re_values
# "06/07/06 - 08/09/06"
ptc.DATERNG1 = ptc.RE_RDATE + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE
ptc.DATERNG1 = ptc.DATERNG1 % ptc.re_values
# "march 31 - june 1st, 2006"
ptc.DATERNG2 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE3
ptc.DATERNG2 = ptc.DATERNG2 % ptc.re_values
# "march 1rd -13th"
ptc.DATERNG3 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?'
ptc.DATERNG3 = ptc.DATERNG3 % ptc.re_values
# "4:00:55 pm - 5:90:44 am", '4p-5p'
ptc.TIMERNG1 = ptc.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG1 = ptc.TIMERNG1 % ptc.re_values
# "4:00 - 5:90 ", "4:55:55-3:44:55"
ptc.TIMERNG2 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS
ptc.TIMERNG2 = ptc.TIMERNG2 % ptc.re_values
# "4-5pm "
ptc.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG3 = ptc.TIMERNG3 % ptc.re_values
# "4:30-5pm "
ptc.TIMERNG4 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG4 = ptc.TIMERNG4 % ptc.re_values
def _initConstants(ptc):
"""
Create localized versions of the units, week and month names
"""
# build weekday offsets - yes, it assumes the Weekday and shortWeekday
# lists are in the same order and Mon..Sun (Python style)
ptc.WeekdayOffsets = {}
o = 0
for key in ptc.Weekdays:
ptc.WeekdayOffsets[key] = o
o += 1
o = 0
for key in ptc.shortWeekdays:
ptc.WeekdayOffsets[key] = o
o += 1
# build month offsets - yes, it assumes the Months and shortMonths
# lists are in the same order and Jan..Dec
ptc.MonthOffsets = {}
o = 1
for key in ptc.Months:
ptc.MonthOffsets[key] = o
o += 1
o = 1
for key in ptc.shortMonths:
ptc.MonthOffsets[key] = o
o += 1
# ptc.DaySuffixes = ptc.re_consts['daysuffix'].split('|')
class Constants:
"""
Default set of constants for parsedatetime.
If PyICU is present, then the class will first try to get PyICU
to return a locale specified by C{localeID}. If either C{localeID} is
None or if the locale does not exist within PyICU, then each of the
locales defined in C{fallbackLocales} is tried in order.
If PyICU is not present or none of the specified locales can be used,
then the class will initialize itself to the en_US locale.
if PyICU is not present or not requested, only the locales defined by
C{pdtLocales} will be searched.
"""
def __init__(self, localeID=None, usePyICU=True, fallbackLocales=['en_US']):
self.localeID = localeID
self.fallbackLocales = fallbackLocales
if 'en_US' not in self.fallbackLocales:
self.fallbackLocales.append('en_US')
# define non-locale specific constants
self.locale = None
self.usePyICU = usePyICU
# starting cache of leap years
# daysInMonth will add to this if during
# runtime it gets a request for a year not found
self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
2080, 2084, 2088, 2092, 2096 ]
self.Second = 1
self.Minute = 60 * self.Second
self.Hour = 60 * self.Minute
self.Day = 24 * self.Hour
self.Week = 7 * self.Day
self.Month = 30 * self.Day
self.Year = 365 * self.Day
self.rangeSep = u'-'
self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
self.BirthdayEpoch = 50
# DOWParseStyle controls how we parse "Tuesday"
# If the current day was Thursday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Current day marked as ***
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current -1,0 ***
# week +1 +1
#
# If the current day was Monday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1 -1
# current *** 0,+1
# week +1
self.DOWParseStyle = 1
# CurrentDOWParseStyle controls how we parse "Friday"
# If the current day was Friday and the text to parse is "Friday"
# then the following table shows how each style would be returned
# True/False. This also depends on DOWParseStyle.
#
# Current day marked as ***
#
# DOWParseStyle = 0
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T,F
# week +1
#
# DOWParseStyle = -1
# Sun Mon Tue Wed Thu Fri Sat
# week -1 F
# current T
# week +1
#
# DOWParseStyle = +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T
# week +1 F
self.CurrentDOWParseStyle = False
# initalize attributes to empty values to ensure
# they are defined
self.re_sources = None
self.re_values = None
self.Modifiers = None
self.dayOffsets = None
self.WeekdayOffsets = None
self.MonthOffsets = None
self.dateSep = None
self.timeSep = None
self.am = None
self.pm = None
self.meridian = None
self.usesMeridian = None
self.uses24 = None
self.dp_order = None
self.RE_DATE4 = r''
self.RE_DATE3 = r''
self.RE_MONTH = r''
self.RE_WEEKDAY = r''
self.RE_SPECIAL = r''
self.RE_UNITS = r''
self.RE_QUNITS = r''
self.RE_MODIFIER = r''
self.RE_MODIFIER2 = r''
self.RE_TIMEHMS = r''
self.RE_TIMEHMS2 = r''
self.RE_DATE = r''
self.RE_DATE2 = r''
self.RE_DAY = r''
self.RE_DAY2 = r''
self.RE_TIME = r''
self.RE_REMAINING = r''
self.RE_RTIMEHMS = r''
self.RE_RTIMEHMS2 = r''
self.RE_RDATE = r''
self.RE_RDATE3 = r''
self.DATERNG1 = r''
self.DATERNG2 = r''
self.DATERNG3 = r''
self.TIMERNG1 = r''
self.TIMERNG2 = r''
self.TIMERNG3 = r''
self.TIMERNG4 = r''
_initLocale(self)
_initConstants(self)
_initSymbols(self)
_initPatterns(self)
self.re_option = re.IGNORECASE + re.VERBOSE
self.cre_source = { 'CRE_SPECIAL': self.RE_SPECIAL,
'CRE_UNITS': self.RE_UNITS,
'CRE_QUNITS': self.RE_QUNITS,
'CRE_MODIFIER': self.RE_MODIFIER,
'CRE_MODIFIER2': self.RE_MODIFIER2,
'CRE_TIMEHMS': self.RE_TIMEHMS,
'CRE_TIMEHMS2': self.RE_TIMEHMS2,
'CRE_DATE': self.RE_DATE,
'CRE_DATE2': self.RE_DATE2,
'CRE_DATE3': self.RE_DATE3,
'CRE_DATE4': self.RE_DATE4,
'CRE_MONTH': self.RE_MONTH,
'CRE_WEEKDAY': self.RE_WEEKDAY,
'CRE_DAY': self.RE_DAY,
'CRE_DAY2': self.RE_DAY2,
'CRE_TIME': self.RE_TIME,
'CRE_REMAINING': self.RE_REMAINING,
'CRE_RTIMEHMS': self.RE_RTIMEHMS,
'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
'CRE_RDATE': self.RE_RDATE,
'CRE_RDATE3': self.RE_RDATE3,
'CRE_TIMERNG1': self.TIMERNG1,
'CRE_TIMERNG2': self.TIMERNG2,
'CRE_TIMERNG3': self.TIMERNG3,
'CRE_TIMERNG4': self.TIMERNG4,
'CRE_DATERNG1': self.DATERNG1,
'CRE_DATERNG2': self.DATERNG2,
'CRE_DATERNG3': self.DATERNG3,
}
self.cre_keys = self.cre_source.keys()
def __getattr__(self, name):
if name in self.cre_keys:
value = re.compile(self.cre_source[name], self.re_option)
setattr(self, name, value)
return value
else:
raise AttributeError, name
def daysInMonth(self, month, year):
"""
Take the given month (1-12) and a given year (4 digit) return
the number of days in the month adjusting for leap year as needed
"""
result = None
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result
def buildSources(self, sourceTime=None):
"""
Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned.
"""
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
sources = {}
defaults = { 'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec, }
for item in self.re_sources:
values = {}
source = self.re_sources[item]
for key in defaults.keys():
if key in source:
values[key] = source[key]
else:
values[key] = defaults[key]
sources[item] = ( values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'], wd, yd, isdst )
return sources
|
vilmibm/done | parsedatetime/parsedatetime_consts.py | _initPatterns | python | def _initPatterns(ptc):
# TODO add code to parse the date formats and build the regexes up from sub-parts
# TODO find all hard-coded uses of date/time seperators
ptc.RE_DATE4 = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?)
(?P<mthname>(%(months)s|%(shortmonths)s))\s?
(?P<year>\d\d(\d\d)?)?
)
)''' % ptc.re_values
# I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16
# I suspect the final line was for a trailing time - but testing shows it's not needed
# ptc.RE_DATE3 = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?
# ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)?
# (,\s?(?P<year>\d\d(\d\d)?))?))
# (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_DATE3 = r'''(?P<date>(
(((?P<mthname>(%(months)s|%(shortmonths)s))|
((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2}
((,)?(\s)?(?P<year>\d\d(\d\d)?))?
)
)''' % ptc.re_values
ptc.RE_MONTH = r'''(\s?|^)
(?P<month>(
(?P<mthname>(%(months)s|%(shortmonths)s))
(\s?(?P<year>(\d\d\d\d)))?
))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_WEEKDAY = r'''(\s?|^)
(?P<weekday>(%(days)s|%(shortdays)s))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_SPECIAL = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values
ptc.RE_UNITS = r'''(?P<qty>(-?\d+\s*
(?P<units>((%(units)s)s?))
))''' % ptc.re_values
ptc.RE_QUNITS = r'''(?P<qty>(-?\d+\s?
(?P<qunits>%(qunits)s)
(\s?|,|$)
))''' % ptc.re_values
ptc.RE_MODIFIER = r'''(\s?|^)
(?P<modifier>
(previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % ptc.re_values
ptc.RE_MODIFIER2 = r'''(\s?|^)
(?P<modifier>
(from|before|after|ago|prior))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_TIMEHMS = r'''(\s?|^)
(?P<hours>\d\d?)
(?P<tsep>%(timeseperator)s|)
(?P<minutes>\d\d)
(?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % ptc.re_values
ptc.RE_TIMEHMS2 = r'''(?P<hours>(\d\d?))
((?P<tsep>%(timeseperator)s|)
(?P<minutes>(\d\d?))
(?:(?P=tsep)
(?P<seconds>\d\d?
(?:[.,]\d+)?))?)?''' % ptc.re_values
if 'meridian' in ptc.re_values:
ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values
dateSeps = ''.join(ptc.dateSep) + '.'
ptc.RE_DATE = r'''(\s?|^)
(?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?))
(\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps)
ptc.RE_DATE2 = r'[%s]' % dateSeps
ptc.RE_DAY = r'''(\s?|^)
(?P<day>(today|tomorrow|yesterday))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_DAY2 = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s)
''' % ptc.re_values
ptc.RE_TIME = r'''(\s?|^)
(?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))
(\s?|$|[^0-9a-zA-Z])''' % ptc.re_values
ptc.RE_REMAINING = r'\s+'
# Regex for date/time ranges
ptc.RE_RTIMEHMS = r'''(\s?|^)
(\d\d?)%(timeseperator)s
(\d\d)
(%(timeseperator)s(\d\d))?
(\s?|$)''' % ptc.re_values
ptc.RE_RTIMEHMS2 = r'''(\s?|^)
(\d\d?)
(%(timeseperator)s(\d\d?))?
(%(timeseperator)s(\d\d?))?''' % ptc.re_values
if 'meridian' in ptc.re_values:
ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values
ptc.RE_RDATE = r'(\d+([%s]\d+)+)' % dateSeps
ptc.RE_RDATE3 = r'''((((%(months)s))\s?
((\d\d?)
(\s?|%(daysuffix)s|$)+)?
(,\s?\d\d\d\d)?))''' % ptc.re_values
# "06/07/06 - 08/09/06"
ptc.DATERNG1 = ptc.RE_RDATE + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE
ptc.DATERNG1 = ptc.DATERNG1 % ptc.re_values
# "march 31 - june 1st, 2006"
ptc.DATERNG2 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE3
ptc.DATERNG2 = ptc.DATERNG2 % ptc.re_values
# "march 1rd -13th"
ptc.DATERNG3 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?'
ptc.DATERNG3 = ptc.DATERNG3 % ptc.re_values
# "4:00:55 pm - 5:90:44 am", '4p-5p'
ptc.TIMERNG1 = ptc.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG1 = ptc.TIMERNG1 % ptc.re_values
# "4:00 - 5:90 ", "4:55:55-3:44:55"
ptc.TIMERNG2 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS
ptc.TIMERNG2 = ptc.TIMERNG2 % ptc.re_values
# "4-5pm "
ptc.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG3 = ptc.TIMERNG3 % ptc.re_values
# "4:30-5pm "
ptc.TIMERNG4 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
ptc.TIMERNG4 = ptc.TIMERNG4 % ptc.re_values | Helper function to take the different localized bits from ptc and
create the regex strings. | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L710-L838 | null | #!/usr/bin/env python
"""
parsedatetime constants and helper functions to determine
regex values from Locale information if present.
Also contains the internal Locale classes to give some sane
defaults if PyICU is not found.
"""
__license__ = """
Copyright (c) 2004-2008 Mike Taylor
Copyright (c) 2006-2008 Darshana Chhajed
Copyright (c) 2007 Bernd Zeimetz <bzed@debian.org>
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import PyICU as pyicu
except:
pyicu = None
import datetime
import calendar
import time
import re
class pdtLocale_en:
    """
    en_US Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for USA
    """
    localeID      = 'en_US'   # don't use a unicode string
    dateSep       = [ u'/', u'.' ]        # accepted separators between date parts
    timeSep       = [ u':' ]              # accepted separators between time parts
    meridian      = [ u'AM', u'PM' ]
    usesMeridian  = True                  # 12-hour clock with AM/PM
    uses24        = False                 # not a 24-hour locale

    # weekday names, Monday first (Python weekday() ordering); all lowercase
    # because parsing is done against lowercased input
    Weekdays      = [ u'monday', u'tuesday', u'wednesday',
                      u'thursday', u'friday', u'saturday', u'sunday',
                    ]
    shortWeekdays = [ u'mon', u'tues', u'wed',
                      u'thu', u'fri', u'sat', u'sun',
                    ]
    Months        = [ u'january', u'february', u'march',
                      u'april',   u'may',      u'june',
                      u'july',    u'august',   u'september',
                      u'october', u'november', u'december',
                    ]
    shortMonths   = [ u'jan', u'feb', u'mar',
                      u'apr', u'may', u'jun',
                      u'jul', u'aug', u'sep',
                      u'oct', u'nov', u'dec',
                    ]
    # ICU-style date/time format patterns (same pattern language PyICU emits)
    dateFormats   = { 'full':   'EEEE, MMMM d, yyyy',
                      'long':   'MMMM d, yyyy',
                      'medium': 'MMM d, yyyy',
                      'short':  'M/d/yy',
                    }
    timeFormats   = { 'full':   'h:mm:ss a z',
                      'long':   'h:mm:ss a z',
                      'medium': 'h:mm:ss a',
                      'short':  'h:mm a',
                    }
    # order of date parts in a numeric date: month/day/year for en_US
    dp_order      = [ u'm', u'd', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr'  ],
              'days':    [ 'day',    'dy'  ],
              'weeks':   [ 'week',   'wk'  ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr'  ],
            }
    # text constants to be used by regex's later
    # (substituted into the RE_* pattern templates as %(key)s)
    re_consts = { 'specials':      'in|on|of|at',
                  'timeseperator': ':',
                  'rangeseperator': '-',
                  'daysuffix':     'rd|st|nd|th',
                  'meridian':      'am|pm|a.m.|p.m.|a|p',
                  'qunits':        'h|m|s|d|w|m|y',
                  'now':           [ 'now' ],
                }
    # Used to adjust the returned date before/after the source
    # (sign/multiplier applied to the parsed quantity)
    modifiers = { 'from':      1,
                  'before':   -1,
                  'after':     1,
                  'ago':      -1,
                  'prior':    -1,
                  'prev':     -1,
                  'last':     -1,
                  'next':      1,
                  'previous': -1,
                  'in a':      2,
                  'end of':    0,
                  'eod':       0,
                  'eo':        0
                }
    # day-name words mapped to their offset (in days) from "today"
    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }
    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
class pdtLocale_au:
    """
    en_AU Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for Australia
    """
    localeID      = 'en_AU'   # don't use a unicode string
    dateSep       = [ u'-', u'/' ]        # accepted separators between date parts
    timeSep       = [ u':' ]              # accepted separators between time parts
    meridian      = [ u'AM', u'PM' ]
    usesMeridian  = True                  # 12-hour clock with AM/PM
    uses24        = False

    # weekday names, Monday first (Python weekday() ordering); lowercase
    Weekdays      = [ u'monday', u'tuesday', u'wednesday',
                      u'thursday', u'friday', u'saturday', u'sunday',
                    ]
    shortWeekdays = [ u'mon', u'tues', u'wed',
                      u'thu', u'fri', u'sat', u'sun',
                    ]
    Months        = [ u'january', u'february', u'march',
                      u'april',   u'may',      u'june',
                      u'july',    u'august',   u'september',
                      u'october', u'november', u'december',
                    ]
    shortMonths   = [ u'jan', u'feb', u'mar',
                      u'apr', u'may', u'jun',
                      u'jul', u'aug', u'sep',
                      u'oct', u'nov', u'dec',
                    ]
    # ICU-style date/time format patterns (day before month in Australia)
    dateFormats   = { 'full':   'EEEE, d MMMM yyyy',
                      'long':   'd MMMM yyyy',
                      'medium': 'dd/MM/yyyy',
                      'short':  'd/MM/yy',
                    }
    timeFormats   = { 'full':   'h:mm:ss a z',
                      'long':   'h:mm:ss a',
                      'medium': 'h:mm:ss a',
                      'short':  'h:mm a',
                    }
    # order of date parts in a numeric date: day/month/year
    dp_order      = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr'  ],
              'days':    [ 'day',    'dy'  ],
              'weeks':   [ 'week',   'wk'  ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr'  ],
            }
    # text constants to be used by regex's later
    re_consts = { 'specials':      'in|on|of|at',
                  'timeseperator': ':',
                  'rangeseperator': '-',
                  'daysuffix':     'rd|st|nd|th',
                  'meridian':      'am|pm|a.m.|p.m.|a|p',
                  'qunits':        'h|m|s|d|w|m|y',
                  'now':           [ 'now' ],
                }
    # Used to adjust the returned date before/after the source
    modifiers = { 'from':      1,
                  'before':   -1,
                  'after':     1,
                  'ago':      -1,   # fixed: was +1; "<n> <units> ago" must move
                                    # into the past, matching en_US and the word's
                                    # meaning (English is the same in en_AU)
                  'prior':    -1,
                  'prev':     -1,
                  'last':     -1,
                  'next':      1,
                  'previous': -1,
                  'in a':      2,
                  'end of':    0,
                  'eod':       0,   # fixed: was missing; en_US defines it and the
                                    # 'eod' source below is meaningless without it
                  'eo':        0,
                }
    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }
    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
class pdtLocale_es:
    """
    es Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings in Spanish

    Note that I don't speak Spanish so many of the items below are still in English
    (units, modifiers, dayoffsets and re_sources keys in particular).
    """
    localeID      = 'es'      # don't use a unicode string
    dateSep       = [ u'/' ]
    timeSep       = [ u':' ]
    meridian      = []        # no AM/PM text in this locale
    usesMeridian  = False
    uses24        = True      # 24-hour clock

    # weekday names, Monday first (Python weekday() ordering); lowercase
    Weekdays      = [ u'lunes', u'martes', u'mi\xe9rcoles',
                      u'jueves', u'viernes', u's\xe1bado', u'domingo',
                    ]
    shortWeekdays = [ u'lun', u'mar', u'mi\xe9',
                      u'jue', u'vie', u's\xe1b', u'dom',
                    ]
    Months        = [ u'enero', u'febrero', u'marzo',
                      u'abril', u'mayo', u'junio',
                      u'julio', u'agosto', u'septiembre',
                      u'octubre', u'noviembre', u'diciembre'
                    ]
    shortMonths   = [ u'ene', u'feb', u'mar',
                      u'abr', u'may', u'jun',
                      u'jul', u'ago', u'sep',
                      u'oct', u'nov', u'dic'
                    ]
    dateFormats   = { 'full':   "EEEE d' de 'MMMM' de 'yyyy",
                      'long':   "d' de 'MMMM' de 'yyyy",
                      'medium': "dd-MMM-yy",
                      'short':  "d/MM/yy",
                    }
    timeFormats   = { 'full':   "HH'H'mm' 'ss z",
                      'long':   "HH:mm:ss z",
                      'medium': "HH:mm:ss",
                      'short':  "HH:mm",
                    }
    # order of date parts in a numeric date: day/month/year
    dp_order      = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr'  ],
              'days':    [ 'day',    'dy'  ],
              'weeks':   [ 'week',   'wk'  ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr'  ],
            }
    # text constants to be used by regex's later
    # NOTE: these values are interpolated into the RE_* pattern templates
    # with %(key)s, so they must be plain strings; this class previously
    # stored the timeSep/dateSep *lists* here, which rendered as "[u':']"
    # inside the regexes and broke them.  Any key missing here (e.g.
    # 'meridian') is back-filled from the en_US set by _initLocale.
    re_consts = { 'specials':       'in|on|of|at',
                  'timeseperator':  ':',   # fixed: was the timeSep list
                  'dateseperator':  '/',   # fixed: was the dateSep list
                  'rangeseperator': '-',
                  'daysuffix':      'rd|st|nd|th',
                  'qunits':         'h|m|s|d|w|m|y',
                  'now':            [ 'now' ],
                }
    # Used to adjust the returned date before/after the source
    modifiers = { 'from':      1,
                  'before':   -1,
                  'after':     1,
                  'ago':      -1,   # fixed: was +1; "ago" must move into the past
                                    # (matches en_US)
                  'prior':    -1,
                  'prev':     -1,
                  'last':     -1,
                  'next':      1,
                  'previous': -1,
                  'in a':      2,
                  'end of':    0,
                  'eod':       0,   # fixed: was missing (present in en_US/de_DE)
                  'eo':        0,
                }
    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }
    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
class pdtLocale_de:
    """
    de_DE Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Contributed by Debian parsedatetime package maintainer Bernd Zeimetz <bzed@debian.org>

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for German
    """
    localeID      = 'de_DE'   # don't use a unicode string
    dateSep       = [ u'.' ]
    timeSep       = [ u':' ]
    meridian      = [ ]       # no AM/PM text - German uses a 24-hour clock
    usesMeridian  = False
    uses24        = True

    # weekday names, Monday first (Python weekday() ordering); lowercase
    Weekdays      = [ u'montag', u'dienstag', u'mittwoch',
                      u'donnerstag', u'freitag', u'samstag', u'sonntag',
                    ]
    shortWeekdays = [ u'mo', u'di', u'mi',
                      u'do', u'fr', u'sa', u'so',
                    ]
    Months        = [ u'januar',  u'februar',  u'm\xe4rz',
                      u'april',   u'mai',      u'juni',
                      u'juli',    u'august',   u'september',
                      u'oktober', u'november', u'dezember',
                    ]
    shortMonths   = [ u'jan', u'feb', u'mrz',
                      u'apr', u'mai', u'jun',
                      u'jul', u'aug', u'sep',
                      u'okt', u'nov', u'dez',
                    ]
    dateFormats   = { 'full':   u'EEEE, d. MMMM yyyy',
                      'long':   u'd. MMMM yyyy',
                      'medium': u'dd.MM.yyyy',
                      'short':  u'dd.MM.yy'
                    }
    timeFormats   = { 'full':   u'HH:mm:ss v',
                      'long':   u'HH:mm:ss z',
                      'medium': u'HH:mm:ss',
                      'short':  u'HH:mm'
                    }
    # order of date parts in a numeric date: day/month/year
    dp_order      = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'sekunden', 'sek',  's' ],
              'minutes': [ 'minuten',  'min' , 'm' ],
              'hours':   [ 'stunden',  'std',  'h' ],
              'days':    [ 'tage',     't' ],
              'weeks':   [ 'wochen',   'w' ],
              'months':  [ 'monate' ], #the short version would be a capital M,
                                       #as I understand it we can't distinguis
                                       #between m for minutes and M for months.
              'years':   [ 'jahre',    'j' ],
            }
    # text constants to be used by regex's later
    re_consts = { 'specials':       'am|dem|der|im|in|den|zum',
                  'timeseperator':  ':',
                  'rangeseperator': '-',
                  'daysuffix':      '',
                  'qunits':         'h|m|s|t|w|m|j',
                  'now':            [ 'jetzt' ],
                }
    # Used to adjust the returned date before/after the source
    #still looking for insight on how to translate all of them to german.
    modifiers = { u'from':         1,
                  u'before':      -1,
                  u'after':        1,
                  u'vergangener': -1,
                  u'vorheriger':  -1,
                  u'prev':        -1,
                  u'letzter':     -1,
                  u'n\xe4chster':  1,
                  u'dieser':       0,
                  u'previous':    -1,
                  u'in a':         2,
                  u'end of':       0,
                  u'eod':          0,
                  u'eo':           0,
                }
    #morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
    dayoffsets = { u'morgen':        1,
                   u'heute':         0,
                   u'gestern':      -1,
                   u'vorgestern':   -2,
                   u'\xfcbermorgen': 2,
                 }
    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { u'mittag':          { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'mittags':         { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'mittagessen':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'morgen':          { 'hr':  6, 'mn': 0, 'sec': 0 },
                   u'morgens':         { 'hr':  6, 'mn': 0, 'sec': 0 },
                   # fixed: key was the garbled u'fr\e4hst\xe4ck' ('\e' is not
                   # a valid escape and \xe4 is the wrong umlaut), which could
                   # never match the word the user actually types
                   u'fr\xfchst\xfcck': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   u'abendessen':      { 'hr': 19, 'mn': 0, 'sec': 0 },
                   u'abend':           { 'hr': 18, 'mn': 0, 'sec': 0 },
                   u'abends':          { 'hr': 18, 'mn': 0, 'sec': 0 },
                   u'mitternacht':     { 'hr':  0, 'mn': 0, 'sec': 0 },
                   u'nacht':           { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'nachts':          { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'heute abend':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'heute nacht':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'feierabend':      { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
# Registry of the built-in locale classes, keyed by locale identifier.
# _initLocale looks localeIDs up here when PyICU is unavailable and when
# fetching the regex/modifier constants.
pdtLocales = { 'en_US': pdtLocale_en,
               'en_AU': pdtLocale_au,
               'es':    pdtLocale_es,   # fixed: pdtLocale_es.localeID is 'es', so
                                        # Constants(localeID='es') must find it here
                                        # instead of silently falling back to en_US
               'es_ES': pdtLocale_es,   # kept for backwards compatibility
               'de_DE': pdtLocale_de,
             }
def _initLocale(ptc):
    """
    Helper function to initialize the different lists and strings
    from either PyICU or one of the internal pdt Locales and store
    them into ptc.

    @param ptc: a L{Constants} instance; its locale attributes
                (Weekdays, Months, dateFormats, re_values, Modifiers,
                dayOffsets, Units, ...) are populated in place.
    """
    def lcase(x):
        # matching is done against lowercased input everywhere
        return x.lower()

    if pyicu and ptc.usePyICU:
        # PyICU path: ask ICU for the requested locale, then walk the
        # fallback list until one is accepted.
        ptc.icuLocale = None

        if ptc.localeID is not None:
            ptc.icuLocale = pyicu.Locale(ptc.localeID)

        if ptc.icuLocale is None:
            for id in range(0, len(ptc.fallbackLocales)):
                ptc.localeID  = ptc.fallbackLocales[id]
                ptc.icuLocale = pyicu.Locale(ptc.localeID)

                if ptc.icuLocale is not None:
                    break

        ptc.icuSymbols = pyicu.DateFormatSymbols(ptc.icuLocale)

        # grab ICU list of weekdays, skipping first entry which
        # is always blank
        # NOTE(review): the slicing below assumes map() returns a list
        # (Python 2 behaviour); under Python 3 map() returns an iterator
        # and wd[1:] would fail - confirm if porting.
        wd  = map(lcase, ptc.icuSymbols.getWeekdays()[1:])
        swd = map(lcase, ptc.icuSymbols.getShortWeekdays()[1:])

        # store them in our list with Monday first (ICU puts Sunday first)
        ptc.Weekdays      = wd[1:] + wd[0:1]
        ptc.shortWeekdays = swd[1:] + swd[0:1]

        ptc.Months      = map(lcase, ptc.icuSymbols.getMonths())
        ptc.shortMonths = map(lcase, ptc.icuSymbols.getShortMonths())

        # not quite sure how to init this so for now
        # set it to none so it will be set to the en_US defaults for now
        ptc.re_consts = None

        # keep the live ICU formatter objects and also their pattern strings
        ptc.icu_df = { 'full':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
                       'long':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
                       'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
                       'short':  pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
                     }
        ptc.icu_tf = { 'full':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
                       'long':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
                       'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
                       'short':  pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
                     }

        ptc.dateFormats = { 'full':   ptc.icu_df['full'].toPattern(),
                            'long':   ptc.icu_df['long'].toPattern(),
                            'medium': ptc.icu_df['medium'].toPattern(),
                            'short':  ptc.icu_df['short'].toPattern(),
                          }
        ptc.timeFormats = { 'full':   ptc.icu_tf['full'].toPattern(),
                            'long':   ptc.icu_tf['long'].toPattern(),
                            'medium': ptc.icu_tf['medium'].toPattern(),
                            'short':  ptc.icu_tf['short'].toPattern(),
                          }
    else:
        # no PyICU (or not requested): use one of the internal pdtLocale_*
        # classes, falling back through fallbackLocales if needed
        if not ptc.localeID in pdtLocales:
            for id in range(0, len(ptc.fallbackLocales)):
                ptc.localeID = ptc.fallbackLocales[id]

                if ptc.localeID in pdtLocales:
                    break

        ptc.locale   = pdtLocales[ptc.localeID]
        ptc.usePyICU = False

        ptc.Weekdays      = ptc.locale.Weekdays
        ptc.shortWeekdays = ptc.locale.shortWeekdays
        ptc.Months        = ptc.locale.Months
        ptc.shortMonths   = ptc.locale.shortMonths
        ptc.dateFormats   = ptc.locale.dateFormats
        ptc.timeFormats   = ptc.locale.timeFormats

    # these values are used to setup the various bits
    # of the regex values used to parse
    #
    # check if a local set of constants has been
    # provided, if not use en_US as the default
    if ptc.localeID in pdtLocales:
        # NOTE(review): re_consts/modifiers/dayoffsets are class attributes
        # shared by every Constants instance of the same locale; the back-fill
        # loop below mutates that shared dict in place.
        ptc.re_sources = pdtLocales[ptc.localeID].re_sources
        ptc.re_values  = pdtLocales[ptc.localeID].re_consts

        units = pdtLocales[ptc.localeID].units

        ptc.Modifiers  = pdtLocales[ptc.localeID].modifiers
        ptc.dayOffsets = pdtLocales[ptc.localeID].dayoffsets

        # for now, pull over any missing keys from the US set
        for key in pdtLocales['en_US'].re_consts:
            if not key in ptc.re_values:
                ptc.re_values[key] = pdtLocales['en_US'].re_consts[key]
    else:
        ptc.re_sources = pdtLocales['en_US'].re_sources
        ptc.re_values  = pdtLocales['en_US'].re_consts
        ptc.Modifiers  = pdtLocales['en_US'].modifiers
        ptc.dayOffsets = pdtLocales['en_US'].dayoffsets
        units          = pdtLocales['en_US'].units

    # escape any regex special characters that may be found
    wd   = tuple(map(re.escape, ptc.Weekdays))
    swd  = tuple(map(re.escape, ptc.shortWeekdays))
    mth  = tuple(map(re.escape, ptc.Months))
    smth = tuple(map(re.escape, ptc.shortMonths))

    # build the alternation strings used inside the date/time regexes
    ptc.re_values['months']      = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % mth
    ptc.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % smth
    ptc.re_values['days']        = '%s|%s|%s|%s|%s|%s|%s' % wd
    ptc.re_values['shortdays']   = '%s|%s|%s|%s|%s|%s|%s' % swd

    l = []
    for unit in units:
        l.append('|'.join(units[unit]))

    ptc.re_values['units'] = '|'.join(l)
    ptc.Units              = ptc.re_values['units'].split('|')
def _initSymbols(ptc):
    """
    Helper function to initialize the single character constants
    and other symbols needed.

    Populates on ptc: timeSep, dateSep, meridian, usesMeridian, uses24,
    dp_order, am and pm.  When PyICU is active the separators are probed
    by formatting a known datetime and stripping the known digits out of
    the result; otherwise they come straight from the internal locale class.
    """
    # sane defaults, overwritten below
    ptc.timeSep  = [ u':' ]
    ptc.dateSep  = [ u'/' ]
    ptc.meridian = [ u'AM', u'PM' ]

    ptc.usesMeridian = True
    ptc.uses24       = False

    if pyicu and ptc.usePyICU:
        am = u''
        pm = u''
        ts = ''

        # ICU doesn't seem to provide directly the
        # date or time seperator - so we have to
        # figure it out
        o = ptc.icu_tf['short']
        s = ptc.timeFormats['short']

        # presence of the ICU pattern letters tells us the clock style
        ptc.usesMeridian = u'a' in s
        ptc.uses24       = u'H' in s

        # '11:45 AM' or '11:45'
        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))

        # ': AM' or ':'  (whatever is left is the separator + meridian)
        s = s.replace('11', '').replace('45', '')

        if len(s) > 0:
            ts = s[0]

        if ptc.usesMeridian:
            # '23:45 AM' or '23:45'
            am = s[1:].strip()
            s  = o.format(datetime.datetime(2003, 10, 30, 23, 45))

            if ptc.uses24:
                s = s.replace('23', '')
            else:
                s = s.replace('11', '')

            # 'PM' or ''
            pm = s.replace('45', '').replace(ts, '').strip()

        ptc.timeSep  = [ ts ]
        ptc.meridian = [ am, pm ]

        # same trick for the date separator: format a known date and strip
        # out the digits, the first remaining character is the separator
        o = ptc.icu_df['short']
        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
        s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')

        if len(s) > 0:
            ds = s[0]
        else:
            ds = '/'

        ptc.dateSep = [ ds ]
        s           = ptc.dateFormats['short']
        l           = s.lower().split(ds)
        dp_order    = []

        # first letter of each pattern chunk gives the date part order,
        # e.g. 'd/mm/yy' -> [u'd', u'm', u'y']
        for s in l:
            if len(s) > 0:
                dp_order.append(s[:1])

        ptc.dp_order = dp_order
    else:
        ptc.timeSep      = ptc.locale.timeSep
        ptc.dateSep      = ptc.locale.dateSep
        ptc.meridian     = ptc.locale.meridian
        ptc.usesMeridian = ptc.locale.usesMeridian
        ptc.uses24       = ptc.locale.uses24
        ptc.dp_order     = ptc.locale.dp_order

    # build am and pm lists to contain
    # original case, lowercase and first-char
    # versions of the meridian text
    if len(ptc.meridian) > 0:
        am     = ptc.meridian[0]
        ptc.am = [ am ]

        if len(am) > 0:
            ptc.am.append(am[0])
            am = am.lower()
            ptc.am.append(am)
            ptc.am.append(am[0])
    else:
        am     = ''
        ptc.am = [ '', '' ]

    if len(ptc.meridian) > 1:
        pm     = ptc.meridian[1]
        ptc.pm = [ pm ]

        if len(pm) > 0:
            ptc.pm.append(pm[0])
            pm = pm.lower()
            ptc.pm.append(pm)
            ptc.pm.append(pm[0])
    else:
        pm     = ''
        ptc.pm = [ '', '' ]
def _initConstants(ptc):
"""
Create localized versions of the units, week and month names
"""
# build weekday offsets - yes, it assumes the Weekday and shortWeekday
# lists are in the same order and Mon..Sun (Python style)
ptc.WeekdayOffsets = {}
o = 0
for key in ptc.Weekdays:
ptc.WeekdayOffsets[key] = o
o += 1
o = 0
for key in ptc.shortWeekdays:
ptc.WeekdayOffsets[key] = o
o += 1
# build month offsets - yes, it assumes the Months and shortMonths
# lists are in the same order and Jan..Dec
ptc.MonthOffsets = {}
o = 1
for key in ptc.Months:
ptc.MonthOffsets[key] = o
o += 1
o = 1
for key in ptc.shortMonths:
ptc.MonthOffsets[key] = o
o += 1
# ptc.DaySuffixes = ptc.re_consts['daysuffix'].split('|')
class Constants:
    """
    Default set of constants for parsedatetime.

    If PyICU is present, then the class will first try to get PyICU
    to return a locale specified by C{localeID}.  If either C{localeID} is
    None or if the locale does not exist within PyICU, then each of the
    locales defined in C{fallbackLocales} is tried in order.

    If PyICU is not present or none of the specified locales can be used,
    then the class will initialize itself to the en_US locale.

    if PyICU is not present or not requested, only the locales defined by
    C{pdtLocales} will be searched.
    """
    def __init__(self, localeID=None, usePyICU=True, fallbackLocales=None):
        """
        @param localeID:        preferred locale identifier, e.g. 'en_US',
                                or None to go straight to the fallbacks
        @param usePyICU:        use PyICU for locale data when available
        @param fallbackLocales: ordered list of locale IDs to try when the
                                requested locale is missing or unusable;
                                'en_US' is always appended as last resort
        """
        self.localeID = localeID

        # fixed: the signature previously used a shared mutable default
        # (fallbackLocales=['en_US']) which was then mutated via .append(),
        # also modifying any list the caller passed in.  Use a None
        # sentinel and copy instead; behaviour for callers is unchanged.
        if fallbackLocales is None:
            fallbackLocales = ['en_US']
        self.fallbackLocales = list(fallbackLocales)

        if 'en_US' not in self.fallbackLocales:
            self.fallbackLocales.append('en_US')

        # define non-locale specific constants
        self.locale   = None
        self.usePyICU = usePyICU

        # starting cache of leap years
        # daysInMonth will add to this if during
        # runtime it gets a request for a year not found
        self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
                            1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
                            1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
                            2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
                            2080, 2084, 2088, 2092, 2096 ]

        # durations expressed in seconds (Month/Year are approximations)
        self.Second =   1
        self.Minute =  60 * self.Second
        self.Hour   =  60 * self.Minute
        self.Day    =  24 * self.Hour
        self.Week   =   7 * self.Day
        self.Month  =  30 * self.Day
        self.Year   = 365 * self.Day

        self.rangeSep         = u'-'
        self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
        # two-digit years >= BirthdayEpoch are treated as 19xx, below as 20xx
        self.BirthdayEpoch    = 50

        # DOWParseStyle controls how we parse "Tuesday"
        # If the current day was Thursday and the text to parse is "Tuesday"
        # then the following table shows how each style would be returned
        # -1, 0, +1
        #
        # Current day marked as ***
        #
        #          Sun Mon Tue Wed Thu Fri Sat
        # week -1
        # current        -1,0    ***
        # week +1          +1
        #
        # If the current day was Monday and the text to parse is "Tuesday"
        # then the following table shows how each style would be returned
        # -1, 0, +1
        #
        #          Sun Mon Tue Wed Thu Fri Sat
        # week -1       -1
        # current    ***  0,+1
        # week +1
        self.DOWParseStyle = 1

        # CurrentDOWParseStyle controls how we parse "Friday"
        # If the current day was Friday and the text to parse is "Friday"
        # then the following table shows how each style would be returned
        # True/False. This also depends on DOWParseStyle.
        #
        # Current day marked as ***
        #
        # DOWParseStyle = 0
        #          Sun Mon Tue Wed Thu Fri Sat
        # week -1
        # current                      T,F
        # week +1
        #
        # DOWParseStyle = -1
        #          Sun Mon Tue Wed Thu Fri Sat
        # week -1                       F
        # current                       T
        # week +1
        #
        # DOWParseStyle = +1
        #
        #          Sun Mon Tue Wed Thu Fri Sat
        # week -1
        # current                       T
        # week +1                       F
        self.CurrentDOWParseStyle = False

        # initialize attributes to empty values to ensure
        # they are defined
        self.re_sources     = None
        self.re_values      = None
        self.Modifiers      = None
        self.dayOffsets     = None
        self.WeekdayOffsets = None
        self.MonthOffsets   = None

        self.dateSep      = None
        self.timeSep      = None
        self.am           = None
        self.pm           = None
        self.meridian     = None
        self.usesMeridian = None
        self.uses24       = None
        self.dp_order     = None

        # raw regex pattern strings, filled in by _initPatterns
        self.RE_DATE4     = r''
        self.RE_DATE3     = r''
        self.RE_MONTH     = r''
        self.RE_WEEKDAY   = r''
        self.RE_SPECIAL   = r''
        self.RE_UNITS     = r''
        self.RE_QUNITS    = r''
        self.RE_MODIFIER  = r''
        self.RE_MODIFIER2 = r''
        self.RE_TIMEHMS   = r''
        self.RE_TIMEHMS2  = r''
        self.RE_DATE      = r''
        self.RE_DATE2     = r''
        self.RE_DAY       = r''
        self.RE_DAY2      = r''
        self.RE_TIME      = r''
        self.RE_REMAINING = r''
        self.RE_RTIMEHMS  = r''
        self.RE_RTIMEHMS2 = r''
        self.RE_RDATE     = r''
        self.RE_RDATE3    = r''
        self.DATERNG1     = r''
        self.DATERNG2     = r''
        self.DATERNG3     = r''
        self.TIMERNG1     = r''
        self.TIMERNG2     = r''
        self.TIMERNG3     = r''
        self.TIMERNG4     = r''

        # populate locale data, offsets, symbols and regex patterns (order matters)
        _initLocale(self)
        _initConstants(self)
        _initSymbols(self)
        _initPatterns(self)

        self.re_option  = re.IGNORECASE + re.VERBOSE
        # map of lazily-compiled regex attribute names to their pattern text;
        # __getattr__ compiles these on first use
        self.cre_source = { 'CRE_SPECIAL':   self.RE_SPECIAL,
                            'CRE_UNITS':     self.RE_UNITS,
                            'CRE_QUNITS':    self.RE_QUNITS,
                            'CRE_MODIFIER':  self.RE_MODIFIER,
                            'CRE_MODIFIER2': self.RE_MODIFIER2,
                            'CRE_TIMEHMS':   self.RE_TIMEHMS,
                            'CRE_TIMEHMS2':  self.RE_TIMEHMS2,
                            'CRE_DATE':      self.RE_DATE,
                            'CRE_DATE2':     self.RE_DATE2,
                            'CRE_DATE3':     self.RE_DATE3,
                            'CRE_DATE4':     self.RE_DATE4,
                            'CRE_MONTH':     self.RE_MONTH,
                            'CRE_WEEKDAY':   self.RE_WEEKDAY,
                            'CRE_DAY':       self.RE_DAY,
                            'CRE_DAY2':      self.RE_DAY2,
                            'CRE_TIME':      self.RE_TIME,
                            'CRE_REMAINING': self.RE_REMAINING,
                            'CRE_RTIMEHMS':  self.RE_RTIMEHMS,
                            'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
                            'CRE_RDATE':     self.RE_RDATE,
                            'CRE_RDATE3':    self.RE_RDATE3,
                            'CRE_TIMERNG1':  self.TIMERNG1,
                            'CRE_TIMERNG2':  self.TIMERNG2,
                            'CRE_TIMERNG3':  self.TIMERNG3,
                            'CRE_TIMERNG4':  self.TIMERNG4,
                            'CRE_DATERNG1':  self.DATERNG1,
                            'CRE_DATERNG2':  self.DATERNG2,
                            'CRE_DATERNG3':  self.DATERNG3,
                          }
        self.cre_keys = self.cre_source.keys()

    def __getattr__(self, name):
        """
        Compile the requested CRE_* regex on first access and cache it as a
        real instance attribute, so __getattr__ is not consulted again.
        """
        if name in self.cre_keys:
            value = re.compile(self.cre_source[name], self.re_option)
            setattr(self, name, value)
            return value
        else:
            # fixed: was the Python-2-only statement form
            # 'raise AttributeError, name'; the call form behaves
            # identically and also parses under Python 3
            raise AttributeError(name)

    def daysInMonth(self, month, year):
        """
        Take the given month (1-12) and a given year (4 digit) return
        the number of days in the month adjusting for leap year as needed

        Returns None for an out-of-range month.
        """
        result = None

        if month > 0 and month <= 12:
            result = self._DaysInMonthList[month - 1]

            if month == 2:
                if year in self._leapYears:
                    result += 1
                else:
                    # not in the pre-seeded cache: ask calendar and remember
                    if calendar.isleap(year):
                        self._leapYears.append(year)
                        result += 1

        return result

    def buildSources(self, sourceTime=None):
        """
        Return a dictionary of date/time tuples based on the keys
        found in self.re_sources.

        The current time is used as the default and any specified
        item found in self.re_sources is inserted into the value
        and the generated dictionary is returned.

        @param sourceTime: optional 9-item time tuple to use instead of
                           time.localtime()
        """
        if sourceTime is None:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
        else:
            (yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime

        sources  = {}
        defaults = { 'yr': yr, 'mth': mth, 'dy':  dy,
                     'hr': hr, 'mn':  mn,  'sec': sec, }

        for item in self.re_sources:
            values = {}
            source = self.re_sources[item]

            # overlay the source's fixed fields (e.g. noon's hr=12) on top
            # of the current date/time defaults
            for key in defaults.keys():
                if key in source:
                    values[key] = source[key]
                else:
                    values[key] = defaults[key]

            sources[item] = ( values['yr'], values['mth'], values['dy'],
                              values['hr'], values['mn'], values['sec'], wd, yd, isdst )

        return sources
|
vilmibm/done | parsedatetime/parsedatetime_consts.py | _initConstants | python | def _initConstants(ptc):
# build weekday offsets - yes, it assumes the Weekday and shortWeekday
# lists are in the same order and Mon..Sun (Python style)
ptc.WeekdayOffsets = {}
o = 0
for key in ptc.Weekdays:
ptc.WeekdayOffsets[key] = o
o += 1
o = 0
for key in ptc.shortWeekdays:
ptc.WeekdayOffsets[key] = o
o += 1
# build month offsets - yes, it assumes the Months and shortMonths
# lists are in the same order and Jan..Dec
ptc.MonthOffsets = {}
o = 1
for key in ptc.Months:
ptc.MonthOffsets[key] = o
o += 1
o = 1
for key in ptc.shortMonths:
ptc.MonthOffsets[key] = o
o += 1 | Create localized versions of the units, week and month names | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L841-L869 | null | #!/usr/bin/env python
"""
parsedatetime constants and helper functions to determine
regex values from Locale information if present.
Also contains the internal Locale classes to give some sane
defaults if PyICU is not found.
"""
__license__ = """
Copyright (c) 2004-2008 Mike Taylor
Copyright (c) 2006-2008 Darshana Chhajed
Copyright (c) 2007 Bernd Zeimetz <bzed@debian.org>
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import PyICU as pyicu
except:
pyicu = None
import datetime
import calendar
import time
import re
# NOTE(review): this is a second, verbatim definition of pdtLocale_en - the
# same class is already defined earlier in this file.  Python silently keeps
# whichever definition executes last; the duplication should be removed.
class pdtLocale_en:
    """
    en_US Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for USA
    """
    localeID      = 'en_US'   # don't use a unicode string
    dateSep       = [ u'/', u'.' ]
    timeSep       = [ u':' ]
    meridian      = [ u'AM', u'PM' ]
    usesMeridian  = True
    uses24        = False

    # weekday names, Monday first (Python weekday() ordering); lowercase
    Weekdays      = [ u'monday', u'tuesday', u'wednesday',
                      u'thursday', u'friday', u'saturday', u'sunday',
                    ]
    shortWeekdays = [ u'mon', u'tues', u'wed',
                      u'thu', u'fri', u'sat', u'sun',
                    ]
    Months        = [ u'january', u'february', u'march',
                      u'april',   u'may',      u'june',
                      u'july',    u'august',   u'september',
                      u'october', u'november', u'december',
                    ]
    shortMonths   = [ u'jan', u'feb', u'mar',
                      u'apr', u'may', u'jun',
                      u'jul', u'aug', u'sep',
                      u'oct', u'nov', u'dec',
                    ]
    dateFormats   = { 'full':   'EEEE, MMMM d, yyyy',
                      'long':   'MMMM d, yyyy',
                      'medium': 'MMM d, yyyy',
                      'short':  'M/d/yy',
                    }
    timeFormats   = { 'full':   'h:mm:ss a z',
                      'long':   'h:mm:ss a z',
                      'medium': 'h:mm:ss a',
                      'short':  'h:mm a',
                    }
    dp_order      = [ u'm', u'd', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr'  ],
              'days':    [ 'day',    'dy'  ],
              'weeks':   [ 'week',   'wk'  ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr'  ],
            }
    # text constants to be used by regex's later
    re_consts = { 'specials':      'in|on|of|at',
                  'timeseperator': ':',
                  'rangeseperator': '-',
                  'daysuffix':     'rd|st|nd|th',
                  'meridian':      'am|pm|a.m.|p.m.|a|p',
                  'qunits':        'h|m|s|d|w|m|y',
                  'now':           [ 'now' ],
                }
    # Used to adjust the returned date before/after the source
    modifiers = { 'from':      1,
                  'before':   -1,
                  'after':     1,
                  'ago':      -1,
                  'prior':    -1,
                  'prev':     -1,
                  'last':     -1,
                  'next':      1,
                  'previous': -1,
                  'in a':      2,
                  'end of':    0,
                  'eod':       0,
                  'eo':        0
                }
    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }
    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
# NOTE(review): this is a second, verbatim definition of pdtLocale_au - the
# same class is already defined earlier in this file.  Python keeps whichever
# definition executes last, so this copy is the one that takes effect; the
# duplication should be removed.
class pdtLocale_au:
    """
    en_AU Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for Australia
    """
    localeID      = 'en_AU'   # don't use a unicode string
    dateSep       = [ u'-', u'/' ]
    timeSep       = [ u':' ]
    meridian      = [ u'AM', u'PM' ]
    usesMeridian  = True
    uses24        = False

    # weekday names, Monday first (Python weekday() ordering); lowercase
    Weekdays      = [ u'monday', u'tuesday', u'wednesday',
                      u'thursday', u'friday', u'saturday', u'sunday',
                    ]
    shortWeekdays = [ u'mon', u'tues', u'wed',
                      u'thu', u'fri', u'sat', u'sun',
                    ]
    Months        = [ u'january', u'february', u'march',
                      u'april',   u'may',      u'june',
                      u'july',    u'august',   u'september',
                      u'october', u'november', u'december',
                    ]
    shortMonths   = [ u'jan', u'feb', u'mar',
                      u'apr', u'may', u'jun',
                      u'jul', u'aug', u'sep',
                      u'oct', u'nov', u'dec',
                    ]
    dateFormats   = { 'full':   'EEEE, d MMMM yyyy',
                      'long':   'd MMMM yyyy',
                      'medium': 'dd/MM/yyyy',
                      'short':  'd/MM/yy',
                    }
    timeFormats   = { 'full':   'h:mm:ss a z',
                      'long':   'h:mm:ss a',
                      'medium': 'h:mm:ss a',
                      'short':  'h:mm a',
                    }
    # order of date parts in a numeric date: day/month/year
    dp_order      = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr'  ],
              'days':    [ 'day',    'dy'  ],
              'weeks':   [ 'week',   'wk'  ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr'  ],
            }
    # text constants to be used by regex's later
    re_consts = { 'specials':      'in|on|of|at',
                  'timeseperator': ':',
                  'rangeseperator': '-',
                  'daysuffix':     'rd|st|nd|th',
                  'meridian':      'am|pm|a.m.|p.m.|a|p',
                  'qunits':        'h|m|s|d|w|m|y',
                  'now':           [ 'now' ],
                }
    # Used to adjust the returned date before/after the source
    modifiers = { 'from':      1,
                  'before':   -1,
                  'after':     1,
                  'ago':      -1,   # fixed: was +1; "<n> <units> ago" must move
                                    # into the past, matching en_US
                  'prior':    -1,
                  'prev':     -1,
                  'last':     -1,
                  'next':      1,
                  'previous': -1,
                  'in a':      2,
                  'end of':    0,
                  'eod':       0,   # fixed: was missing; required for the 'eod'
                                    # source below to be usable
                  'eo':        0,
                }
    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }
    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
class pdtLocale_es:
    """
    es Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings in Spanish

    Note that I don't speak Spanish so many of the items below are still in English
    """
    localeID = 'es'   # don't use a unicode string
    dateSep  = [ u'/' ]
    timeSep  = [ u':' ]
    meridian = []
    usesMeridian = False
    uses24       = True

    Weekdays = [ u'lunes', u'martes', u'mi\xe9rcoles',
                 u'jueves', u'viernes', u's\xe1bado', u'domingo',
               ]
    shortWeekdays = [ u'lun', u'mar', u'mi\xe9',
                      u'jue', u'vie', u's\xe1b', u'dom',
                    ]
    Months = [ u'enero', u'febrero', u'marzo',
               u'abril', u'mayo', u'junio',
               u'julio', u'agosto', u'septiembre',
               u'octubre', u'noviembre', u'diciembre'
             ]
    shortMonths = [ u'ene', u'feb', u'mar',
                    u'abr', u'may', u'jun',
                    u'jul', u'ago', u'sep',
                    u'oct', u'nov', u'dic'
                  ]
    dateFormats = { 'full':   "EEEE d' de 'MMMM' de 'yyyy",
                    'long':   "d' de 'MMMM' de 'yyyy",
                    'medium': "dd-MMM-yy",
                    'short':  "d/MM/yy",
                  }
    timeFormats = { 'full':   "HH'H'mm' 'ss z",
                    'long':   "HH:mm:ss z",
                    'medium': "HH:mm:ss",
                    'short':  "HH:mm",
                  }

    dp_order = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'second', 'sec' ],
              'minutes': [ 'minute', 'min' ],
              'hours':   [ 'hour',   'hr' ],
              'days':    [ 'day',    'dy' ],
              'weeks':   [ 'week',   'wk' ],
              'months':  [ 'month',  'mth' ],
              'years':   [ 'year',   'yr' ],
            }

    # text constants to be used by regex's later
    #
    # NOTE: 'timeseperator' and 'dateseperator' must be plain strings, not the
    # timeSep/dateSep *lists* -- these values are interpolated directly into
    # regex templates via '%(timeseperator)s' in _initPatterns, and a list
    # would render as "[u':']" and corrupt every time pattern for this locale
    # (en_US and de_DE both use plain strings here).
    re_consts = { 'specials':       'in|on|of|at',
                  'timeseperator':  ':',
                  'dateseperator':  '/',
                  'rangeseperator': '-',
                  'daysuffix':      'rd|st|nd|th',
                  'qunits':         'h|m|s|d|w|m|y',
                  'now':            [ 'now' ],
                }

    # Used to adjust the returned date before/after the source
    modifiers = { 'from':     1,
                  'before':  -1,
                  'after':    1,
                  'ago':      1,
                  'prior':   -1,
                  'prev':    -1,
                  'last':    -1,
                  'next':     1,
                  'previous': -1,
                  'in a':     2,
                  'end of':   0,
                  'eo':       0,
                }

    dayoffsets = { 'tomorrow':   1,
                   'today':      0,
                   'yesterday': -1,
                 }

    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { 'noon':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'lunch':     { 'hr': 12, 'mn': 0, 'sec': 0 },
                   'morning':   { 'hr':  6, 'mn': 0, 'sec': 0 },
                   'breakfast': { 'hr':  8, 'mn': 0, 'sec': 0 },
                   'dinner':    { 'hr': 19, 'mn': 0, 'sec': 0 },
                   'evening':   { 'hr': 18, 'mn': 0, 'sec': 0 },
                   'midnight':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   'night':     { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'tonight':   { 'hr': 21, 'mn': 0, 'sec': 0 },
                   'eod':       { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
class pdtLocale_de:
    """
    de_DE Locale constants

    This class will be used to initialize L{Constants} if PyICU is not located.

    Contributed by Debian parsedatetime package maintainer Bernd Zeimetz <bzed@debian.org>

    Defined as class variables are the lists and strings needed by parsedatetime
    to evaluate strings for German
    """
    localeID = 'de_DE'   # don't use a unicode string
    dateSep  = [ u'.' ]
    timeSep  = [ u':' ]
    meridian = [ ]
    usesMeridian = False
    uses24       = True

    Weekdays = [ u'montag', u'dienstag', u'mittwoch',
                 u'donnerstag', u'freitag', u'samstag', u'sonntag',
               ]
    shortWeekdays = [ u'mo', u'di', u'mi',
                      u'do', u'fr', u'sa', u'so',
                    ]
    Months = [ u'januar', u'februar', u'm\xe4rz',
               u'april', u'mai', u'juni',
               u'juli', u'august', u'september',
               u'oktober', u'november', u'dezember',
             ]
    shortMonths = [ u'jan', u'feb', u'mrz',
                    u'apr', u'mai', u'jun',
                    u'jul', u'aug', u'sep',
                    u'okt', u'nov', u'dez',
                  ]
    dateFormats = { 'full':   u'EEEE, d. MMMM yyyy',
                    'long':   u'd. MMMM yyyy',
                    'medium': u'dd.MM.yyyy',
                    'short':  u'dd.MM.yy'
                  }
    timeFormats = { 'full':   u'HH:mm:ss v',
                    'long':   u'HH:mm:ss z',
                    'medium': u'HH:mm:ss',
                    'short':  u'HH:mm'
                  }

    dp_order = [ u'd', u'm', u'y' ]

    # this will be added to re_consts later
    units = { 'seconds': [ 'sekunden', 'sek',  's' ],
              'minutes': [ 'minuten',  'min' , 'm' ],
              'hours':   [ 'stunden',  'std',  'h' ],
              'days':    [ 'tage',     't' ],
              'weeks':   [ 'wochen',   'w' ],
              'months':  [ 'monate' ],  # the short version would be a capital M,
                                        # as I understand it we can't distinguis
                                        # between m for minutes and M for months.
              'years':   [ 'jahre',    'j' ],
            }

    # text constants to be used by regex's later
    re_consts = { 'specials':       'am|dem|der|im|in|den|zum',
                  'timeseperator':  ':',
                  'rangeseperator': '-',
                  'daysuffix':      '',
                  'qunits':         'h|m|s|t|w|m|j',
                  'now':            [ 'jetzt' ],
                }

    # Used to adjust the returned date before/after the source
    # still looking for insight on how to translate all of them to german.
    modifiers = { u'from':         1,
                  u'before':      -1,
                  u'after':        1,
                  u'vergangener': -1,
                  u'vorheriger':  -1,
                  u'prev':        -1,
                  u'letzter':     -1,
                  u'n\xe4chster':  1,
                  u'dieser':       0,
                  u'previous':    -1,
                  u'in a':         2,
                  u'end of':       0,
                  u'eod':          0,
                  u'eo':           0,
                }

    # morgen/abermorgen does not work, see http://code.google.com/p/parsedatetime/issues/detail?id=19
    dayoffsets = { u'morgen':        1,
                   u'heute':         0,
                   u'gestern':      -1,
                   u'vorgestern':   -2,
                   u'\xfcbermorgen': 2,
                 }

    # special day and/or times, i.e. lunch, noon, evening
    # each element in the dictionary is a dictionary that is used
    # to fill in any value to be replace - the current date/time will
    # already have been populated by the method buildSources
    re_sources = { u'mittag':       { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'mittags':      { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'mittagessen':  { 'hr': 12, 'mn': 0, 'sec': 0 },
                   u'morgen':       { 'hr':  6, 'mn': 0, 'sec': 0 },
                   u'morgens':      { 'hr':  6, 'mn': 0, 'sec': 0 },
                   # BUGFIX: this key was written with the invalid escape "\e4"
                   # (left in the string as a literal backslash-e) and a
                   # misspelled word; the intended key is "breakfast" in German.
                   u'fr\xfchst\xfcck': { 'hr': 8, 'mn': 0, 'sec': 0 },
                   u'abendessen':   { 'hr': 19, 'mn': 0, 'sec': 0 },
                   u'abend':        { 'hr': 18, 'mn': 0, 'sec': 0 },
                   u'abends':       { 'hr': 18, 'mn': 0, 'sec': 0 },
                   u'mitternacht':  { 'hr':  0, 'mn': 0, 'sec': 0 },
                   u'nacht':        { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'nachts':       { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'heute abend':  { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'heute nacht':  { 'hr': 21, 'mn': 0, 'sec': 0 },
                   u'feierabend':   { 'hr': 17, 'mn': 0, 'sec': 0 },
                 }
# Registry mapping locale identifiers to the pdtLocale_* constant classes.
# _initLocale() consults this table when PyICU is unavailable (or not
# requested) to resolve a requested locale to its lists/strings.
pdtLocales = { 'en_US': pdtLocale_en,
               'en_AU': pdtLocale_au,
               'es_ES': pdtLocale_es,
               'de_DE': pdtLocale_de,
             }
def _initLocale(ptc):
    """
    Helper function to initialize the different lists and strings
    from either PyICU or one of the internal pdt Locales and store
    them into ptc.

    Populates on ptc: Weekdays, shortWeekdays, Months, shortMonths,
    dateFormats, timeFormats, re_sources, re_values, Modifiers,
    dayOffsets and Units.
    """
    def lcase(x):
        # small helper so map() can lowercase every locale string
        return x.lower()

    if pyicu and ptc.usePyICU:
        # --- PyICU path: pull locale data from ICU ---
        ptc.icuLocale = None

        if ptc.localeID is not None:
            ptc.icuLocale = pyicu.Locale(ptc.localeID)

        # requested locale not found (or none given) - walk the fallbacks
        if ptc.icuLocale is None:
            for id in range(0, len(ptc.fallbackLocales)):
                ptc.localeID  = ptc.fallbackLocales[id]
                ptc.icuLocale = pyicu.Locale(ptc.localeID)

                if ptc.icuLocale is not None:
                    break

        ptc.icuSymbols = pyicu.DateFormatSymbols(ptc.icuLocale)

        # grab ICU list of weekdays, skipping first entry which
        # is always blank
        # NOTE(review): map() returns a list under Python 2; the slicing
        # below would fail on Python 3's map iterator - this module is
        # written for Python 2.
        wd  = map(lcase, ptc.icuSymbols.getWeekdays()[1:])
        swd = map(lcase, ptc.icuSymbols.getShortWeekdays()[1:])

        # store them in our list with Monday first (ICU puts Sunday first)
        ptc.Weekdays      = wd[1:] + wd[0:1]
        ptc.shortWeekdays = swd[1:] + swd[0:1]

        ptc.Months      = map(lcase, ptc.icuSymbols.getMonths())
        ptc.shortMonths = map(lcase, ptc.icuSymbols.getShortMonths())

        # not quite sure how to init this so for now
        # set it to none so it will be set to the en_US defaults for now
        ptc.re_consts = None

        # keep the live ICU formatter objects (used by _initSymbols to
        # probe the separators) and their pattern strings
        ptc.icu_df = { 'full':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
                       'long':   pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
                       'medium': pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
                       'short':  pyicu.DateFormat.createDateInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
                     }
        ptc.icu_tf = { 'full':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kFull,   ptc.icuLocale),
                       'long':   pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kLong,   ptc.icuLocale),
                       'medium': pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kMedium, ptc.icuLocale),
                       'short':  pyicu.DateFormat.createTimeInstance(pyicu.DateFormat.kShort,  ptc.icuLocale),
                     }

        ptc.dateFormats = { 'full':   ptc.icu_df['full'].toPattern(),
                            'long':   ptc.icu_df['long'].toPattern(),
                            'medium': ptc.icu_df['medium'].toPattern(),
                            'short':  ptc.icu_df['short'].toPattern(),
                          }
        ptc.timeFormats = { 'full':   ptc.icu_tf['full'].toPattern(),
                            'long':   ptc.icu_tf['long'].toPattern(),
                            'medium': ptc.icu_tf['medium'].toPattern(),
                            'short':  ptc.icu_tf['short'].toPattern(),
                          }
    else:
        # --- fallback path: use the bundled pdtLocale_* classes ---
        if not ptc.localeID in pdtLocales:
            for id in range(0, len(ptc.fallbackLocales)):
                ptc.localeID = ptc.fallbackLocales[id]

                if ptc.localeID in pdtLocales:
                    break

        ptc.locale   = pdtLocales[ptc.localeID]
        ptc.usePyICU = False

        ptc.Weekdays      = ptc.locale.Weekdays
        ptc.shortWeekdays = ptc.locale.shortWeekdays
        ptc.Months        = ptc.locale.Months
        ptc.shortMonths   = ptc.locale.shortMonths
        ptc.dateFormats   = ptc.locale.dateFormats
        ptc.timeFormats   = ptc.locale.timeFormats

    # these values are used to setup the various bits
    # of the regex values used to parse
    #
    # check if a local set of constants has been
    # provided, if not use en_US as the default
    if ptc.localeID in pdtLocales:
        ptc.re_sources = pdtLocales[ptc.localeID].re_sources
        ptc.re_values  = pdtLocales[ptc.localeID].re_consts

        units = pdtLocales[ptc.localeID].units

        ptc.Modifiers  = pdtLocales[ptc.localeID].modifiers
        ptc.dayOffsets = pdtLocales[ptc.localeID].dayoffsets

        # for now, pull over any missing keys from the US set
        for key in pdtLocales['en_US'].re_consts:
            if not key in ptc.re_values:
                ptc.re_values[key] = pdtLocales['en_US'].re_consts[key]
    else:
        ptc.re_sources = pdtLocales['en_US'].re_sources
        ptc.re_values  = pdtLocales['en_US'].re_consts
        ptc.Modifiers  = pdtLocales['en_US'].modifiers
        ptc.dayOffsets = pdtLocales['en_US'].dayoffsets
        units          = pdtLocales['en_US'].units

    # escape any regex special characters that may be found
    wd   = tuple(map(re.escape, ptc.Weekdays))
    swd  = tuple(map(re.escape, ptc.shortWeekdays))
    mth  = tuple(map(re.escape, ptc.Months))
    smth = tuple(map(re.escape, ptc.shortMonths))

    # build alternation strings ('jan|feb|...') used inside the patterns;
    # the fixed %s counts assume exactly 12 months and 7 weekdays
    ptc.re_values['months']      = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % mth
    ptc.re_values['shortmonths'] = '%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s' % smth
    ptc.re_values['days']        = '%s|%s|%s|%s|%s|%s|%s' % wd
    ptc.re_values['shortdays']   = '%s|%s|%s|%s|%s|%s|%s' % swd

    l = []
    for unit in units:
        l.append('|'.join(units[unit]))

    ptc.re_values['units'] = '|'.join(l)
    ptc.Units              = ptc.re_values['units'].split('|')
def _initSymbols(ptc):
    """
    Helper function to initialize the single character constants
    and other symbols needed.

    Determines the date/time separators, meridian (AM/PM) strings and
    date-part ordering, either by probing a live ICU formatter or by
    copying them from the selected pdtLocale_* class.
    """
    ptc.timeSep  = [ u':' ]
    ptc.dateSep  = [ u'/' ]
    ptc.meridian = [ u'AM', u'PM' ]

    ptc.usesMeridian = True
    ptc.uses24       = False

    if pyicu and ptc.usePyICU:
        am = u''
        pm = u''
        ts = ''

        # ICU doesn't seem to provide directly the
        # date or time seperator - so we have to
        # figure it out
        o = ptc.icu_tf['short']
        s = ptc.timeFormats['short']

        # 'a' in the pattern => 12h clock with AM/PM; 'H' => 24h clock
        ptc.usesMeridian = u'a' in s
        ptc.uses24       = u'H' in s

        # format a known time and strip the digits; whatever is left
        # reveals the separator (and the AM text, if any)
        # '11:45 AM' or '11:45'
        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))

        # ': AM' or ':'
        s = s.replace('11', '').replace('45', '')

        if len(s) > 0:
            ts = s[0]

        if ptc.usesMeridian:
            # '23:45 AM' or '23:45'
            am = s[1:].strip()
            s  = o.format(datetime.datetime(2003, 10, 30, 23, 45))

            if ptc.uses24:
                s = s.replace('23', '')
            else:
                s = s.replace('11', '')

            # 'PM' or ''
            pm = s.replace('45', '').replace(ts, '').strip()

        ptc.timeSep  = [ ts ]
        ptc.meridian = [ am, pm ]

        # same digit-stripping trick for the date separator
        o = ptc.icu_df['short']
        s = o.format(datetime.datetime(2003, 10, 30, 11, 45))
        s = s.replace('10', '').replace('30', '').replace('03', '').replace('2003', '')

        if len(s) > 0:
            ds = s[0]
        else:
            ds = '/'

        ptc.dateSep = [ ds ]

        # derive the date-part order (e.g. [u'd', u'm', u'y']) from the
        # first letter of each field in the short date pattern
        s = ptc.dateFormats['short']
        l = s.lower().split(ds)
        dp_order = []

        for s in l:
            if len(s) > 0:
                dp_order.append(s[:1])

        ptc.dp_order = dp_order
    else:
        # no PyICU - copy the symbols straight from the locale class
        ptc.timeSep      = ptc.locale.timeSep
        ptc.dateSep      = ptc.locale.dateSep
        ptc.meridian     = ptc.locale.meridian
        ptc.usesMeridian = ptc.locale.usesMeridian
        ptc.uses24       = ptc.locale.uses24
        ptc.dp_order     = ptc.locale.dp_order

    # build am and pm lists to contain
    # original case, lowercase and first-char
    # versions of the meridian text
    if len(ptc.meridian) > 0:
        am     = ptc.meridian[0]
        ptc.am = [ am ]

        if len(am) > 0:
            ptc.am.append(am[0])
            am = am.lower()
            ptc.am.append(am)
            ptc.am.append(am[0])
    else:
        am     = ''
        ptc.am = [ '', '' ]

    if len(ptc.meridian) > 1:
        pm     = ptc.meridian[1]
        ptc.pm = [ pm ]

        if len(pm) > 0:
            ptc.pm.append(pm[0])
            pm = pm.lower()
            ptc.pm.append(pm)
            ptc.pm.append(pm[0])
    else:
        pm     = ''
        ptc.pm = [ '', '' ]
def _initPatterns(ptc):
    """
    Helper function to take the different localized bits from ptc and
    create the regex strings.

    Every template below is written for re.VERBOSE and is filled in by
    '%'-interpolation against ptc.re_values, so each value in re_values
    must be a plain string that is valid inside a regex.
    """
    # TODO add code to parse the date formats and build the regexes up from sub-parts
    # TODO find all hard-coded uses of date/time seperators

    # "15th july", "15 July 2006"
    ptc.RE_DATE4 = r'''(?P<date>(((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?(,)?(\s)?)
                                 (?P<mthname>(%(months)s|%(shortmonths)s))\s?
                                 (?P<year>\d\d(\d\d)?)?
                                )
                      )''' % ptc.re_values

    # I refactored DATE3 to fix Issue 16 http://code.google.com/p/parsedatetime/issues/detail?id=16
    # I suspect the final line was for a trailing time - but testing shows it's not needed
    # ptc.RE_DATE3     = r'''(?P<date>((?P<mthname>(%(months)s|%(shortmonths)s))\s?
    #                                  ((?P<day>\d\d?)(\s?|%(daysuffix)s|$)+)?
    #                                  (,\s?(?P<year>\d\d(\d\d)?))?))
    #                        (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values

    # "july 15th", "July 15, 2006"
    ptc.RE_DATE3 = r'''(?P<date>(
                                 (((?P<mthname>(%(months)s|%(shortmonths)s))|
                                 ((?P<day>\d\d?)(?P<suffix>%(daysuffix)s)?))(\s)?){1,2}
                                 ((,)?(\s)?(?P<year>\d\d(\d\d)?))?
                                )
                      )''' % ptc.re_values

    # month name with optional year, e.g. "March 2007"
    ptc.RE_MONTH = r'''(\s?|^)
                       (?P<month>(
                                  (?P<mthname>(%(months)s|%(shortmonths)s))
                                  (\s?(?P<year>(\d\d\d\d)))?
                                 ))
                       (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values

    # a weekday name, long or short form
    ptc.RE_WEEKDAY = r'''(\s?|^)
                         (?P<weekday>(%(days)s|%(shortdays)s))
                         (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values

    # leading filler words such as "in", "on", "at"
    ptc.RE_SPECIAL = r'(?P<special>^[%(specials)s]+)\s+' % ptc.re_values

    # quantity + full unit name, e.g. "5 days"
    ptc.RE_UNITS = r'''(?P<qty>(-?\d+\s*
                                (?P<units>((%(units)s)s?))
                               ))''' % ptc.re_values

    # quantity + single-letter unit, e.g. "5d"
    ptc.RE_QUNITS = r'''(?P<qty>(-?\d+\s?
                                 (?P<qunits>%(qunits)s)
                                 (\s?|,|$)
                                ))''' % ptc.re_values

    # modifiers that precede a target, e.g. "next", "last", "end of"
    ptc.RE_MODIFIER = r'''(\s?|^)
                          (?P<modifier>
                           (previous|prev|last|next|eod|eo|(end\sof)|(in\sa)))''' % ptc.re_values

    # modifiers that follow a target, e.g. "3 days ago"
    ptc.RE_MODIFIER2 = r'''(\s?|^)
                           (?P<modifier>
                            (from|before|after|ago|prior))
                           (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values

    # times such as "17:30" or "17:30:15" (two-digit minutes required)
    ptc.RE_TIMEHMS = r'''(\s?|^)
                         (?P<hours>\d\d?)
                         (?P<tsep>%(timeseperator)s|)
                         (?P<minutes>\d\d)
                         (?:(?P=tsep)(?P<seconds>\d\d(?:[.,]\d+)?))?''' % ptc.re_values

    # times with an optional meridian, e.g. "5pm", "5:30 am"
    ptc.RE_TIMEHMS2 = r'''(?P<hours>(\d\d?))
                          ((?P<tsep>%(timeseperator)s|)
                           (?P<minutes>(\d\d?))
                           (?:(?P=tsep)
                              (?P<seconds>\d\d?
                               (?:[.,]\d+)?))?)?''' % ptc.re_values

    if 'meridian' in ptc.re_values:
        ptc.RE_TIMEHMS2 += r'\s?(?P<meridian>(%(meridian)s))' % ptc.re_values

    # '.' is always accepted as a date separator in addition to the locale's own
    dateSeps = ''.join(ptc.dateSep) + '.'

    # numeric dates like "04/21/06" or "4.21.2006"
    ptc.RE_DATE = r'''(\s?|^)
                      (?P<date>(\d\d?[%s]\d\d?([%s]\d\d(\d\d)?)?))
                      (\s?|$|[^0-9a-zA-Z])''' % (dateSeps, dateSeps)

    # any single date separator character
    ptc.RE_DATE2 = r'[%s]' % dateSeps

    # relative day words
    ptc.RE_DAY = r'''(\s?|^)
                     (?P<day>(today|tomorrow|yesterday))
                     (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values

    # bare day-of-month number with optional ordinal suffix
    ptc.RE_DAY2 = r'''(?P<day>\d\d?)|(?P<suffix>%(daysuffix)s)
                   ''' % ptc.re_values

    # named times of day
    ptc.RE_TIME = r'''(\s?|^)
                      (?P<time>(morning|breakfast|noon|lunch|evening|midnight|tonight|dinner|night|now))
                      (\s?|$|[^0-9a-zA-Z])''' % ptc.re_values

    ptc.RE_REMAINING = r'\s+'

    # Regex for date/time ranges
    ptc.RE_RTIMEHMS = r'''(\s?|^)
                          (\d\d?)%(timeseperator)s
                          (\d\d)
                          (%(timeseperator)s(\d\d))?
                          (\s?|$)''' % ptc.re_values

    ptc.RE_RTIMEHMS2 = r'''(\s?|^)
                           (\d\d?)
                           (%(timeseperator)s(\d\d?))?
                           (%(timeseperator)s(\d\d?))?''' % ptc.re_values

    if 'meridian' in ptc.re_values:
        ptc.RE_RTIMEHMS2 += r'\s?(%(meridian)s)' % ptc.re_values

    ptc.RE_RDATE  = r'(\d+([%s]\d+)+)' % dateSeps
    ptc.RE_RDATE3 = r'''((((%(months)s))\s?
                          ((\d\d?)
                           (\s?|%(daysuffix)s|$)+)?
                          (,\s?\d\d\d\d)?))''' % ptc.re_values

    # "06/07/06 - 08/09/06"
    ptc.DATERNG1 = ptc.RE_RDATE + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE
    ptc.DATERNG1 = ptc.DATERNG1 % ptc.re_values

    # "march 31 - june 1st, 2006"
    ptc.DATERNG2 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RDATE3
    ptc.DATERNG2 = ptc.DATERNG2 % ptc.re_values

    # "march 1rd -13th"
    ptc.DATERNG3 = ptc.RE_RDATE3 + r'\s?%(rangeseperator)s\s?(\d\d?)\s?(rd|st|nd|th)?'
    ptc.DATERNG3 = ptc.DATERNG3 % ptc.re_values

    # "4:00:55 pm - 5:90:44 am", '4p-5p'
    ptc.TIMERNG1 = ptc.RE_RTIMEHMS2 + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
    ptc.TIMERNG1 = ptc.TIMERNG1 % ptc.re_values

    # "4:00 - 5:90 ", "4:55:55-3:44:55"
    ptc.TIMERNG2 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS
    ptc.TIMERNG2 = ptc.TIMERNG2 % ptc.re_values

    # "4-5pm "
    ptc.TIMERNG3 = r'\d\d?\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
    ptc.TIMERNG3 = ptc.TIMERNG3 % ptc.re_values

    # "4:30-5pm "
    ptc.TIMERNG4 = ptc.RE_RTIMEHMS + r'\s?%(rangeseperator)s\s?' + ptc.RE_RTIMEHMS2
    ptc.TIMERNG4 = ptc.TIMERNG4 % ptc.re_values

    # ptc.DaySuffixes = ptc.re_consts['daysuffix'].split('|')
class Constants:
"""
Default set of constants for parsedatetime.
If PyICU is present, then the class will first try to get PyICU
to return a locale specified by C{localeID}. If either C{localeID} is
None or if the locale does not exist within PyICU, then each of the
locales defined in C{fallbackLocales} is tried in order.
If PyICU is not present or none of the specified locales can be used,
then the class will initialize itself to the en_US locale.
if PyICU is not present or not requested, only the locales defined by
C{pdtLocales} will be searched.
"""
def __init__(self, localeID=None, usePyICU=True, fallbackLocales=['en_US']):
self.localeID = localeID
self.fallbackLocales = fallbackLocales
if 'en_US' not in self.fallbackLocales:
self.fallbackLocales.append('en_US')
# define non-locale specific constants
self.locale = None
self.usePyICU = usePyICU
# starting cache of leap years
# daysInMonth will add to this if during
# runtime it gets a request for a year not found
self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
2080, 2084, 2088, 2092, 2096 ]
self.Second = 1
self.Minute = 60 * self.Second
self.Hour = 60 * self.Minute
self.Day = 24 * self.Hour
self.Week = 7 * self.Day
self.Month = 30 * self.Day
self.Year = 365 * self.Day
self.rangeSep = u'-'
self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
self.BirthdayEpoch = 50
# DOWParseStyle controls how we parse "Tuesday"
# If the current day was Thursday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Current day marked as ***
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current -1,0 ***
# week +1 +1
#
# If the current day was Monday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1 -1
# current *** 0,+1
# week +1
self.DOWParseStyle = 1
# CurrentDOWParseStyle controls how we parse "Friday"
# If the current day was Friday and the text to parse is "Friday"
# then the following table shows how each style would be returned
# True/False. This also depends on DOWParseStyle.
#
# Current day marked as ***
#
# DOWParseStyle = 0
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T,F
# week +1
#
# DOWParseStyle = -1
# Sun Mon Tue Wed Thu Fri Sat
# week -1 F
# current T
# week +1
#
# DOWParseStyle = +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T
# week +1 F
self.CurrentDOWParseStyle = False
# initalize attributes to empty values to ensure
# they are defined
self.re_sources = None
self.re_values = None
self.Modifiers = None
self.dayOffsets = None
self.WeekdayOffsets = None
self.MonthOffsets = None
self.dateSep = None
self.timeSep = None
self.am = None
self.pm = None
self.meridian = None
self.usesMeridian = None
self.uses24 = None
self.dp_order = None
self.RE_DATE4 = r''
self.RE_DATE3 = r''
self.RE_MONTH = r''
self.RE_WEEKDAY = r''
self.RE_SPECIAL = r''
self.RE_UNITS = r''
self.RE_QUNITS = r''
self.RE_MODIFIER = r''
self.RE_MODIFIER2 = r''
self.RE_TIMEHMS = r''
self.RE_TIMEHMS2 = r''
self.RE_DATE = r''
self.RE_DATE2 = r''
self.RE_DAY = r''
self.RE_DAY2 = r''
self.RE_TIME = r''
self.RE_REMAINING = r''
self.RE_RTIMEHMS = r''
self.RE_RTIMEHMS2 = r''
self.RE_RDATE = r''
self.RE_RDATE3 = r''
self.DATERNG1 = r''
self.DATERNG2 = r''
self.DATERNG3 = r''
self.TIMERNG1 = r''
self.TIMERNG2 = r''
self.TIMERNG3 = r''
self.TIMERNG4 = r''
_initLocale(self)
_initConstants(self)
_initSymbols(self)
_initPatterns(self)
self.re_option = re.IGNORECASE + re.VERBOSE
self.cre_source = { 'CRE_SPECIAL': self.RE_SPECIAL,
'CRE_UNITS': self.RE_UNITS,
'CRE_QUNITS': self.RE_QUNITS,
'CRE_MODIFIER': self.RE_MODIFIER,
'CRE_MODIFIER2': self.RE_MODIFIER2,
'CRE_TIMEHMS': self.RE_TIMEHMS,
'CRE_TIMEHMS2': self.RE_TIMEHMS2,
'CRE_DATE': self.RE_DATE,
'CRE_DATE2': self.RE_DATE2,
'CRE_DATE3': self.RE_DATE3,
'CRE_DATE4': self.RE_DATE4,
'CRE_MONTH': self.RE_MONTH,
'CRE_WEEKDAY': self.RE_WEEKDAY,
'CRE_DAY': self.RE_DAY,
'CRE_DAY2': self.RE_DAY2,
'CRE_TIME': self.RE_TIME,
'CRE_REMAINING': self.RE_REMAINING,
'CRE_RTIMEHMS': self.RE_RTIMEHMS,
'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
'CRE_RDATE': self.RE_RDATE,
'CRE_RDATE3': self.RE_RDATE3,
'CRE_TIMERNG1': self.TIMERNG1,
'CRE_TIMERNG2': self.TIMERNG2,
'CRE_TIMERNG3': self.TIMERNG3,
'CRE_TIMERNG4': self.TIMERNG4,
'CRE_DATERNG1': self.DATERNG1,
'CRE_DATERNG2': self.DATERNG2,
'CRE_DATERNG3': self.DATERNG3,
}
self.cre_keys = self.cre_source.keys()
def __getattr__(self, name):
if name in self.cre_keys:
value = re.compile(self.cre_source[name], self.re_option)
setattr(self, name, value)
return value
else:
raise AttributeError, name
def daysInMonth(self, month, year):
"""
Take the given month (1-12) and a given year (4 digit) return
the number of days in the month adjusting for leap year as needed
"""
result = None
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result
def buildSources(self, sourceTime=None):
"""
Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned.
"""
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
sources = {}
defaults = { 'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec, }
for item in self.re_sources:
values = {}
source = self.re_sources[item]
for key in defaults.keys():
if key in source:
values[key] = source[key]
else:
values[key] = defaults[key]
sources[item] = ( values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'], wd, yd, isdst )
return sources
|
vilmibm/done | parsedatetime/parsedatetime_consts.py | Constants.buildSources | python | def buildSources(self, sourceTime=None):
if sourceTime is None:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = time.localtime()
else:
(yr, mth, dy, hr, mn, sec, wd, yd, isdst) = sourceTime
sources = {}
defaults = { 'yr': yr, 'mth': mth, 'dy': dy,
'hr': hr, 'mn': mn, 'sec': sec, }
for item in self.re_sources:
values = {}
source = self.re_sources[item]
for key in defaults.keys():
if key in source:
values[key] = source[key]
else:
values[key] = defaults[key]
sources[item] = ( values['yr'], values['mth'], values['dy'],
values['hr'], values['mn'], values['sec'], wd, yd, isdst )
return sources | Return a dictionary of date/time tuples based on the keys
found in self.re_sources.
The current time is used as the default and any specified
item found in self.re_sources is inserted into the value
and the generated dictionary is returned. | train | https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime_consts.py#L1087-L1118 | null | class Constants:
"""
Default set of constants for parsedatetime.
If PyICU is present, then the class will first try to get PyICU
to return a locale specified by C{localeID}. If either C{localeID} is
None or if the locale does not exist within PyICU, then each of the
locales defined in C{fallbackLocales} is tried in order.
If PyICU is not present or none of the specified locales can be used,
then the class will initialize itself to the en_US locale.
if PyICU is not present or not requested, only the locales defined by
C{pdtLocales} will be searched.
"""
def __init__(self, localeID=None, usePyICU=True, fallbackLocales=['en_US']):
self.localeID = localeID
self.fallbackLocales = fallbackLocales
if 'en_US' not in self.fallbackLocales:
self.fallbackLocales.append('en_US')
# define non-locale specific constants
self.locale = None
self.usePyICU = usePyICU
# starting cache of leap years
# daysInMonth will add to this if during
# runtime it gets a request for a year not found
self._leapYears = [ 1904, 1908, 1912, 1916, 1920, 1924, 1928, 1932, 1936, 1940, 1944,
1948, 1952, 1956, 1960, 1964, 1968, 1972, 1976, 1980, 1984, 1988,
1992, 1996, 2000, 2004, 2008, 2012, 2016, 2020, 2024, 2028, 2032,
2036, 2040, 2044, 2048, 2052, 2056, 2060, 2064, 2068, 2072, 2076,
2080, 2084, 2088, 2092, 2096 ]
self.Second = 1
self.Minute = 60 * self.Second
self.Hour = 60 * self.Minute
self.Day = 24 * self.Hour
self.Week = 7 * self.Day
self.Month = 30 * self.Day
self.Year = 365 * self.Day
self.rangeSep = u'-'
self._DaysInMonthList = (31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
self.BirthdayEpoch = 50
# DOWParseStyle controls how we parse "Tuesday"
# If the current day was Thursday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Current day marked as ***
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current -1,0 ***
# week +1 +1
#
# If the current day was Monday and the text to parse is "Tuesday"
# then the following table shows how each style would be returned
# -1, 0, +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1 -1
# current *** 0,+1
# week +1
self.DOWParseStyle = 1
# CurrentDOWParseStyle controls how we parse "Friday"
# If the current day was Friday and the text to parse is "Friday"
# then the following table shows how each style would be returned
# True/False. This also depends on DOWParseStyle.
#
# Current day marked as ***
#
# DOWParseStyle = 0
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T,F
# week +1
#
# DOWParseStyle = -1
# Sun Mon Tue Wed Thu Fri Sat
# week -1 F
# current T
# week +1
#
# DOWParseStyle = +1
#
# Sun Mon Tue Wed Thu Fri Sat
# week -1
# current T
# week +1 F
self.CurrentDOWParseStyle = False
# initalize attributes to empty values to ensure
# they are defined
self.re_sources = None
self.re_values = None
self.Modifiers = None
self.dayOffsets = None
self.WeekdayOffsets = None
self.MonthOffsets = None
self.dateSep = None
self.timeSep = None
self.am = None
self.pm = None
self.meridian = None
self.usesMeridian = None
self.uses24 = None
self.dp_order = None
self.RE_DATE4 = r''
self.RE_DATE3 = r''
self.RE_MONTH = r''
self.RE_WEEKDAY = r''
self.RE_SPECIAL = r''
self.RE_UNITS = r''
self.RE_QUNITS = r''
self.RE_MODIFIER = r''
self.RE_MODIFIER2 = r''
self.RE_TIMEHMS = r''
self.RE_TIMEHMS2 = r''
self.RE_DATE = r''
self.RE_DATE2 = r''
self.RE_DAY = r''
self.RE_DAY2 = r''
self.RE_TIME = r''
self.RE_REMAINING = r''
self.RE_RTIMEHMS = r''
self.RE_RTIMEHMS2 = r''
self.RE_RDATE = r''
self.RE_RDATE3 = r''
self.DATERNG1 = r''
self.DATERNG2 = r''
self.DATERNG3 = r''
self.TIMERNG1 = r''
self.TIMERNG2 = r''
self.TIMERNG3 = r''
self.TIMERNG4 = r''
_initLocale(self)
_initConstants(self)
_initSymbols(self)
_initPatterns(self)
self.re_option = re.IGNORECASE + re.VERBOSE
self.cre_source = { 'CRE_SPECIAL': self.RE_SPECIAL,
'CRE_UNITS': self.RE_UNITS,
'CRE_QUNITS': self.RE_QUNITS,
'CRE_MODIFIER': self.RE_MODIFIER,
'CRE_MODIFIER2': self.RE_MODIFIER2,
'CRE_TIMEHMS': self.RE_TIMEHMS,
'CRE_TIMEHMS2': self.RE_TIMEHMS2,
'CRE_DATE': self.RE_DATE,
'CRE_DATE2': self.RE_DATE2,
'CRE_DATE3': self.RE_DATE3,
'CRE_DATE4': self.RE_DATE4,
'CRE_MONTH': self.RE_MONTH,
'CRE_WEEKDAY': self.RE_WEEKDAY,
'CRE_DAY': self.RE_DAY,
'CRE_DAY2': self.RE_DAY2,
'CRE_TIME': self.RE_TIME,
'CRE_REMAINING': self.RE_REMAINING,
'CRE_RTIMEHMS': self.RE_RTIMEHMS,
'CRE_RTIMEHMS2': self.RE_RTIMEHMS2,
'CRE_RDATE': self.RE_RDATE,
'CRE_RDATE3': self.RE_RDATE3,
'CRE_TIMERNG1': self.TIMERNG1,
'CRE_TIMERNG2': self.TIMERNG2,
'CRE_TIMERNG3': self.TIMERNG3,
'CRE_TIMERNG4': self.TIMERNG4,
'CRE_DATERNG1': self.DATERNG1,
'CRE_DATERNG2': self.DATERNG2,
'CRE_DATERNG3': self.DATERNG3,
}
self.cre_keys = self.cre_source.keys()
def __getattr__(self, name):
if name in self.cre_keys:
value = re.compile(self.cre_source[name], self.re_option)
setattr(self, name, value)
return value
else:
raise AttributeError, name
def daysInMonth(self, month, year):
"""
Take the given month (1-12) and a given year (4 digit) return
the number of days in the month adjusting for leap year as needed
"""
result = None
if month > 0 and month <= 12:
result = self._DaysInMonthList[month - 1]
if month == 2:
if year in self._leapYears:
result += 1
else:
if calendar.isleap(year):
self._leapYears.append(year)
result += 1
return result
|
MacHu-GWU/pyknackhq-project | pyknackhq/schema.py | Object.get_field_key | python | def get_field_key(self, key, using_name=True):
try:
if using_name:
return self.f_name[key].key
else:
return self.f[key].key
except KeyError:
raise ValueError("'%s' are not found!" % key) | Given a field key or name, return it's field key. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/schema.py#L80-L89 | null | class Object(object):
"""Data object class.
Object are used to define an abstract concept of thing. For example, an
employee can be an object having attributes: name, date of birth, phone,
email, etc...
"""
def __init__(self, **kwargs):
for k, v in kwargs.items():
object.__setattr__(self, k, v)
self.f = OrderedDict() # {field_key: Field instance}
self.f_name = OrderedDict() # {field_name: Field instance}
for d in self.fields:
field = Field.from_dict(d)
self.f.setdefault(d["key"], field)
self.f_name.setdefault(d["name"], field)
def __str__(self):
return "Object('%s')" % self.name
def __repr__(self):
return "Object(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Object(**d)
@staticmethod
def from_json(abspath):
return Object.from_dict(load_js(abspath, enable_verbose=False))
def __iter__(self):
return iter(self.f.values())
@property
def all_field_key(self):
"""Return all available field_key.
"""
return [f.key for f in self.f.values()]
@property
def all_field_name(self):
"""Return all available field_name.
"""
return [f.name for f in self.f.values()]
def get_field(self, key, using_name=True):
"""Given a field key or name, return the Field instance.
"""
try:
if using_name:
return self.f_name[key]
else:
return self.f[key]
except KeyError:
raise ValueError("'%s' are not found!" % key)
|
MacHu-GWU/pyknackhq-project | pyknackhq/schema.py | Application.get_object_key | python | def get_object_key(self, key, using_name=True):
try:
if using_name:
return self.o_name[key].key
else:
return self.o[key].key
except KeyError:
raise ValueError("'%s' are not found!" % key) | Given a object key or name, return it's object key. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/schema.py#L149-L158 | null | class Application(object):
"""Application class that holding object and its fields information.
"""
def __init__(self, **kwargs):
self.data = {"application": kwargs}
for k, v in kwargs.items():
object.__setattr__(self, k, v)
self.o = OrderedDict() # {field_key: Field instance}
self.o_name = OrderedDict() # {field_name: Field instance}
for d in self.objects:
object_ = Object.from_dict(d)
self.o.setdefault(d["key"], object_)
self.o_name.setdefault(d["name"], object_)
def __str__(self):
return "Application('%s')" % self.name
def __repr__(self):
return "Application('%s')" % self.name
@staticmethod
def from_dict(d):
return Application(**d["application"])
@staticmethod
def from_json(abspath):
return Application.from_dict(load_js(abspath, enable_verbose=False))
def to_json(self, abspath):
safe_dump_js(self.data, abspath, enable_verbose=False)
def __iter__(self):
return iter(self.o.values())
@property
def all_object_key(self):
"""Return all available object_key.
"""
return [o.key for o in self.o.values()]
@property
def all_object_name(self):
"""Return all available object_name.
"""
return [o.name for o in self.o.values()]
def get_object(self, key, using_name=True):
"""Given a object key or name, return the Object instance.
"""
try:
if using_name:
return self.o_name[key]
else:
return self.o[key]
except KeyError:
raise ValueError("'%s' are not found!" % key)
|
MacHu-GWU/pyknackhq-project | pyknackhq/zzz_manual_install.py | install | python | def install():
import os, shutil
_ROOT = os.getcwd()
_PACKAGE_NAME = os.path.basename(_ROOT)
print("Installing [%s] to all python version..." % _PACKAGE_NAME)
# find all Python release installed on this windows computer
installed_python_version = list()
for root, folder_list, _ in os.walk(r"C:\\"):
for folder in folder_list:
if folder.startswith("Python"):
if os.path.exists(os.path.join(root, folder, "pythonw.exe")):
installed_python_version.append(folder)
break
print("\tYou have installed: {0}".format(", ".join(installed_python_version)))
# remove __pycache__ folder and *.pyc file
print("\tRemoving *.pyc file ...")
pyc_folder_list = list()
for root, folder_list, _ in os.walk(_ROOT):
if os.path.basename(root) == "__pycache__":
pyc_folder_list.append(root)
for folder in pyc_folder_list:
shutil.rmtree(folder)
print("\t\tall *.pyc file has been removed.")
# install this package to all python version
for py_root in installed_python_version:
dst = os.path.join(r"C:\\", py_root, r"Lib\site-packages", _PACKAGE_NAME)
try:
shutil.rmtree(dst)
except:
pass
print("\tRemoved %s." % dst)
shutil.copytree(_ROOT, dst)
print("\tInstalled %s." % dst)
print("Complete!") | Install your package to all Python version you have installed on Windows. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/zzz_manual_install.py#L100-L141 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Install your own package in one seconds! (Windows System Only!)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Put this script in your package directory, for example::
|---mypackage
|---subpackage1
|---__init__.py
|---subpackage2
|---__init__.py
|---__init__.py
|---module1.py
|---module2.py
|---zzz_manual_install.py <=== put it here
Run this script as the main script. Then your package is automatically installed
and replace the old one for all Python versions on your ``WINDOWS`` computer.
This feature is extremely useful when you need refresh your package over and
over again. But if you want to make an official release, you should make a
setup.py and build the distribution by yourself. Read the following instruction:
- For Python2:
- https://docs.python.org/2/distutils/setupscript.html
- https://docs.python.org/2/distutils/builtdist.html
- For Python3:
- https://docs.python.org/3.3/distutils/setupscript.html
- https://docs.python.org/3.3/distutils/builtdist.html
**Warning**: with python2, the project directory cannot have non-ascil char.
-------------------------------------------------------------------------------
**中文文档**
本脚用于在Windows系统下傻瓜一键安装用户自己写的扩展包, 纯python实现。
例如你有一个扩展包叫 mypackage, 文件目录形如: ``C:\project\mypackage``
则只需要把该脚本拷贝到 mypackage 目录下:
``C:\project\mypackage\zzz_manual_install.py``
然后将本脚本以主脚本运行。则会把package文件中所有的 .pyc 文件清除后, 安装你所有
已安装的Python版本下。例如你安装了Python27和Python33, 那么就会创建以下目录并将
包里的所有文件拷贝到该目录下::
C:\Python27\Lib\site-packages\mypackage
C:\Python33\Lib\site-packages\mypackage
然后你就可以用 ``import mypackage`` 调用你写的库了。
这一功能在调试阶段非常方便, 但最终发布时还是要通过写 ``setup.py`` 文件来制作
package的安装包。这一部分可以参考:
- Python2:
- https://docs.python.org/2/distutils/setupscript.html
- https://docs.python.org/2/distutils/builtdist.html
- Python3:
- https://docs.python.org/3.3/distutils/setupscript.html
- https://docs.python.org/3.3/distutils/builtdist.html
注: 项目目录在python2中不允许有中文路径。
About
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**Copyright (c) 2015 by Sanhe Hu**
- Author: Sanhe Hu
- Email: husanhe@gmail.com
- Lisence: MIT
**Compatibility**
- Python2: Yes
- Python3: Yes
**Prerequisites**
- None
class, method, func, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, unicode_literals
def install():
"""Install your package to all Python version you have installed on Windows.
"""
import os, shutil
_ROOT = os.getcwd()
_PACKAGE_NAME = os.path.basename(_ROOT)
print("Installing [%s] to all python version..." % _PACKAGE_NAME)
# find all Python release installed on this windows computer
installed_python_version = list()
for root, folder_list, _ in os.walk(r"C:\\"):
for folder in folder_list:
if folder.startswith("Python"):
if os.path.exists(os.path.join(root, folder, "pythonw.exe")):
installed_python_version.append(folder)
break
print("\tYou have installed: {0}".format(", ".join(installed_python_version)))
# remove __pycache__ folder and *.pyc file
print("\tRemoving *.pyc file ...")
pyc_folder_list = list()
for root, folder_list, _ in os.walk(_ROOT):
if os.path.basename(root) == "__pycache__":
pyc_folder_list.append(root)
for folder in pyc_folder_list:
shutil.rmtree(folder)
print("\t\tall *.pyc file has been removed.")
# install this package to all python version
for py_root in installed_python_version:
dst = os.path.join(r"C:\\", py_root, r"Lib\site-packages", _PACKAGE_NAME)
try:
shutil.rmtree(dst)
except:
pass
print("\tRemoved %s." % dst)
shutil.copytree(_ROOT, dst)
print("\tInstalled %s." % dst)
print("Complete!")
if __name__ == "__main__":
install() |
MacHu-GWU/pyknackhq-project | pyknackhq/js.py | load_js | python | def load_js(abspath, default=dict(), compress=False, enable_verbose=True):
abspath = str(abspath) # try stringlize
if compress: # check extension name
if os.path.splitext(abspath)[1] != ".gz":
raise Exception("compressed json has to use extension '.gz'!")
else:
if os.path.splitext(abspath)[1] != ".json":
raise Exception("file extension are not '.json'!")
if enable_verbose:
print("\nLoading from %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # exists, then load
if compress:
with gzip.open(abspath, "rb") as f:
js = json.loads(f.read().decode("utf-8"))
else:
with open(abspath, "r") as f:
js = json.load(f)
if enable_verbose:
print("\tComplete! Elapse %.6f sec." % (time.clock() - st) )
return js
else:
if enable_verbose:
print("\t%s not exists! cannot load! Create an default object "
"instead" % abspath)
return default | Load Json from file. If file are not exists, returns ``default``.
:param abspath: File path. Use absolute path as much as you can. File
extension has to be ``.json`` or ``.gz``. (for compressed Json)
:type abspath: string
:param default: (default dict()) If ``abspath`` not exists, return the
default Python object instead.
:param compress: (default False) Load from a gzip compressed Json file.
Check :func:`dump_js()<dump_js>` function for more information.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import load_js
>>> load_js("test.json") # if you have a json file
Loading from test.json...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Json文件中读取数据
参数列表
:param abspath: 文件路径, 扩展名需为 ``.json`` 或 ``.gz``
:type abspath: ``字符串``
:param default: (默认 dict()) 如果文件路径不存在, 则会返回一个默认的Python对象。
:param compress: (默认 False) 是否从一个gzip压缩过的Json文件中读取数据。 请
参考 :func:`dump_js()<dump_js>` 获得更多信息.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值`` | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/js.py#L52-L123 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module description
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is re-pack of some json utility functions.
- :func:`load_js`: Load Json from file. If file are not exists, returns
user defined ``default value``.
- :func:`dump_js`: Dump Json serializable object to file.
- :func:`safe_dump_js`: An atomic write version of dump_js, silently overwrite
existing file.
- :func:`js2str`: Encode js to nicely formatted human readable string.
- :func:`prt_js`: Print Json in pretty format.
Highlight
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`load_js`, :func:`dump_js`, :func:`safe_dump_js` support gzip compress,
size is **10 - 20 times** smaller in average.
Compatibility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Python2: Yes
- Python3: Yes
Prerequisites
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- None
Class, method, function, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, unicode_literals
import json, gzip
import os, shutil
import time
def load_js(abspath, default=dict(), compress=False, enable_verbose=True):
"""Load Json from file. If file are not exists, returns ``default``.
:param abspath: File path. Use absolute path as much as you can. File
extension has to be ``.json`` or ``.gz``. (for compressed Json)
:type abspath: string
:param default: (default dict()) If ``abspath`` not exists, return the
default Python object instead.
:param compress: (default False) Load from a gzip compressed Json file.
Check :func:`dump_js()<dump_js>` function for more information.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import load_js
>>> load_js("test.json") # if you have a json file
Loading from test.json...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Json文件中读取数据
参数列表
:param abspath: 文件路径, 扩展名需为 ``.json`` 或 ``.gz``
:type abspath: ``字符串``
:param default: (默认 dict()) 如果文件路径不存在, 则会返回一个默认的Python对象。
:param compress: (默认 False) 是否从一个gzip压缩过的Json文件中读取数据。 请
参考 :func:`dump_js()<dump_js>` 获得更多信息.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
if compress: # check extension name
if os.path.splitext(abspath)[1] != ".gz":
raise Exception("compressed json has to use extension '.gz'!")
else:
if os.path.splitext(abspath)[1] != ".json":
raise Exception("file extension are not '.json'!")
if enable_verbose:
print("\nLoading from %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # exists, then load
if compress:
with gzip.open(abspath, "rb") as f:
js = json.loads(f.read().decode("utf-8"))
else:
with open(abspath, "r") as f:
js = json.load(f)
if enable_verbose:
print("\tComplete! Elapse %.6f sec." % (time.clock() - st) )
return js
else:
if enable_verbose:
print("\t%s not exists! cannot load! Create an default object "
"instead" % abspath)
return default
def dump_js(js, abspath,
fastmode=False, replace=False, compress=False, enable_verbose=True):
"""Dump Json serializable object to file.
Provides multiple choice to customize the behavior.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param replace: (default False) If ``True``, when you dump json to a existing
path, it silently overwrite it. If False, an exception will be raised.
Default False setting is to prevent overwrite file by mistake.
:type replace: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> dump_js(js, "test.json", replace=True)
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为``.json``或``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param fastmode: (默认 False) 当为``True``时, Json编码时不对Key进行排序, 也不
进行缩进排版。这样做写入的速度更快, 文件的大小也更小。
:type fastmode: "布尔值"
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: "布尔值"
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: "布尔值"
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: "布尔值"
"""
abspath = str(abspath) # try stringlize
if compress: # check extension name
root, ext = os.path.splitext(abspath)
if ext != ".gz":
if ext != ".tmp":
raise Exception("compressed json has to use extension '.gz'!")
else:
_, ext = os.path.splitext(root)
if ext != ".gz":
raise Exception("compressed json has to use extension '.gz'!")
else:
root, ext = os.path.splitext(abspath)
if ext != ".json":
if ext != ".tmp":
raise Exception("file extension are not '.json'!")
else:
_, ext = os.path.splitext(root)
if ext != ".json":
raise Exception("file extension are not '.json'!")
if enable_verbose:
print("\nDumping to %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # if exists, check replace option
if replace: # replace existing file
if fastmode: # no sort and indent, do the fastest dumping
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js, sort_keys=True,
indent=4, separators=("," , ": ")).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f, sort_keys=True,
indent=4, separators=("," , ": ") )
else: # stop, print error message
raise Exception("\tCANNOT WRITE to %s, it's already "
"exists" % abspath)
else: # if not exists, just write to it
if fastmode: # no sort and indent, do the fastest dumping
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js, sort_keys=True,
indent=4, separators=("," , ": ")).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f, sort_keys=True,
indent=4, separators=("," , ": ") )
if enable_verbose:
print("\tComplete! Elapse %.6f sec" % (time.clock() - st) )
def safe_dump_js(js, abspath,
fastmode=False, compress=False, enable_verbose=True):
"""A stable version of dump_js, silently overwrite existing file.
When your program been interrupted, you lose nothing. Typically if your
program is interrupted by any reason, it only leaves a incomplete file.
If you use replace=True, then you also lose your old file.
So a bettr way is to:
1. dump json to a temp file.
2. when it's done, rename it to #abspath, overwrite the old one.
This way guarantee atomic write.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> safe_dump_js(js, "test.json")
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为 ``.json`` 或 ``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
temp_abspath = "%s.tmp" % abspath
dump_js(js, temp_abspath, fastmode=fastmode,
replace=True, compress=compress, enable_verbose=enable_verbose)
shutil.move(temp_abspath, abspath)
def js2str(js, sort_keys=True, indent=4):
"""Encode js to nicely formatted human readable string. (utf-8 encoding)
Usage::
>>> from weatherlab.lib.dataIO.js import js2str
>>> s = js2str({"a": 1, "b": 2})
>>> print(s)
{
"a": 1,
"b": 2
}
**中文文档**
将可Json化的Python对象转化成格式化的字符串。
"""
return json.dumps(js, sort_keys=sort_keys,
indent=indent, separators=("," , ": "))
def prt_js(js, sort_keys=True, indent=4):
"""Print Json in pretty format.
There's a standard module pprint, can pretty print python dict and list.
But it doesn't support sorted key. That why we need this func.
Usage::
>>> from weatherlab.lib.dataIO.js import prt_js
>>> prt_js({"a": 1, "b": 2})
{
"a": 1,
"b": 2
}
**中文文档**
以人类可读的方式打印可Json化的Python对象。
"""
print(js2str(js, sort_keys, indent) )
############
# Unittest #
############
if __name__ == "__main__":
import unittest
class JSUnittest(unittest.TestCase):
def test_write_and_read(self):
data = {"a": [1, 2], "b": ["是", "否"]}
safe_dump_js(data, "data.json")
data = load_js("data.json")
self.assertEqual(data["a"][0], 1)
self.assertEqual(data["b"][0], "是")
def test_js2str(self):
data = {"a": [1, 2], "b": ["是", "否"]}
prt_js(data)
def test_compress(self):
data = {"a": list(range(32)),
"b": list(range(32)),}
safe_dump_js(data, "data.gz", compress=True)
prt_js(load_js("data.gz", compress=True))
def tearDown(self):
for path in ["data.json", "data.gz"]:
try:
os.remove(path)
except:
pass
unittest.main() |
MacHu-GWU/pyknackhq-project | pyknackhq/js.py | dump_js | python | def dump_js(js, abspath,
fastmode=False, replace=False, compress=False, enable_verbose=True):
abspath = str(abspath) # try stringlize
if compress: # check extension name
root, ext = os.path.splitext(abspath)
if ext != ".gz":
if ext != ".tmp":
raise Exception("compressed json has to use extension '.gz'!")
else:
_, ext = os.path.splitext(root)
if ext != ".gz":
raise Exception("compressed json has to use extension '.gz'!")
else:
root, ext = os.path.splitext(abspath)
if ext != ".json":
if ext != ".tmp":
raise Exception("file extension are not '.json'!")
else:
_, ext = os.path.splitext(root)
if ext != ".json":
raise Exception("file extension are not '.json'!")
if enable_verbose:
print("\nDumping to %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # if exists, check replace option
if replace: # replace existing file
if fastmode: # no sort and indent, do the fastest dumping
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js, sort_keys=True,
indent=4, separators=("," , ": ")).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f, sort_keys=True,
indent=4, separators=("," , ": ") )
else: # stop, print error message
raise Exception("\tCANNOT WRITE to %s, it's already "
"exists" % abspath)
else: # if not exists, just write to it
if fastmode: # no sort and indent, do the fastest dumping
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js, sort_keys=True,
indent=4, separators=("," , ": ")).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f, sort_keys=True,
indent=4, separators=("," , ": ") )
if enable_verbose:
print("\tComplete! Elapse %.6f sec" % (time.clock() - st) ) | Dump Json serializable object to file.
Provides multiple choice to customize the behavior.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param replace: (default False) If ``True``, when you dump json to a existing
path, it silently overwrite it. If False, an exception will be raised.
Default False setting is to prevent overwrite file by mistake.
:type replace: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> dump_js(js, "test.json", replace=True)
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为``.json``或``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param fastmode: (默认 False) 当为``True``时, Json编码时不对Key进行排序, 也不
进行缩进排版。这样做写入的速度更快, 文件的大小也更小。
:type fastmode: "布尔值"
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: "布尔值"
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: "布尔值"
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: "布尔值" | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/js.py#L125-L258 | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module description
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is re-pack of some json utility functions.
- :func:`load_js`: Load Json from file. If file are not exists, returns
user defined ``default value``.
- :func:`dump_js`: Dump Json serializable object to file.
- :func:`safe_dump_js`: An atomic write version of dump_js, silently overwrite
existing file.
- :func:`js2str`: Encode js to nicely formatted human readable string.
- :func:`prt_js`: Print Json in pretty format.
Highlight
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`load_js`, :func:`dump_js`, :func:`safe_dump_js` support gzip compress,
size is **10 - 20 times** smaller in average.
Compatibility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Python2: Yes
- Python3: Yes
Prerequisites
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- None
Class, method, function, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, unicode_literals
import json, gzip
import os, shutil
import time
def load_js(abspath, default=dict(), compress=False, enable_verbose=True):
"""Load Json from file. If file are not exists, returns ``default``.
:param abspath: File path. Use absolute path as much as you can. File
extension has to be ``.json`` or ``.gz``. (for compressed Json)
:type abspath: string
:param default: (default dict()) If ``abspath`` not exists, return the
default Python object instead.
:param compress: (default False) Load from a gzip compressed Json file.
Check :func:`dump_js()<dump_js>` function for more information.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import load_js
>>> load_js("test.json") # if you have a json file
Loading from test.json...
Complete! Elapse 0.000432 sec.
{'a': 1, 'b': 2}
**中文文档**
从Json文件中读取数据
参数列表
:param abspath: 文件路径, 扩展名需为 ``.json`` 或 ``.gz``
:type abspath: ``字符串``
:param default: (默认 dict()) 如果文件路径不存在, 则会返回一个默认的Python对象。
:param compress: (默认 False) 是否从一个gzip压缩过的Json文件中读取数据。 请
参考 :func:`dump_js()<dump_js>` 获得更多信息.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
if compress: # check extension name
if os.path.splitext(abspath)[1] != ".gz":
raise Exception("compressed json has to use extension '.gz'!")
else:
if os.path.splitext(abspath)[1] != ".json":
raise Exception("file extension are not '.json'!")
if enable_verbose:
print("\nLoading from %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # exists, then load
if compress:
with gzip.open(abspath, "rb") as f:
js = json.loads(f.read().decode("utf-8"))
else:
with open(abspath, "r") as f:
js = json.load(f)
if enable_verbose:
print("\tComplete! Elapse %.6f sec." % (time.clock() - st) )
return js
else:
if enable_verbose:
print("\t%s not exists! cannot load! Create an default object "
"instead" % abspath)
return default
def dump_js(js, abspath,
fastmode=False, replace=False, compress=False, enable_verbose=True):
"""Dump Json serializable object to file.
Provides multiple choice to customize the behavior.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param replace: (default False) If ``True``, when you dump json to a existing
path, it silently overwrite it. If False, an exception will be raised.
Default False setting is to prevent overwrite file by mistake.
:type replace: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> dump_js(js, "test.json", replace=True)
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
将Python中可被序列化的"字典", "列表"以及他们的组合, 按照Json的编码方式写入文件
文件
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为``.json``或``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param fastmode: (默认 False) 当为``True``时, Json编码时不对Key进行排序, 也不
进行缩进排版。这样做写入的速度更快, 文件的大小也更小。
:type fastmode: "布尔值"
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: "布尔值"
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: "布尔值"
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: "布尔值"
"""
abspath = str(abspath) # try stringlize
if compress: # check extension name
root, ext = os.path.splitext(abspath)
if ext != ".gz":
if ext != ".tmp":
raise Exception("compressed json has to use extension '.gz'!")
else:
_, ext = os.path.splitext(root)
if ext != ".gz":
raise Exception("compressed json has to use extension '.gz'!")
else:
root, ext = os.path.splitext(abspath)
if ext != ".json":
if ext != ".tmp":
raise Exception("file extension are not '.json'!")
else:
_, ext = os.path.splitext(root)
if ext != ".json":
raise Exception("file extension are not '.json'!")
if enable_verbose:
print("\nDumping to %s..." % abspath)
st = time.clock()
if os.path.exists(abspath): # if exists, check replace option
if replace: # replace existing file
if fastmode: # no sort and indent, do the fastest dumping
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js, sort_keys=True,
indent=4, separators=("," , ": ")).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f, sort_keys=True,
indent=4, separators=("," , ": ") )
else: # stop, print error message
raise Exception("\tCANNOT WRITE to %s, it's already "
"exists" % abspath)
else: # if not exists, just write to it
if fastmode: # no sort and indent, do the fastest dumping
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f)
else:
if compress:
with gzip.open(abspath, "wb") as f:
f.write(json.dumps(js, sort_keys=True,
indent=4, separators=("," , ": ")).encode("utf-8"))
else:
with open(abspath, "w") as f:
json.dump(js, f, sort_keys=True,
indent=4, separators=("," , ": ") )
if enable_verbose:
print("\tComplete! Elapse %.6f sec" % (time.clock() - st) )
def safe_dump_js(js, abspath,
fastmode=False, compress=False, enable_verbose=True):
"""A stable version of dump_js, silently overwrite existing file.
When your program been interrupted, you lose nothing. Typically if your
program is interrupted by any reason, it only leaves a incomplete file.
If you use replace=True, then you also lose your old file.
So a bettr way is to:
1. dump json to a temp file.
2. when it's done, rename it to #abspath, overwrite the old one.
This way guarantee atomic write.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> safe_dump_js(js, "test.json")
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为 ``.json`` 或 ``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值``
"""
abspath = str(abspath) # try stringlize
temp_abspath = "%s.tmp" % abspath
dump_js(js, temp_abspath, fastmode=fastmode,
replace=True, compress=compress, enable_verbose=enable_verbose)
shutil.move(temp_abspath, abspath)
def js2str(js, sort_keys=True, indent=4):
    """Encode *js* as a nicely formatted, human readable string (utf-8).

    Keys are sorted by default so the output is deterministic.

    Usage::

        >>> print(js2str({"a": 1, "b": 2}))
        {
            "a": 1,
            "b": 2
        }
    """
    pretty_separators = (",", ": ")
    return json.dumps(
        js,
        sort_keys=sort_keys,
        indent=indent,
        separators=pretty_separators,
    )
def prt_js(js, sort_keys=True, indent=4):
    """Pretty-print a Json-serializable object.

    The stdlib ``pprint`` module cannot sort dictionary keys the way json
    output does; this helper renders via :func:`js2str` (sorted keys by
    default) and prints the result.
    """
    rendered = js2str(js, sort_keys, indent)
    print(rendered)
############
# Unittest #
############
if __name__ == "__main__":
    import unittest

    class JSUnittest(unittest.TestCase):
        # Round-trip: write with safe_dump_js, read back with load_js.
        def test_write_and_read(self):
            data = {"a": [1, 2], "b": ["是", "否"]}
            safe_dump_js(data, "data.json")
            data = load_js("data.json")
            self.assertEqual(data["a"][0], 1)
            self.assertEqual(data["b"][0], "是")

        # Smoke test: pretty-printing must not raise.
        def test_js2str(self):
            data = {"a": [1, 2], "b": ["是", "否"]}
            prt_js(data)

        # Round-trip through the gzip-compressed format.
        def test_compress(self):
            data = {"a": list(range(32)),
                "b": list(range(32)),}
            safe_dump_js(data, "data.gz", compress=True)
            prt_js(load_js("data.gz", compress=True))

        def tearDown(self):
            # Best-effort cleanup of files created by the tests above.
            for path in ["data.json", "data.gz"]:
                try:
                    os.remove(path)
                except:  # intentional best-effort removal
                    pass

    unittest.main()
MacHu-GWU/pyknackhq-project | pyknackhq/js.py | safe_dump_js | python | def safe_dump_js(js, abspath,
fastmode=False, compress=False, enable_verbose=True):
abspath = str(abspath) # try stringlize
temp_abspath = "%s.tmp" % abspath
dump_js(js, temp_abspath, fastmode=fastmode,
replace=True, compress=compress, enable_verbose=enable_verbose)
shutil.move(temp_abspath, abspath) | A stable version of dump_js, silently overwrite existing file.
When your program is interrupted, you lose nothing. Typically if your
program is interrupted by any reason, it only leaves a incomplete file.
If you use replace=True, then you also lose your old file.
So a better way is to:
1. dump json to a temp file.
2. when it's done, rename it to #abspath, overwrite the old one.
This way guarantees an atomic write.
:param js: Serializable python object.
:type js: dict or list
:param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz``
(for compressed json).
:type abspath: string
:param fastmode: (default False) If ``True``, then dumping json without
sorted keys and pretty indent, and it's faster and smaller in size.
:type fastmode: boolean
:param compress: (default False) If ``True``, use GNU program gzip to
compress the json file. Disk usage can be greatly reduced. But you have
to use :func:`load_js(abspath, compress=True)<load_js>` in loading.
:type compress: boolean
:param enable_verbose: (default True) Trigger for message.
:type enable_verbose: boolean
Usage::
>>> from weatherlab.lib.dataIO.js import dump_js
>>> js = {"a": 1, "b": 2}
>>> safe_dump_js(js, "test.json")
Dumping to test.json...
Complete! Elapse 0.002432 sec
**中文文档**
在对文件进行写入时, 如果程序中断, 则会留下一个不完整的文件。如果你使用了覆盖式
写入, 则你同时也丢失了原文件。所以为了保证写操作的原子性(要么全部完成, 要么全部
都不完成), 更好的方法是: 首先将文件写入一个临时文件中, 完成后再讲文件重命名,
覆盖旧文件。这样即使中途程序被中断, 也仅仅是留下了一个未完成的临时文件而已, 不会
影响原文件。
参数列表
:param js: 可Json化的Python对象
:type js: ``字典`` 或 ``列表``
:param abspath: 写入文件的路径。扩展名必须为 ``.json`` 或 ``.gz``, 其中gz用于被压
缩的Json
:type abspath: ``字符串``
:param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖
原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。
:type replace: ``布尔值``
:param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。
通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数
:func:`load_js(abspath, compress=True)<load_js>`.
:type compress: ``布尔值``
:param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.
:type enable_verbose: ``布尔值`` | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/js.py#L260-L335 | [
"def dump_js(js, abspath, \n fastmode=False, replace=False, compress=False, enable_verbose=True):\n \"\"\"Dump Json serializable object to file.\n Provides multiple choice to customize the behavior.\n\n :param js: Serializable python object.\n :type js: dict or list\n\n :param abspath: ``save as`` path, file extension has to be ``.json`` or ``.gz`` \n (for compressed json).\n :type abspath: string\n\n :param fastmode: (default False) If ``True``, then dumping json without \n sorted keys and pretty indent, and it's faster and smaller in size.\n :type fastmode: boolean\n\n :param replace: (default False) If ``True``, when you dump json to a existing \n path, it silently overwrite it. If False, an exception will be raised.\n Default False setting is to prevent overwrite file by mistake.\n :type replace: boolean\n\n :param compress: (default False) If ``True``, use GNU program gzip to \n compress the json file. Disk usage can be greatly reduced. But you have \n to use :func:`load_js(abspath, compress=True)<load_js>` in loading.\n :type compress: boolean\n\n :param enable_verbose: (default True) Trigger for message.\n :type enable_verbose: boolean\n\n Usage::\n\n >>> from weatherlab.lib.dataIO.js import dump_js\n >>> js = {\"a\": 1, \"b\": 2}\n >>> dump_js(js, \"test.json\", replace=True)\n Dumping to test.json...\n Complete! 
Elapse 0.002432 sec\n\n **中文文档**\n\n 将Python中可被序列化的\"字典\", \"列表\"以及他们的组合, 按照Json的编码方式写入文件\n 文件\n\n 参数列表\n\n :param js: 可Json化的Python对象\n :type js: ``字典`` 或 ``列表``\n\n :param abspath: 写入文件的路径。扩展名必须为``.json``或``.gz``, 其中gz用于被压\n 缩的Json\n :type abspath: ``字符串``\n\n :param fastmode: (默认 False) 当为``True``时, Json编码时不对Key进行排序, 也不\n 进行缩进排版。这样做写入的速度更快, 文件的大小也更小。\n :type fastmode: \"布尔值\"\n\n :param replace: (默认 False) 当为``True``时, 如果写入路径已经存在, 则会自动覆盖\n 原文件。而为``False``时, 则会抛出异常。防止误操作覆盖源文件。\n :type replace: \"布尔值\"\n\n :param compress: (默认 False) 当为``True``时, 使用开源压缩标准gzip压缩Json文件。\n 通常能让文件大小缩小10-20倍不等。如要读取文件, 则需要使用函数\n :func:`load_js(abspath, compress=True)<load_js>`.\n :type compress: \"布尔值\"\n\n :param enable_verbose: (默认 True) 是否打开信息提示开关, 批处理时建议关闭.\n :type enable_verbose: \"布尔值\"\n \"\"\"\n abspath = str(abspath) # try stringlize\n\n if compress: # check extension name\n root, ext = os.path.splitext(abspath)\n if ext != \".gz\":\n if ext != \".tmp\":\n raise Exception(\"compressed json has to use extension '.gz'!\")\n else:\n _, ext = os.path.splitext(root)\n if ext != \".gz\":\n raise Exception(\"compressed json has to use extension '.gz'!\")\n else:\n root, ext = os.path.splitext(abspath)\n if ext != \".json\":\n if ext != \".tmp\":\n raise Exception(\"file extension are not '.json'!\")\n else:\n _, ext = os.path.splitext(root)\n if ext != \".json\":\n raise Exception(\"file extension are not '.json'!\")\n\n if enable_verbose:\n print(\"\\nDumping to %s...\" % abspath)\n st = time.clock()\n\n if os.path.exists(abspath): # if exists, check replace option\n if replace: # replace existing file\n if fastmode: # no sort and indent, do the fastest dumping\n if compress:\n with gzip.open(abspath, \"wb\") as f:\n f.write(json.dumps(js).encode(\"utf-8\"))\n else:\n with open(abspath, \"w\") as f:\n json.dump(js, f)\n else:\n if compress:\n with gzip.open(abspath, \"wb\") as f:\n f.write(json.dumps(js, sort_keys=True,\n indent=4, separators=(\",\" , \": \")).encode(\"utf-8\"))\n 
else:\n with open(abspath, \"w\") as f:\n json.dump(js, f, sort_keys=True, \n indent=4, separators=(\",\" , \": \") )\n else: # stop, print error message\n raise Exception(\"\\tCANNOT WRITE to %s, it's already \"\n \"exists\" % abspath)\n\n else: # if not exists, just write to it\n if fastmode: # no sort and indent, do the fastest dumping\n if compress:\n with gzip.open(abspath, \"wb\") as f:\n f.write(json.dumps(js).encode(\"utf-8\"))\n else:\n with open(abspath, \"w\") as f:\n json.dump(js, f)\n else:\n if compress:\n with gzip.open(abspath, \"wb\") as f:\n f.write(json.dumps(js, sort_keys=True,\n indent=4, separators=(\",\" , \": \")).encode(\"utf-8\"))\n else:\n with open(abspath, \"w\") as f:\n json.dump(js, f, sort_keys=True, \n indent=4, separators=(\",\" , \": \") )\n\n if enable_verbose:\n print(\"\\tComplete! Elapse %.6f sec\" % (time.clock() - st) )\n"
] | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module description
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module is re-pack of some json utility functions.
- :func:`load_js`: Load Json from file. If file are not exists, returns
user defined ``default value``.
- :func:`dump_js`: Dump Json serializable object to file.
- :func:`safe_dump_js`: An atomic write version of dump_js, silently overwrite
existing file.
- :func:`js2str`: Encode js to nicely formatted human readable string.
- :func:`prt_js`: Print Json in pretty format.
Highlight
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- :func:`load_js`, :func:`dump_js`, :func:`safe_dump_js` support gzip compress,
size is **10 - 20 times** smaller in average.
Compatibility
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Python2: Yes
- Python3: Yes
Prerequisites
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- None
Class, method, function, exception
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from __future__ import print_function, unicode_literals
import json, gzip
import os, shutil
import time
def load_js(abspath, default=dict(), compress=False, enable_verbose=True):
    """Load Json from file. If the file does not exist, return ``default``.

    :param abspath: file path; the extension must be ``.json`` (plain) or
        ``.gz`` (gzip-compressed json, see :func:`dump_js`).
    :type abspath: string
    :param default: object returned when ``abspath`` does not exist.
        NOTE(review): the default ``dict()`` is a single shared mutable
        object; callers must not mutate the returned default in place.
    :param compress: (default False) read a gzip-compressed json file.
    :type compress: boolean
    :param enable_verbose: (default True) print progress messages.
    :type enable_verbose: boolean

    Usage::

        >>> load_js("test.json")
        {'a': 1, 'b': 2}
    """
    abspath = str(abspath)  # accept path-like objects

    # The extension must match the chosen storage format.
    wanted_ext = ".gz" if compress else ".json"
    if os.path.splitext(abspath)[1] != wanted_ext:
        if compress:
            raise Exception("compressed json has to use extension '.gz'!")
        else:
            raise Exception("file extension are not '.json'!")

    if enable_verbose:
        print("\nLoading from %s..." % abspath)
        # time.clock() was removed in Python 3.8; prefer perf_counter and
        # fall back lazily so Python 2 still works.
        timer = getattr(time, "perf_counter", None) or time.clock
        st = timer()

    if os.path.exists(abspath):  # exists, then load
        if compress:
            with gzip.open(abspath, "rb") as f:
                js = json.loads(f.read().decode("utf-8"))
        else:
            with open(abspath, "r") as f:
                js = json.load(f)
        if enable_verbose:
            print("\tComplete! Elapse %.6f sec." % (timer() - st))
        return js
    else:
        if enable_verbose:
            print("\t%s not exists! cannot load! Create an default object "
                  "instead" % abspath)
        return default
def _check_dump_ext(abspath, compress):
    """Validate the output extension: '.gz' when compressing, '.json'
    otherwise.  A trailing '.tmp' suffix in front of the real extension is
    also accepted (used by :func:`safe_dump_js` for its staging file)."""
    if compress:
        wanted, message = ".gz", "compressed json has to use extension '.gz'!"
    else:
        wanted, message = ".json", "file extension are not '.json'!"
    root, ext = os.path.splitext(abspath)
    if ext != wanted:
        if ext != ".tmp" or os.path.splitext(root)[1] != wanted:
            raise Exception(message)


def dump_js(js, abspath,
    fastmode=False, replace=False, compress=False, enable_verbose=True):
    """Dump a Json-serializable object to file.

    :param js: serializable python object.
    :type js: dict or list
    :param abspath: ``save as`` path; extension must be ``.json``, or
        ``.gz`` when ``compress=True``.
    :type abspath: string
    :param fastmode: (default False) skip key sorting and pretty indent;
        faster and smaller output.
    :type fastmode: boolean
    :param replace: (default False) silently overwrite an existing file.
        When False an exception is raised instead, which prevents
        overwriting a file by mistake.
    :type replace: boolean
    :param compress: (default False) gzip-compress the output; read it
        back with ``load_js(abspath, compress=True)``.
    :type compress: boolean
    :param enable_verbose: (default True) print progress messages.
    :type enable_verbose: boolean
    """
    abspath = str(abspath)  # accept path-like objects
    _check_dump_ext(abspath, compress)

    if enable_verbose:
        print("\nDumping to %s..." % abspath)
        # time.clock() was removed in Python 3.8; prefer perf_counter and
        # fall back lazily so Python 2 still works.
        timer = getattr(time, "perf_counter", None) or time.clock
        st = timer()

    # Refuse to clobber an existing file unless explicitly allowed.
    if os.path.exists(abspath) and not replace:
        raise Exception("\tCANNOT WRITE to %s, it's already "
                        "exists" % abspath)

    # Serialize once; the exists/not-exists branches of the original were
    # byte-identical, so a single write path replaces all eight of them.
    if fastmode:  # no sorting / indentation: fastest, smallest
        content = json.dumps(js)
    else:
        content = json.dumps(js, sort_keys=True,
                             indent=4, separators=(",", ": "))
    if compress:
        with gzip.open(abspath, "wb") as f:
            f.write(content.encode("utf-8"))
    else:
        with open(abspath, "w") as f:
            f.write(content)

    if enable_verbose:
        print("\tComplete! Elapse %.6f sec" % (timer() - st))
def safe_dump_js(js, abspath,
    fastmode=False, compress=False, enable_verbose=True):
    """Atomic variant of :func:`dump_js`; silently overwrites the target.

    The object is first dumped to ``<abspath>.tmp``; only after the dump
    completes successfully is the staging file renamed onto ``abspath``.
    An interruption therefore leaves at most a stale ``.tmp`` file behind
    and never a half-written (or lost) target file.

    :param js: serializable python object (dict or list).
    :param abspath: destination path, extension ``.json`` or ``.gz``
        (for compressed json).
    :param fastmode: (default False) skip key sorting and pretty indent.
    :param compress: (default False) gzip-compress the output; read back
        with ``load_js(abspath, compress=True)``.
    :param enable_verbose: (default True) print progress messages.
    """
    target = str(abspath)  # accept path-like objects
    staging = "%s.tmp" % target
    dump_js(js, staging, fastmode=fastmode,
        replace=True, compress=compress, enable_verbose=enable_verbose)
    shutil.move(staging, target)
def js2str(js, sort_keys=True, indent=4):
    """Encode *js* as a nicely formatted, human readable string (utf-8).

    Keys are sorted by default so the output is deterministic.

    Usage::

        >>> print(js2str({"a": 1, "b": 2}))
        {
            "a": 1,
            "b": 2
        }
    """
    pretty_separators = (",", ": ")
    return json.dumps(
        js,
        sort_keys=sort_keys,
        indent=indent,
        separators=pretty_separators,
    )
def prt_js(js, sort_keys=True, indent=4):
    """Pretty-print a Json-serializable object.

    The stdlib ``pprint`` module cannot sort dictionary keys the way json
    output does; this helper renders via :func:`js2str` (sorted keys by
    default) and prints the result.
    """
    rendered = js2str(js, sort_keys, indent)
    print(rendered)
############
# Unittest #
############
if __name__ == "__main__":
    import unittest

    class JSUnittest(unittest.TestCase):
        # Round-trip: write with safe_dump_js, read back with load_js.
        def test_write_and_read(self):
            data = {"a": [1, 2], "b": ["是", "否"]}
            safe_dump_js(data, "data.json")
            data = load_js("data.json")
            self.assertEqual(data["a"][0], 1)
            self.assertEqual(data["b"][0], "是")

        # Smoke test: pretty-printing must not raise.
        def test_js2str(self):
            data = {"a": [1, 2], "b": ["是", "否"]}
            prt_js(data)

        # Round-trip through the gzip-compressed format.
        def test_compress(self):
            data = {"a": list(range(32)),
                "b": list(range(32)),}
            safe_dump_js(data, "data.gz", compress=True)
            prt_js(load_js("data.gz", compress=True))

        def tearDown(self):
            # Best-effort cleanup of files created by the tests above.
            for path in ["data.json", "data.gz"]:
                try:
                    os.remove(path)
                except:  # intentional best-effort removal
                    pass

    unittest.main()
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.convert_keys | python | def convert_keys(self, pydict):
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict | Convert field_name to field_key.
{"field_name": value} => {"field_key": value} | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L40-L48 | [
"def get_field_key(self, key, using_name=True):\n \"\"\"Given a field key or name, return it's field key.\n \"\"\"\n try:\n if using_name:\n return self.f_name[key].key\n else:\n return self.f[key].key\n except KeyError:\n raise ValueError(\"'%s' are not found!\" % key)\n"
class Collection(Object):
    """A collection is the equivalent of an RDBMS table, collection of MongoDB
    and object of Knackhq. Most of CRUD method can be executed using this.

    - :meth:`~Collection.insert_one`
    - :meth:`~Collection.insert`
    - :meth:`~Collection.find_one`
    - :meth:`~Collection.find`
    - :meth:`~Collection.update_one`
    - :meth:`~Collection.delete_one`
    - :meth:`~Collection.delete_all`
    """
    # NOTE(review): relies on the base ``Object`` class for ``self.name``,
    # ``self.key``, field iteration (``for field in self``),
    # ``get_field_key`` and the HTTP verbs (``get``/``post``/``put``/
    # ``delete``) -- all declared outside this view; confirm there.

    def __str__(self):
        return "Collection('%s')" % self.name

    def __repr__(self):
        return "Collection(key='%s', name='%s')" % (self.key, self.name)

    @staticmethod
    def from_dict(d):
        # Alternate constructor: build a Collection from a plain dict.
        return Collection(**d)

    @property
    def get_url(self):
        # Records endpoint used for read queries.
        return "https://api.knackhq.com/v1/objects/%s/records" % self.key

    @property
    def post_url(self):
        # Records endpoint used for record creation (same as get_url).
        return "https://api.knackhq.com/v1/objects/%s/records" % self.key

    def convert_keys(self, pydict):
        """Convert field_name to field_key.

        {"field_name": value} => {"field_key": value}
        """
        new_dict = dict()
        for key, value in pydict.items():
            new_dict[self.get_field_key(key)] = value
        return new_dict

    def get_html_values(self, pydict, recovery_name=True):
        """Convert naive get response data to human readable field name format.
        using html data format.

        :param pydict: one raw record dict returned by the API; must carry
            an ``"id"`` entry.
        :param recovery_name: when True, key the result by field name;
            otherwise keep the field key.
        """
        new_dict = {"id": pydict["id"]}
        for field in self:  # iterate the collection's fields (base class)
            if field.key in pydict:
                if recovery_name:
                    new_dict[field.name] = pydict[field.key]
                else:
                    new_dict[field.key] = pydict[field.key]
        return new_dict

    def get_raw_values(self, pydict, recovery_name=True):
        """Convert naive get response data to human readable field name format.
        using raw data format.

        :param pydict: one raw record dict returned by the API; must carry
            an ``"id"`` entry.
        :param recovery_name: when True, key the result by field name;
            otherwise keep the field key.
        """
        new_dict = {"id": pydict["id"]}
        for field in self:
            # raw values come back from the API under "<field_key>_raw"
            raw_key = "%s_raw" % field.key
            if raw_key in pydict:
                if recovery_name:
                    new_dict[field.name] = pydict[raw_key]
                else:
                    new_dict[field.key] = pydict[raw_key]
        return new_dict

    def convert_values(self, pydict):
        """Convert knackhq data type instance to json friendly data.
        """
        new_dict = dict()
        for key, value in pydict.items():
            try:  # is it's BaseDataType Instance (exposes ._data)
                new_dict[key] = value._data
            except AttributeError:  # plain json-friendly value, keep as is
                new_dict[key] = value
        return new_dict

    #-------------------------------------------------------------------------#
    #                              CRUD method                                 #
    #-------------------------------------------------------------------------#
    def insert_one(self, data, using_name=True):
        """Insert one record.

        Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create

        For more information of the raw structure of all data type, read this:
        http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types

        :param data: dict type data
        :param using_name: if you are using field_name in data,
            please set using_name = True (it's the default), otherwise, False
        """
        data = self.convert_values(data)
        if using_name:
            data = self.convert_keys(data)
        res = self.post(self.post_url, data)
        return res

    def insert(self, data, using_name=True):
        """Insert one or many records.

        :param data: dict type data or list of dict
        :param using_name: if you are using field name in data,
            please set using_name = True (it's the default), otherwise, False
        """
        if isinstance(data, list):  # if iterable, insert one by one
            for d in data:
                self.insert_one(d, using_name=using_name)
        else:  # not iterable, execute insert_one
            self.insert_one(data, using_name=using_name)

    def find_one(self, id_, raw=True, recovery_name=True):
        """Find one record.

        Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve

        :param id_: record id_
        :param raw: Default True, set True if you want the data in raw format.
            Otherwise, html format
        :param recovery_name: Default True, set True if you want field name
            instead of field key
        """
        url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
            self.key, id_)
        res = self.get(url)
        # best effort: if the response has no usable record shape, return
        # it unchanged rather than failing
        if raw:
            try:
                res = self.get_raw_values(res, recovery_name=recovery_name)
            except:
                pass
        else:
            try:
                res = self.get_html_values(res, recovery_name=recovery_name)
            except:
                pass
        return res

    def find(self, filter=list(),
             sort_field=None, sort_order=None,
             page=None, rows_per_page=None,
             using_name=True, data_only=True, raw=True, recovery_name=True):
        """Execute a find query.

        Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve

        :param filter: list of criterions. For more information:
            http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
        :param sort_field: field_name or field_id, taking field_name by default.
            if using field_id, please set using_name = False.
        :param sort_order: -1 or 1, 1 means ascending, -1 means descending
        :param page and rows_per_page: skip first #page * #rows_per_page,
            returns #rows_per_page of records. For more information:
            http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
        :param using_name: if you are using field_name in filter and sort_field,
            please set using_name = True (it's the default), otherwise, False
        :param data_only: set True you only need the data or the full api
            response
        :param raw: Default True, set True if you want the data in raw format.
            Otherwise, html format
        :param recovery_name: Default True, set True if you want field name
            instead of field key
        """
        # NOTE(review): ``filter`` defaults to a shared mutable list and its
        # criteria are mutated in place below -- confirm callers expect this.
        if using_name:
            # translate field names to field keys for the API
            for criterion in filter:
                criterion["field"] = self.get_field_key(criterion["field"])
            if sort_field:
                sort_field = self.get_field_key(sort_field)
        # normalize the +1/-1 convention to the API's "asc"/"desc"
        if sort_order is None:
            pass
        elif sort_order == 1:
            sort_order = "asc"
        elif sort_order == -1:
            sort_order = "desc"
        else:
            raise ValueError
        params = dict()
        if len(filter) >= 1:
            params["filters"] = json.dumps(filter)
        if sort_field:
            params["sort_field"] = sort_field
            params["sort_order"] = sort_order
        # pagination only when both values are well-formed positive ints
        if (page is not None) \
            and (rows_per_page is not None) \
            and isinstance(page, int) \
            and isinstance(rows_per_page, int) \
            and (page >= 1) \
            and (rows_per_page >= 1):
            params["page"] = page
            params["rows_per_page"] = rows_per_page
        res = self.get(self.get_url, params)
        # handle data_only and recovery
        if data_only:
            try:
                res = res["records"]
                if raw:
                    res = [self.get_raw_values(data, recovery_name) for data in res]
                else:
                    res = [self.get_html_values(data, recovery_name) for data in res]
            except KeyError:
                pass
        else:
            if raw:
                try:
                    res["records"] = [
                        self.get_raw_values(data, recovery_name) for data in res["records"]]
                except KeyError:
                    pass
            else:
                try:
                    res["records"] = [
                        self.get_html_values(data, recovery_name) for data in res["records"]]
                except KeyError:
                    pass
        return res

    def update_one(self, id_, data, using_name=True):
        """Update one record. Any fields you don't specify will remain unchanged.

        Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update

        :param id_: record id_
        :param data: the new data fields and values
        :param using_name: if you are using field name in data,
            please set using_name = True (it's the default), otherwise, False
        """
        data = self.convert_values(data)
        if using_name:
            data = self.convert_keys(data)
        url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
            self.key, id_)
        res = self.put(url, data)
        return res

    def delete_one(self, id_):
        """Delete one record.

        Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete

        :param id_: record id_
        """
        url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
            self.key, id_)
        res = self.delete(url)
        return res

    def delete_all(self):
        """Delete all record in the table/collection of this object.
        """
        for record in self.find(using_name=False, data_only=True):
            # per-record API response is discarded
            res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.get_html_values | python | def get_html_values(self, pydict, recovery_name=True):
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict | Convert naive get response data to human readable field name format.
using html data format. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L50-L62 | null | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.get_raw_values | python | def get_raw_values(self, pydict, recovery_name=True):
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict | Convert naive get response data to human readable field name format.
using raw data format. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L64-L77 | null | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.convert_values | python | def convert_values(self, pydict):
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict | Convert knackhq data type instance to json friendly data. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L79-L88 | null | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.insert_one | python | def insert_one(self, data, using_name=True):
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res | Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L93-L113 | [
"def convert_keys(self, pydict):\n \"\"\"Convert field_name to field_key.\n\n {\"field_name\": value} => {\"field_key\": value}\n \"\"\"\n new_dict = dict()\n for key, value in pydict.items():\n new_dict[self.get_field_key(key)] = value\n return new_dict\n",
"def convert_values(self, pydict):\n \"\"\"Convert knackhq data type instance to json friendly data.\n \"\"\"\n new_dict = dict()\n for key, value in pydict.items():\n try: # is it's BaseDataType Instance\n new_dict[key] = value._data\n except AttributeError:\n new_dict[key] = value\n return new_dict\n"
] | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.insert | python | def insert(self, data, using_name=True):
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name) | Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L115-L130 | [
"def insert_one(self, data, using_name=True):\n \"\"\"Insert one record.\n\n Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create\n\n For more information of the raw structure of all data type, read this:\n http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types\n\n :param data: dict type data\n :param using_name: if you are using field_name in data,\n please set using_name = True (it's the default), otherwise, False\n\n **中文文档**\n\n 插入一条记录\n \"\"\"\n data = self.convert_values(data)\n if using_name:\n data = self.convert_keys(data)\n res = self.post(self.post_url, data)\n return res\n"
] | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.find_one | python | def find_one(self, id_, raw=True, recovery_name=True):
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res | Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L132-L164 | [
"def get_html_values(self, pydict, recovery_name=True):\n \"\"\"Convert naive get response data to human readable field name format.\n\n using html data format.\n \"\"\"\n new_dict = {\"id\": pydict[\"id\"]}\n for field in self:\n if field.key in pydict:\n if recovery_name:\n new_dict[field.name] = pydict[field.key]\n else:\n new_dict[field.key] = pydict[field.key]\n return new_dict\n",
"def get_raw_values(self, pydict, recovery_name=True):\n \"\"\"Convert naive get response data to human readable field name format.\n\n using raw data format.\n \"\"\"\n new_dict = {\"id\": pydict[\"id\"]}\n for field in self:\n raw_key = \"%s_raw\" % field.key\n if raw_key in pydict:\n if recovery_name:\n new_dict[field.name] = pydict[raw_key]\n else:\n new_dict[field.key] = pydict[raw_key]\n return new_dict\n"
] | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.find | python | def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res | Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L166-L253 | [
"def get_field_key(self, key, using_name=True):\n \"\"\"Given a field key or name, return it's field key.\n \"\"\"\n try:\n if using_name:\n return self.f_name[key].key\n else:\n return self.f[key].key\n except KeyError:\n raise ValueError(\"'%s' are not found!\" % key)\n"
] | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.update_one | python | def update_one(self, id_, data, using_name=True):
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res | Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L255-L276 | [
"def convert_keys(self, pydict):\n \"\"\"Convert field_name to field_key.\n\n {\"field_name\": value} => {\"field_key\": value}\n \"\"\"\n new_dict = dict()\n for key, value in pydict.items():\n new_dict[self.get_field_key(key)] = value\n return new_dict\n",
"def convert_values(self, pydict):\n \"\"\"Convert knackhq data type instance to json friendly data.\n \"\"\"\n new_dict = dict()\n for key, value in pydict.items():\n try: # is it's BaseDataType Instance\n new_dict[key] = value._data\n except AttributeError:\n new_dict[key] = value\n return new_dict\n"
] | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.delete_one | python | def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
# Build the per-record REST endpoint from the object key and record id.
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
# `self.delete` is the HTTP wrapper injected by the client; on failure it
# returns the sentinel string "error" rather than raising — see KnackhqAuth.delete.
res = self.delete(url)
return res | Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L278-L292 | null | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | Collection.delete_all | python | def delete_all(self):
# Fetch every record id (field keys, data only) and delete them one HTTP
# call at a time — O(n) requests; there is no bulk-delete endpoint used here.
for record in self.find(using_name=False, data_only=True):
# NOTE(review): `res` is overwritten each iteration and discarded; a failed
# delete (sentinel "error") is silently ignored — confirm that is intended.
res = self.delete_one(record["id"]) | Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录 | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L294-L302 | [
"def find(self, filter=list(), \n sort_field=None, sort_order=None, \n page=None, rows_per_page=None,\n using_name=True, data_only=True, raw=True, recovery_name=True):\n \"\"\"Execute a find query.\n\n Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve\n\n :param filter: list of criterions. For more information: \n http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search\n :param sort_field: field_name or field_id, taking field_name by default.\n if using field_id, please set using_name = False.\n :param sort_order: -1 or 1, 1 means ascending, -1 means descending\n :param page and rows_per_page: skip first #page * #rows_per_page, \n returns #rows_per_page of records. For more information:\n http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination\n :param using_name: if you are using field_name in filter and sort_field, \n please set using_name = True (it's the default), otherwise, False\n :param data_only: set True you only need the data or the full api\n response\n :param raw: Default True, set True if you want the data in raw format. 
\n Otherwise, html format\n :param recovery_name: Default True, set True if you want field name\n instead of field key\n\n **中文文档**\n\n 返回多条记录\n \"\"\"\n if using_name: \n for criterion in filter:\n criterion[\"field\"] = self.get_field_key(criterion[\"field\"])\n\n if sort_field:\n sort_field = self.get_field_key(sort_field)\n\n if sort_order is None:\n pass\n elif sort_order == 1:\n sort_order = \"asc\"\n elif sort_order == -1:\n sort_order = \"desc\"\n else:\n raise ValueError\n\n params = dict()\n if len(filter) >= 1:\n params[\"filters\"] = json.dumps(filter)\n\n if sort_field:\n params[\"sort_field\"] = sort_field\n params[\"sort_order\"] = sort_order\n\n if (page is not None) \\\n and (rows_per_page is not None) \\\n and isinstance(page, int) \\\n and isinstance(rows_per_page, int) \\\n and (page >= 1) \\\n and (rows_per_page >= 1):\n params[\"page\"] = page\n params[\"rows_per_page\"] = rows_per_page\n\n res = self.get(self.get_url, params)\n\n # handle data_only and recovery\n if data_only:\n try:\n res = res[\"records\"]\n if raw:\n res = [self.get_raw_values(data, recovery_name) for data in res]\n else:\n res = [self.get_html_values(data, recovery_name) for data in res]\n except KeyError:\n pass\n else:\n if raw:\n try:\n res[\"records\"] = [\n self.get_raw_values(data, recovery_name) for data in res[\"records\"]]\n except KeyError:\n pass\n else:\n try:\n res[\"records\"] = [\n self.get_html_values(data, recovery_name) for data in res[\"records\"]]\n except KeyError:\n pass\n return res\n",
"def delete_one(self, id_):\n \"\"\"Delete one record.\n\n Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete\n\n :param id_: record id_\n\n **中文文档**\n\n 删除一条记录\n \"\"\" \n url = \"https://api.knackhq.com/v1/objects/%s/records/%s\" % (\n self.key, id_)\n res = self.delete(url)\n return res\n"
] | class Collection(Object):
"""A collection is the equivalent of an RDBMS table, collection of MongoDB
and object of Knackhq. Most of CRUD method can be executed using this.
- :meth:`~Collection.insert_one`
- :meth:`~Collection.insert`
- :meth:`~Collection.find_one`
- :meth:`~Collection.find`
- :meth:`~Collection.update_one`
- :meth:`~Collection.delete_one`
- :meth:`~Collection.delete_all`
"""
def __str__(self):
return "Collection('%s')" % self.name
def __repr__(self):
return "Collection(key='%s', name='%s')" % (self.key, self.name)
@staticmethod
def from_dict(d):
return Collection(**d)
@property
def get_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
@property
def post_url(self):
return "https://api.knackhq.com/v1/objects/%s/records" % self.key
def convert_keys(self, pydict):
"""Convert field_name to field_key.
{"field_name": value} => {"field_key": value}
"""
new_dict = dict()
for key, value in pydict.items():
new_dict[self.get_field_key(key)] = value
return new_dict
def get_html_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using html data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
if field.key in pydict:
if recovery_name:
new_dict[field.name] = pydict[field.key]
else:
new_dict[field.key] = pydict[field.key]
return new_dict
def get_raw_values(self, pydict, recovery_name=True):
"""Convert naive get response data to human readable field name format.
using raw data format.
"""
new_dict = {"id": pydict["id"]}
for field in self:
raw_key = "%s_raw" % field.key
if raw_key in pydict:
if recovery_name:
new_dict[field.name] = pydict[raw_key]
else:
new_dict[field.key] = pydict[raw_key]
return new_dict
def convert_values(self, pydict):
"""Convert knackhq data type instance to json friendly data.
"""
new_dict = dict()
for key, value in pydict.items():
try: # is it's BaseDataType Instance
new_dict[key] = value._data
except AttributeError:
new_dict[key] = value
return new_dict
#-------------------------------------------------------------------------#
# CRUD method #
#-------------------------------------------------------------------------#
def insert_one(self, data, using_name=True):
"""Insert one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#create
For more information of the raw structure of all data type, read this:
http://helpdesk.knackhq.com/support/solutions/articles/5000446405-field-types
:param data: dict type data
:param using_name: if you are using field_name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入一条记录
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
res = self.post(self.post_url, data)
return res
def insert(self, data, using_name=True):
"""Insert one or many records.
:param data: dict type data or list of dict
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
插入多条记录
"""
if isinstance(data, list): # if iterable, insert one by one
for d in data:
self.insert_one(d, using_name=using_name)
else: # not iterable, execute insert_one
self.insert_one(data, using_name=using_name)
def find_one(self, id_, raw=True, recovery_name=True):
"""Find one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param id_: record id_
:param using_name: if you are using field name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.get(url)
if raw:
try:
res = self.get_raw_values(res, recovery_name=recovery_name)
except:
pass
else:
try:
res = self.get_html_values(res, recovery_name=recovery_name)
except:
pass
return res
def find(self, filter=list(),
sort_field=None, sort_order=None,
page=None, rows_per_page=None,
using_name=True, data_only=True, raw=True, recovery_name=True):
"""Execute a find query.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve
:param filter: list of criterions. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000447623-api-reference-filters-search
:param sort_field: field_name or field_id, taking field_name by default.
if using field_id, please set using_name = False.
:param sort_order: -1 or 1, 1 means ascending, -1 means descending
:param page and rows_per_page: skip first #page * #rows_per_page,
returns #rows_per_page of records. For more information:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#pagination
:param using_name: if you are using field_name in filter and sort_field,
please set using_name = True (it's the default), otherwise, False
:param data_only: set True you only need the data or the full api
response
:param raw: Default True, set True if you want the data in raw format.
Otherwise, html format
:param recovery_name: Default True, set True if you want field name
instead of field key
**中文文档**
返回多条记录
"""
if using_name:
for criterion in filter:
criterion["field"] = self.get_field_key(criterion["field"])
if sort_field:
sort_field = self.get_field_key(sort_field)
if sort_order is None:
pass
elif sort_order == 1:
sort_order = "asc"
elif sort_order == -1:
sort_order = "desc"
else:
raise ValueError
params = dict()
if len(filter) >= 1:
params["filters"] = json.dumps(filter)
if sort_field:
params["sort_field"] = sort_field
params["sort_order"] = sort_order
if (page is not None) \
and (rows_per_page is not None) \
and isinstance(page, int) \
and isinstance(rows_per_page, int) \
and (page >= 1) \
and (rows_per_page >= 1):
params["page"] = page
params["rows_per_page"] = rows_per_page
res = self.get(self.get_url, params)
# handle data_only and recovery
if data_only:
try:
res = res["records"]
if raw:
res = [self.get_raw_values(data, recovery_name) for data in res]
else:
res = [self.get_html_values(data, recovery_name) for data in res]
except KeyError:
pass
else:
if raw:
try:
res["records"] = [
self.get_raw_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
else:
try:
res["records"] = [
self.get_html_values(data, recovery_name) for data in res["records"]]
except KeyError:
pass
return res
def update_one(self, id_, data, using_name=True):
"""Update one record. Any fields you don't specify will remain unchanged.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#update
:param id_: record id_
:param data: the new data fields and values
:param using_name: if you are using field name in data,
please set using_name = True (it's the default), otherwise, False
**中文文档**
对一条记录进行更新
"""
data = self.convert_values(data)
if using_name:
data = self.convert_keys(data)
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.put(url, data)
return res
def delete_one(self, id_):
"""Delete one record.
Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#delete
:param id_: record id_
**中文文档**
删除一条记录
"""
url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
self.key, id_)
res = self.delete(url)
return res
def delete_all(self):
"""Delete all record in the table/collection of this object.
**中文文档**
删除表中的所有记录
"""
for record in self.find(using_name=False, data_only=True):
res = self.delete_one(record["id"])
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | KnackhqAuth.get | python | def get(self, url, params=dict()):
# NOTE(review): mutable default `params=dict()` is shared across calls; it is
# never mutated here so it is harmless, but the pattern is fragile — confirm
# before copying it elsewhere.
try:
# Auth headers (app id / api key) are attached on every request.
res = requests.get(url, headers=self.headers, params=params)
# Response body is decoded from JSON and returned as plain dict/list.
return json.loads(res.text)
except Exception as e:
# Broad catch: any network or JSON-decoding failure is printed and collapsed
# into the sentinel string "error" — callers must compare against that literal.
print(e)
return "error" | Http get method wrapper, to support search. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L330-L338 | null
"""Knackhq API authentication class.
:param application_id: str type, Application ID
:param api_key: str type, API Key
To get your Application ID and API Key, read this tutorial:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#key
"""
def __init__(self, application_id, api_key):
self.application_id = application_id
self.api_key = api_key
self.headers = {
"X-Knack-Application-Id": self.application_id,
"X-Knack-REST-API-Key": self.api_key,
"Content-Type": "application/json",
}
@staticmethod
def from_dict(d):
return KnackhqAuth(**d)
@staticmethod
def from_json(abspath):
return KnackhqAuth.from_dict(load_js(abspath, enable_verbose=False))
def post(self, url, data):
"""Http post method wrapper, to support insert.
"""
try:
res = requests.post(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
def put(self, url, data):
"""Http put method wrapper, to support update.
"""
try:
res = requests.put(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
def delete(self, url):
"""Http delete method wrapper, to support delete.
"""
try:
res = requests.delete(url, headers=self.headers)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | KnackhqAuth.post | python | def post(self, url, data):
try:
# `data` (a plain dict) is serialized to JSON; headers carry the app id,
# API key and Content-Type: application/json.
res = requests.post(
url, headers=self.headers, data=json.dumps(data))
# Parsed API response (dict) is returned to the caller.
return json.loads(res.text)
except Exception as e:
# Same error convention as the other HTTP wrappers: print the exception
# and return the sentinel string "error" instead of raising.
print(e)
return "error" | Http post method wrapper, to support insert. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L340-L349 | null
"""Knackhq API authentication class.
:param application_id: str type, Application ID
:param api_key: str type, API Key
To get your Application ID and API Key, read this tutorial:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#key
"""
def __init__(self, application_id, api_key):
self.application_id = application_id
self.api_key = api_key
self.headers = {
"X-Knack-Application-Id": self.application_id,
"X-Knack-REST-API-Key": self.api_key,
"Content-Type": "application/json",
}
@staticmethod
def from_dict(d):
return KnackhqAuth(**d)
@staticmethod
def from_json(abspath):
return KnackhqAuth.from_dict(load_js(abspath, enable_verbose=False))
def get(self, url, params=dict()):
"""Http get method wrapper, to support search.
"""
try:
res = requests.get(url, headers=self.headers, params=params)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
def put(self, url, data):
"""Http put method wrapper, to support update.
"""
try:
res = requests.put(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
def delete(self, url):
"""Http delete method wrapper, to support delete.
"""
try:
res = requests.delete(url, headers=self.headers)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | KnackhqAuth.delete | python | def delete(self, url):
try:
# DELETE against the fully-formed record URL built by the caller.
res = requests.delete(url, headers=self.headers)
# Knack replies with a JSON body even for deletes; return it parsed.
return json.loads(res.text)
except Exception as e:
# Broad catch mirrors get/post/put: failures are printed and reduced
# to the sentinel string "error" rather than propagated.
print(e)
return "error" | Http delete method wrapper, to support delete. | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L362-L370 | null
"""Knackhq API authentication class.
:param application_id: str type, Application ID
:param api_key: str type, API Key
To get your Application ID and API Key, read this tutorial:
http://helpdesk.knackhq.com/support/solutions/articles/5000444173-working-with-the-api#key
"""
def __init__(self, application_id, api_key):
self.application_id = application_id
self.api_key = api_key
self.headers = {
"X-Knack-Application-Id": self.application_id,
"X-Knack-REST-API-Key": self.api_key,
"Content-Type": "application/json",
}
@staticmethod
def from_dict(d):
return KnackhqAuth(**d)
@staticmethod
def from_json(abspath):
return KnackhqAuth.from_dict(load_js(abspath, enable_verbose=False))
def get(self, url, params=dict()):
"""Http get method wrapper, to support search.
"""
try:
res = requests.get(url, headers=self.headers, params=params)
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
def post(self, url, data):
"""Http post method wrapper, to support insert.
"""
try:
res = requests.post(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
def put(self, url, data):
"""Http put method wrapper, to support update.
"""
try:
res = requests.put(
url, headers=self.headers, data=json.dumps(data))
return json.loads(res.text)
except Exception as e:
print(e)
return "error"
|
MacHu-GWU/pyknackhq-project | pyknackhq/client.py | KnackhqClient.get_collection | python | def get_collection(self, key, using_name=True):
# Resolve the schema Object (by name or key) then rebuild it as a Collection.
object_ = self.application.get_object(key, using_name=using_name)
collection = Collection.from_dict(object_.__dict__)
# Inject the authenticated HTTP verbs from `self.auth` onto the collection so
# its CRUD methods (insert/find/update/delete) can talk to the API directly.
for http_cmd in ["get", "post", "put", "delete"]:
collection.__setattr__(http_cmd, self.auth.__getattribute__(http_cmd))
return collection | Get :class:`Collection` instance.
:param key: object_key or object_name
:param using_name: True if getting object by object name | train | https://github.com/MacHu-GWU/pyknackhq-project/blob/dd937f24d7b0a351ba3818eb746c31b29a8cc341/pyknackhq/client.py#L410-L420 | [
"def from_dict(d):\n return Collection(**d)\n"
] | class KnackhqClient(object):
"""Knackhq API client class.
:param auth: A :class:`KnackAuth` instance.
:param application: An :class:`~pyknackhq.schema.Application` instance.
If it is not given, the client automatically pull it from knack server.
How to construct a knackhq api client::
from pyknackhq import KnackhqClient, KnackhqAuth
auth = KnackhqAuth(application_id="your app id", api_key="your api key")
client = KnackClient(auth=auth)
"""
def __init__(self, auth, application=None):
self.auth = auth
if isinstance(application, Application):
self.application = application
else: # get the schema json, construct Application instance
res = requests.get(
"https://api.knackhq.com/v1/applications/%s" %
self.auth.application_id)
self.application = Application.from_dict(json.loads(res.text))
def __str__(self):
return "KnackhqClient(application='%s')" % self.application
def __repr__(self):
return str(self)
@property
def all_object_key(self):
return self.application.all_object_key
@property
def all_object_name(self):
return self.application.all_object_name
def get_collection(self, key, using_name=True):
"""Get :class:`Collection` instance.
:param key: object_key or object_name
:param using_name: True if getting object by object name
"""
object_ = self.application.get_object(key, using_name=using_name)
collection = Collection.from_dict(object_.__dict__)
for http_cmd in ["get", "post", "put", "delete"]:
collection.__setattr__(http_cmd, self.auth.__getattribute__(http_cmd))
return collection
def export_schema(self, abspath):
"""Export application detailed information to a nicely formatted json
file.
"""
self.application.to_json(abspath)
|
ryanjdillon/pylleo | pylleo/utils_bokeh.py | create_bokeh_server | python | def create_bokeh_server(io_loop, files, argvs, host, port):
'''Start bokeh server with applications paths

Imports are function-local so bokeh is only required when a server is
actually created.
'''
from bokeh.server.server import Server
from bokeh.command.util import build_single_handler_applications
# Turn file paths into bokeh apps
apps = build_single_handler_applications(files, argvs)
# kwargs lifted from bokeh serve call to Server, with created io_loop
# NOTE(review): this kwarg set (e.g. `develop`, `host`) matches an older
# pre-1.0 bokeh Server API — confirm against the installed bokeh version.
kwargs = {
'io_loop':io_loop,
'generate_session_ids':True,
'redirect_root':True,
'use_x_headers':False,
'secret_key':None,
'num_procs':1,
'host': host,
'sign_sessions':False,
'develop':False,
'port':port,
'use_index':True
}
# The Server is returned un-started; the caller drives the io_loop.
server = Server(apps,**kwargs)
return server | Start bokeh server with applications paths | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils_bokeh.py#L2-L26 | null
def run_server_to_disconnect(files, port=5000, new='tab'):
    '''Serve bokeh apps, open them in a browser, stop on full disconnect.

    Starts a tornado IO loop hosting a bokeh server for the given app
    files, opens each app in the browser, then polls the server's
    sessions in a background thread and stops the IO loop once every
    opened session has lost all of its connections.

    Args
    ----
    files: iterable of str
        Paths to the bokeh application scripts to serve
    port: int
        Port to serve on; the host is always `localhost`
    new: str
        Where to open each app: 'current', 'window' or 'tab'
    '''
    def start_bokeh(io_loop):
        '''Start the `io_loop` (blocks until the loop is stopped)'''
        io_loop.start()
        return None
    def launch_app(host, app_name, new):
        '''Launch app in browser
        Ideally this would `bokeh.util.browser.view()`, but it doesn't work
        '''
        import webbrowser
        # Map method strings to the `webbrowser.open` `new` argument
        options = {'current':0, 'window':1, 'tab':2}
        # Concatenate url and open in browser, creating a session
        app_url = 'http://{}/{}'.format(host, app_name)
        print('Opening `{}` in browser'.format(app_url))
        webbrowser.open(app_url, new=options[new])
        return None
    def server_loop(server, io_loop):
        '''Check connections once session created and close on disconnect'''
        import time
        connected = [True,]
        session_loaded = False
        while any(connected):
            # Check if no session started on server
            sessions = server.get_sessions()
            if not session_loaded:
                if sessions:
                    session_loaded = True
            # Once 1+ sessions started, check for no connections
            else:
                # One bool per session, True while it has connections
                connected = [True,]*len(sessions)
                # Mark a session False once it has zero connections
                for i in range(len(sessions)):
                    if sessions[i].connection_count == 0:
                        connected[i] = False
            # Poll every 2 s to keep the loop cheap
            time.sleep(2)
        # Stop server once opened session connections closed
        io_loop.stop()
        return None
    import os
    import threading
    import tornado.ioloop
    import tornado.autoreload
    import time
    # Initialize some values, sanitize the paths to the bokeh plots
    argvs = {}
    app_names = []
    for path in files:
        argvs[path] = None
        # App name is the file name without directory or extension
        app_names.append(os.path.splitext(os.path.split(path)[1])[0])
    # Concatenate hostname/port for creating handlers, launching apps
    host = 'localhost:{}'.format(port)
    # Initialize the tornado server
    io_loop = tornado.ioloop.IOLoop.instance()
    tornado.autoreload.start(io_loop)
    # Add the io_loop to the bokeh server
    server = create_bokeh_server(io_loop, files, argvs, host, port)
    print('Starting the server on {}'.format(host))
    # Run the blocking IO loop in its own thread
    args = (io_loop,)
    th_startup = threading.Thread(target=start_bokeh, args=args)
    th_startup.start()
    # Launch each application in own tab or window
    th_launch = [None,]*len(app_names)
    for i in range(len(app_names)):
        args = (host, app_names[i], new)
        th_launch[i] = threading.Thread(target=launch_app, args=args)
        th_launch[i].start()
        # Delay to allow tabs to open in same browser window
        time.sleep(2)
    # Run session connection test, then stop `io_loop` on disconnect
    args = (server, io_loop)
    th_shutdown = threading.Thread(target=server_loop, args=args)
    th_shutdown.start()
    return None
|
ryanjdillon/pylleo | pylleo/utils.py | predict_encoding | python | def predict_encoding(file_path, n_lines=20):
'''Get file encoding of a text file'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding'] | Get file encoding of a text file | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L15-L24 | null |
def get_testdata_path(tag_model):
    '''Return the path of the sample data directory for a tag model.

    The model name is normalised to upper case with dashes and
    underscores removed before looking for ``../datasets/<MODEL>``.
    Raises ``FileNotFoundError`` when no such directory exists.
    '''
    import os
    normalized = tag_model.upper().replace('-', '').replace('_', '')
    candidate = os.path.join('../datasets/{}'.format(normalized))
    if os.path.isdir(candidate):
        return candidate
    raise FileNotFoundError('No sample dataset found for tag '
                            '{}.'.format(normalized))
def get_n_header(f, header_char='"'):
    '''Count the number of header rows in a Little Leonardo data file.

    Args
    ----
    f : file stream
        Open file handle positioned at the start of the header
    header_char: str
        Prefix that marks a line as belonging to the header

    Returns
    -------
    n_header: int
        Number of consecutive header rows at the top of the file
    '''
    count = 0
    line = f.readline()
    # Consume lines until the first one that is not a header row;
    # readline() returns '' at EOF, which never matches the prefix.
    while line.startswith(header_char):
        count += 1
        line = f.readline()
    return count
def get_tag_params(tag_model):
    '''Return the parameter (data channel) names for a tag model.

    Dashes are stripped from ``tag_model`` before the lookup; a
    ``KeyError`` is raised for unknown models.
    '''
    key = tag_model.replace('-', '')
    known_tags = {
        'W190PD3GT': ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
                      'Depth', 'Propeller', 'Temperature'],
    }
    if key not in known_tags:
        raise KeyError('{} not found in tag dictionary'.format(key))
    return known_tags[key]
def find_file(path_dir, search_str, file_ext):
    '''Find the path of the first matching file in a directory.

    Args
    ----
    path_dir: str
        Directory to search (non-recursive)
    search_str: str
        Substring that must appear in the file name
    file_ext: str
        Required file name suffix (e.g. '.TXT')

    Returns
    -------
    file_path: str
        Full path of the first file (in ``os.listdir`` order) whose name
        contains `search_str` and ends with `file_ext`

    Raises
    ------
    SystemError
        If no file matches. NOTE(review): `FileNotFoundError` would be
        more apt, but `SystemError` is kept so existing callers that
        catch it keep working.
    '''
    import os
    # Early return replaces the original sentinel + `== None` comparison
    for file_name in os.listdir(path_dir):
        if (search_str in file_name) and file_name.endswith(file_ext):
            return os.path.join(path_dir, file_name)
    raise SystemError('No file found containing string: '
                      '{}.'.format(search_str))
def posix_string(s):
    '''Return string in lower case with spaces and dashes as underscores.

    Args
    ----
    s: str
        String to normalise

    Returns
    -------
    s_mod: str
        Lower-case copy of `s` with ``' '`` and ``'-'`` replaced by ``'_'``
    '''
    # A single translate() pass replaces the chained .replace() calls
    separators_to_underscore = str.maketrans({' ': '_', '-': '_'})
    return s.lower().translate(separators_to_underscore)
def nearest(items, pivot):
    '''Find the value in an iterable nearest to `pivot`.

    Works for any type supporting subtraction and abs(), including
    datetimes. On a tie, the first of the equally-near values wins;
    an empty iterable raises ``ValueError`` (from ``min``).

    Args
    ----
    items: iterable
        Values from which to pick the nearest one
    pivot: int or float
        Value to find the nearest of in `items`

    Returns
    -------
    nearest: int or float
        Element of `items` nearest to `pivot`
    '''
    def _distance(value):
        return abs(value - pivot)
    return min(items, key=_distance)
def parse_experiment_params(name_exp):
    '''Parse experiment parameters from the data directory name.

    The name is expected to be underscore-separated:
    ``<date>_<tag-model>_<tag-id>_<animal>_<notes>``; dashes are
    stripped from the tag model field.

    Args
    ----
    name_exp: str
        Name of data directory with experiment parameters

    Returns
    -------
    tag_params: dict of str
        Parsed parameters: experiment, tag_model, tag_id, animal, notes

    Raises
    ------
    ValueError
        If `name_exp` contains path separators (a path, not a name)
    '''
    # Guard against being handed a full path instead of a directory name
    if ('/' in name_exp) or ('\\' in name_exp):
        raise ValueError("The path {} appears to be a path. Please pass "
                         "only the data directory's name (i.e. the "
                         "experiment name)".format(name_exp))
    fields = name_exp.split('_')
    return {
        'experiment': name_exp,
        'tag_model': fields[1].replace('-', ''),
        'tag_id': fields[2],
        'animal': fields[3],
        'notes': fields[4],
    }
|
ryanjdillon/pylleo | pylleo/utils.py | get_n_header | python | def get_n_header(f, header_char='"'):
'''Get the nummber of header rows in a Little Leonardo data file
Args
----
f : file stream
File handle for the file from which header rows will be read
header_char: str
Character array at beginning of each header line
Returns
-------
n_header: int
Number of header rows in Little Leonardo data file
'''
n_header = 0
reading_headers = True
while reading_headers:
line = f.readline()
if line.startswith(header_char):
n_header += 1
else:
reading_headers = False
return n_header | Get the nummber of header rows in a Little Leonardo data file
Args
----
f : file stream
File handle for the file from which header rows will be read
header_char: str
Character array at beginning of each header line
Returns
-------
n_header: int
Number of header rows in Little Leonardo data file | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L27-L52 | null |
def get_testdata_path(tag_model):
'''Get path to sample data directory for given tag model'''
import os
tag_model = tag_model.upper().replace('-','').replace('_','')
sample_path = os.path.join('../datasets/{}'.format(tag_model))
if not os.path.isdir(sample_path):
raise FileNotFoundError('No sample dataset found for tag '
'{}.'.format(tag_model))
return sample_path
def predict_encoding(file_path, n_lines=20):
    '''Guess the character encoding of a text file.

    Reads up to `n_lines` raw lines and feeds them to ``chardet``;
    returns the detected encoding name (or None if undetectable).
    '''
    import chardet
    with open(file_path, 'rb') as handle:
        # Sample the first n_lines of raw bytes for the detector
        sample = b''.join(handle.readline() for _ in range(n_lines))
    return chardet.detect(sample)['encoding']
def get_tag_params(tag_model):
'''Load param strs and n_header based on model of tag model'''
tag_model = tag_model.replace('-', '')
tags = dict()
tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
'Depth', 'Propeller', 'Temperature']
# Return tag parameters if found, else raise error
if tag_model in tags:
return tags[tag_model]
else:
raise KeyError('{} not found in tag dictionary'.format(tag_model))
def find_file(path_dir, search_str, file_ext):
'''Find path of file in directory containing the search string'''
import os
file_path = None
for file_name in os.listdir(path_dir):
if (search_str in file_name) and (file_name.endswith(file_ext)):
file_path = os.path.join(path_dir, file_name)
break
if file_path == None:
raise SystemError('No file found containing string: '
'{}.'.format(search_str))
return file_path
def posix_string(s):
'''Return string in lower case with spaces and dashes as underscores
Args
----
s: str
string to modify
Returns
-------
s_mod: str
string with ` ` and `-` replaced with `_`
'''
return s.lower().replace(' ','_').replace('-','_')
def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot))
def parse_experiment_params(name_exp):
'''Parse experiment parameters from the data directory name
Args
----
name_exp: str
Name of data directory with experiment parameters
Returns
-------
tag_params: dict of str
Dictionary of parsed experiment parameters
'''
if ('/' in name_exp) or ('\\' in name_exp):
raise ValueError("The path {} appears to be a path. Please pass "
"only the data directory's name (i.e. the "
"experiment name)".format(name_exp))
tag_params = dict()
tag_params['experiment'] = name_exp
tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')
tag_params['tag_id'] = name_exp.split('_')[2]
tag_params['animal'] = name_exp.split('_')[3]
tag_params['notes'] = name_exp.split('_')[4]
return tag_params
|
ryanjdillon/pylleo | pylleo/utils.py | get_tag_params | python | def get_tag_params(tag_model):
'''Load param strs and n_header based on model of tag model'''
tag_model = tag_model.replace('-', '')
tags = dict()
tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
'Depth', 'Propeller', 'Temperature']
# Return tag parameters if found, else raise error
if tag_model in tags:
return tags[tag_model]
else:
raise KeyError('{} not found in tag dictionary'.format(tag_model)) | Load param strs and n_header based on model of tag model | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L55-L67 | null |
def get_testdata_path(tag_model):
'''Get path to sample data directory for given tag model'''
import os
tag_model = tag_model.upper().replace('-','').replace('_','')
sample_path = os.path.join('../datasets/{}'.format(tag_model))
if not os.path.isdir(sample_path):
raise FileNotFoundError('No sample dataset found for tag '
'{}.'.format(tag_model))
return sample_path
def predict_encoding(file_path, n_lines=20):
'''Get file encoding of a text file'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding']
def get_n_header(f, header_char='"'):
'''Get the nummber of header rows in a Little Leonardo data file
Args
----
f : file stream
File handle for the file from which header rows will be read
header_char: str
Character array at beginning of each header line
Returns
-------
n_header: int
Number of header rows in Little Leonardo data file
'''
n_header = 0
reading_headers = True
while reading_headers:
line = f.readline()
if line.startswith(header_char):
n_header += 1
else:
reading_headers = False
return n_header
def find_file(path_dir, search_str, file_ext):
'''Find path of file in directory containing the search string'''
import os
file_path = None
for file_name in os.listdir(path_dir):
if (search_str in file_name) and (file_name.endswith(file_ext)):
file_path = os.path.join(path_dir, file_name)
break
if file_path == None:
raise SystemError('No file found containing string: '
'{}.'.format(search_str))
return file_path
def posix_string(s):
'''Return string in lower case with spaces and dashes as underscores
Args
----
s: str
string to modify
Returns
-------
s_mod: str
string with ` ` and `-` replaced with `_`
'''
return s.lower().replace(' ','_').replace('-','_')
def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot))
def parse_experiment_params(name_exp):
'''Parse experiment parameters from the data directory name
Args
----
name_exp: str
Name of data directory with experiment parameters
Returns
-------
tag_params: dict of str
Dictionary of parsed experiment parameters
'''
if ('/' in name_exp) or ('\\' in name_exp):
raise ValueError("The path {} appears to be a path. Please pass "
"only the data directory's name (i.e. the "
"experiment name)".format(name_exp))
tag_params = dict()
tag_params['experiment'] = name_exp
tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')
tag_params['tag_id'] = name_exp.split('_')[2]
tag_params['animal'] = name_exp.split('_')[3]
tag_params['notes'] = name_exp.split('_')[4]
return tag_params
|
ryanjdillon/pylleo | pylleo/utils.py | find_file | python | def find_file(path_dir, search_str, file_ext):
'''Find path of file in directory containing the search string'''
import os
file_path = None
for file_name in os.listdir(path_dir):
if (search_str in file_name) and (file_name.endswith(file_ext)):
file_path = os.path.join(path_dir, file_name)
break
if file_path == None:
raise SystemError('No file found containing string: '
'{}.'.format(search_str))
return file_path | Find path of file in directory containing the search string | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L70-L85 | null |
def get_testdata_path(tag_model):
'''Get path to sample data directory for given tag model'''
import os
tag_model = tag_model.upper().replace('-','').replace('_','')
sample_path = os.path.join('../datasets/{}'.format(tag_model))
if not os.path.isdir(sample_path):
raise FileNotFoundError('No sample dataset found for tag '
'{}.'.format(tag_model))
return sample_path
def predict_encoding(file_path, n_lines=20):
'''Get file encoding of a text file'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding']
def get_n_header(f, header_char='"'):
'''Get the nummber of header rows in a Little Leonardo data file
Args
----
f : file stream
File handle for the file from which header rows will be read
header_char: str
Character array at beginning of each header line
Returns
-------
n_header: int
Number of header rows in Little Leonardo data file
'''
n_header = 0
reading_headers = True
while reading_headers:
line = f.readline()
if line.startswith(header_char):
n_header += 1
else:
reading_headers = False
return n_header
def get_tag_params(tag_model):
'''Load param strs and n_header based on model of tag model'''
tag_model = tag_model.replace('-', '')
tags = dict()
tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
'Depth', 'Propeller', 'Temperature']
# Return tag parameters if found, else raise error
if tag_model in tags:
return tags[tag_model]
else:
raise KeyError('{} not found in tag dictionary'.format(tag_model))
def posix_string(s):
'''Return string in lower case with spaces and dashes as underscores
Args
----
s: str
string to modify
Returns
-------
s_mod: str
string with ` ` and `-` replaced with `_`
'''
return s.lower().replace(' ','_').replace('-','_')
def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot))
def parse_experiment_params(name_exp):
'''Parse experiment parameters from the data directory name
Args
----
name_exp: str
Name of data directory with experiment parameters
Returns
-------
tag_params: dict of str
Dictionary of parsed experiment parameters
'''
if ('/' in name_exp) or ('\\' in name_exp):
raise ValueError("The path {} appears to be a path. Please pass "
"only the data directory's name (i.e. the "
"experiment name)".format(name_exp))
tag_params = dict()
tag_params['experiment'] = name_exp
tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')
tag_params['tag_id'] = name_exp.split('_')[2]
tag_params['animal'] = name_exp.split('_')[3]
tag_params['notes'] = name_exp.split('_')[4]
return tag_params
|
ryanjdillon/pylleo | pylleo/utils.py | nearest | python | def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot)) | Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot` | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L104-L119 | null |
def get_testdata_path(tag_model):
'''Get path to sample data directory for given tag model'''
import os
tag_model = tag_model.upper().replace('-','').replace('_','')
sample_path = os.path.join('../datasets/{}'.format(tag_model))
if not os.path.isdir(sample_path):
raise FileNotFoundError('No sample dataset found for tag '
'{}.'.format(tag_model))
return sample_path
def predict_encoding(file_path, n_lines=20):
'''Get file encoding of a text file'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding']
def get_n_header(f, header_char='"'):
'''Get the nummber of header rows in a Little Leonardo data file
Args
----
f : file stream
File handle for the file from which header rows will be read
header_char: str
Character array at beginning of each header line
Returns
-------
n_header: int
Number of header rows in Little Leonardo data file
'''
n_header = 0
reading_headers = True
while reading_headers:
line = f.readline()
if line.startswith(header_char):
n_header += 1
else:
reading_headers = False
return n_header
def get_tag_params(tag_model):
'''Load param strs and n_header based on model of tag model'''
tag_model = tag_model.replace('-', '')
tags = dict()
tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
'Depth', 'Propeller', 'Temperature']
# Return tag parameters if found, else raise error
if tag_model in tags:
return tags[tag_model]
else:
raise KeyError('{} not found in tag dictionary'.format(tag_model))
def find_file(path_dir, search_str, file_ext):
'''Find path of file in directory containing the search string'''
import os
file_path = None
for file_name in os.listdir(path_dir):
if (search_str in file_name) and (file_name.endswith(file_ext)):
file_path = os.path.join(path_dir, file_name)
break
if file_path == None:
raise SystemError('No file found containing string: '
'{}.'.format(search_str))
return file_path
def posix_string(s):
'''Return string in lower case with spaces and dashes as underscores
Args
----
s: str
string to modify
Returns
-------
s_mod: str
string with ` ` and `-` replaced with `_`
'''
return s.lower().replace(' ','_').replace('-','_')
def parse_experiment_params(name_exp):
'''Parse experiment parameters from the data directory name
Args
----
name_exp: str
Name of data directory with experiment parameters
Returns
-------
tag_params: dict of str
Dictionary of parsed experiment parameters
'''
if ('/' in name_exp) or ('\\' in name_exp):
raise ValueError("The path {} appears to be a path. Please pass "
"only the data directory's name (i.e. the "
"experiment name)".format(name_exp))
tag_params = dict()
tag_params['experiment'] = name_exp
tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')
tag_params['tag_id'] = name_exp.split('_')[2]
tag_params['animal'] = name_exp.split('_')[3]
tag_params['notes'] = name_exp.split('_')[4]
return tag_params
|
ryanjdillon/pylleo | pylleo/utils.py | parse_experiment_params | python | def parse_experiment_params(name_exp):
'''Parse experiment parameters from the data directory name
Args
----
name_exp: str
Name of data directory with experiment parameters
Returns
-------
tag_params: dict of str
Dictionary of parsed experiment parameters
'''
if ('/' in name_exp) or ('\\' in name_exp):
raise ValueError("The path {} appears to be a path. Please pass "
"only the data directory's name (i.e. the "
"experiment name)".format(name_exp))
tag_params = dict()
tag_params['experiment'] = name_exp
tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')
tag_params['tag_id'] = name_exp.split('_')[2]
tag_params['animal'] = name_exp.split('_')[3]
tag_params['notes'] = name_exp.split('_')[4]
return tag_params | Parse experiment parameters from the data directory name
Args
----
name_exp: str
Name of data directory with experiment parameters
Returns
-------
tag_params: dict of str
Dictionary of parsed experiment parameters | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/utils.py#L122-L147 | null |
def get_testdata_path(tag_model):
'''Get path to sample data directory for given tag model'''
import os
tag_model = tag_model.upper().replace('-','').replace('_','')
sample_path = os.path.join('../datasets/{}'.format(tag_model))
if not os.path.isdir(sample_path):
raise FileNotFoundError('No sample dataset found for tag '
'{}.'.format(tag_model))
return sample_path
def predict_encoding(file_path, n_lines=20):
'''Get file encoding of a text file'''
import chardet
# Open the file as binary data
with open(file_path, 'rb') as f:
# Join binary lines for specified number of lines
rawdata = b''.join([f.readline() for _ in range(n_lines)])
return chardet.detect(rawdata)['encoding']
def get_n_header(f, header_char='"'):
'''Get the nummber of header rows in a Little Leonardo data file
Args
----
f : file stream
File handle for the file from which header rows will be read
header_char: str
Character array at beginning of each header line
Returns
-------
n_header: int
Number of header rows in Little Leonardo data file
'''
n_header = 0
reading_headers = True
while reading_headers:
line = f.readline()
if line.startswith(header_char):
n_header += 1
else:
reading_headers = False
return n_header
def get_tag_params(tag_model):
'''Load param strs and n_header based on model of tag model'''
tag_model = tag_model.replace('-', '')
tags = dict()
tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',
'Depth', 'Propeller', 'Temperature']
# Return tag parameters if found, else raise error
if tag_model in tags:
return tags[tag_model]
else:
raise KeyError('{} not found in tag dictionary'.format(tag_model))
def find_file(path_dir, search_str, file_ext):
'''Find path of file in directory containing the search string'''
import os
file_path = None
for file_name in os.listdir(path_dir):
if (search_str in file_name) and (file_name.endswith(file_ext)):
file_path = os.path.join(path_dir, file_name)
break
if file_path == None:
raise SystemError('No file found containing string: '
'{}.'.format(search_str))
return file_path
def posix_string(s):
'''Return string in lower case with spaces and dashes as underscores
Args
----
s: str
string to modify
Returns
-------
s_mod: str
string with ` ` and `-` replaced with `_`
'''
return s.lower().replace(' ','_').replace('-','_')
def nearest(items, pivot):
'''Find nearest value in array, including datetimes
Args
----
items: iterable
List of values from which to find nearest value to `pivot`
pivot: int or float
Value to find nearest of in `items`
Returns
-------
nearest: int or float
Value in items nearest to `pivot`
'''
return min(items, key=lambda x: abs(x - pivot))
|
ryanjdillon/pylleo | pylleo/lleoio.py | read_meta | python | def read_meta(path_dir, tag_model, tag_id):
'''Read meta data from Little Leonardo data header rows
Args
----
path_dir: str
Parent directory containing lleo data files
tag_model: str
Little Leonardo tag model name
tag_id: str, int
Little Leonardo tag ID number
Returns
-------
meta: dict
dictionary with meta data from header lines of lleo data files
'''
from collections import OrderedDict
import os
import yamlord
from . import utils
def _parse_meta_line(line):
'''Return key, value pair parsed from data header line'''
# Parse the key and its value from the line
key, val = line.replace(':', '').replace('"', '').split(',')
return key.strip(), val.strip()
def _read_meta_all(f, meta, n_header):
'''Read all meta data from header rows of data file'''
# Skip 'File name' line
f.seek(0)
_ = f.readline()
# Create child dictionary for channel / file
line = f.readline()
key_ch, val_ch = _parse_meta_line(line)
val_ch = utils.posix_string(val_ch)
meta['parameters'][val_ch] = OrderedDict()
# Write header values to channel dict
for _ in range(n_header-2):
line = f.readline()
key, val = _parse_meta_line(line)
meta['parameters'][val_ch][key] = val.strip()
return meta
def _create_meta(path_dir, tag_model, tag_id):
'''Create meta data dictionary'''
import datetime
from . import utils
param_strs = utils.get_tag_params(tag_model)
# Create dictionary of meta data
meta = OrderedDict()
# Create fields for the parameters in data directory name
exp_name = os.path.split(path_dir)[1]
params_tag = utils.parse_experiment_params(exp_name)
for key, value in params_tag.items():
meta[key] = value
fmt = "%Y-%m-%d %H:%M:%S"
meta['date_modified'] = datetime.datetime.now().strftime(fmt)
meta['parameters'] = OrderedDict()
for param_str in param_strs:
print('Create meta entry for {}'.format(param_str))
path_file = utils.find_file(path_dir, param_str, '.TXT')
# Get number of header rows
enc = utils.predict_encoding(path_file, n_lines=20)
with open(path_file, 'r', encoding=enc) as f:
n_header = utils.get_n_header(f)
f.seek(0)
meta = _read_meta_all(f, meta, n_header=n_header)
return meta
# Load meta data from YAML file if it already exists
meta_yaml_path = os.path.join(path_dir, 'meta.yml')
# Load file if exists else create
if os.path.isfile(meta_yaml_path):
meta = yamlord.read_yaml(meta_yaml_path)
# Else create meta dictionary and save to YAML
else:
meta = _create_meta(path_dir, tag_model, tag_id)
yamlord.write_yaml(meta, meta_yaml_path)
return meta | Read meta data from Little Leonardo data header rows
Args
----
path_dir: str
Parent directory containing lleo data files
tag_model: str
Little Leonardo tag model name
tag_id: str, int
Little Leonardo tag ID number
Returns
-------
meta: dict
dictionary with meta data from header lines of lleo data files | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleoio.py#L2-L103 | [
"def _create_meta(path_dir, tag_model, tag_id):\n '''Create meta data dictionary'''\n import datetime\n from . import utils\n\n param_strs = utils.get_tag_params(tag_model)\n\n # Create dictionary of meta data\n meta = OrderedDict()\n\n # Create fields for the parameters in data directory name\n exp_name = os.path.split(path_dir)[1]\n params_tag = utils.parse_experiment_params(exp_name)\n for key, value in params_tag.items():\n meta[key] = value\n\n fmt = \"%Y-%m-%d %H:%M:%S\"\n meta['date_modified'] = datetime.datetime.now().strftime(fmt)\n\n meta['parameters'] = OrderedDict()\n\n for param_str in param_strs:\n print('Create meta entry for {}'.format(param_str))\n\n path_file = utils.find_file(path_dir, param_str, '.TXT')\n # Get number of header rows\n enc = utils.predict_encoding(path_file, n_lines=20)\n with open(path_file, 'r', encoding=enc) as f:\n n_header = utils.get_n_header(f)\n f.seek(0)\n meta = _read_meta_all(f, meta, n_header=n_header)\n\n return meta\n"
] |
def read_data(meta, path_dir, sample_f=1, decimate=False, overwrite=False):
    '''Read accelerometry data from leonardo txt files

    Args
    ----
    meta: dict
        Dictionary of meta data from header lines of lleo data files
    path_dir: str
        Parent directory containing lleo data files
    sample_f: int
        Return every `sample_f` data points
    decimate: bool
        Currently unused by this implementation
    overwrite: bool
        If True, re-read the txt files even when a pickle cache exists

    Returns
    -------
    acc: pandas.DataFrame
        Dataframe containing accelerometry data on x, y, z axes [m/s^2]
    depth: pandas.DataFrame
        Dataframe containing depth data [m]
    prop: pandas.DataFrame
        Dataframe containing speed data from propeller
    temp: pandas.DataFrame
        Dataframe containing temperature data
    '''
    import os
    import pandas
    from . import utils
    def _generate_datetimes(date, time, interval_s, n_timestamps):
        '''Generate list of datetimes from date/time with given interval'''
        from datetime import datetime, timedelta
        import pandas
        # TODO problematic if both m/d d/m options
        fmts = ['%Y/%m/%d %H%M%S',
                '%d/%m/%Y %H%M%S',
                '%m/%d/%Y %I%M%S %p',
                '%d/%m/%Y %I%M%S %p',]
        # NOTE(review): bare `except` here; if no format matches, `start`
        # is never bound and the code below raises NameError — confirm
        for fmt in fmts:
            try:
                start = pandas.to_datetime('{} {}'.format(date,time), format=fmt)
            except:
                print('Date format {:18} incorrect, '
                      'trying next...'.format(fmt))
            else:
                print('Date format {:18} correct.'.format(fmt))
                break
        # Create datetime array at fixed `interval_s` spacing
        datetimes = list()
        for i in range(n_timestamps):
            secs = interval_s*i
            datetimes.append(start + timedelta(seconds=secs))
        return datetimes
    def _read_data_file(meta, path_dir, param_str):
        '''Read single Little Leonardo txt data file'''
        import numpy
        import os
        import pandas
        from . import utils
        # Get path of data file and associated pickle file
        path_file = utils.find_file(path_dir, param_str, '.TXT')
        col_name = utils.posix_string(param_str)
        # Get number of header rows in file
        enc = utils.predict_encoding(path_file, n_lines=20)
        with open(path_file, 'r', encoding=enc) as f:
            n_header = utils.get_n_header(f)
        print('\nReading: {}'.format(col_name))
        data = numpy.genfromtxt(path_file, skip_header=n_header)
        interval_s = float(meta['parameters'][col_name]['Interval(Sec)'])
        date = meta['parameters'][col_name]['Start date']
        time = meta['parameters'][col_name]['Start time']
        # TODO review
        # Generate summed data if propeller sampling rate not 1:
        # sub-second propeller counts are summed into 1 Hz totals
        if (col_name == 'propeller') and (interval_s < 1):
            print('Too high sampling interval, taking sums')
            # Sampling rate (samples per second)
            fs = int(1/interval_s)
            print('data before', data.max())
            # Drop elements to make divisible by fs for summing
            data = data[:-int(len(data)%fs)]
            # Reshape to 2D with columns `fs` in length to be summed
            data = data.reshape(fs, int(len(data)/fs))
            data = numpy.sum(data, axis=0)
            interval_s = 1
            print('data after', data.max())
        datetimes = _generate_datetimes(date, time, interval_s, len(data))
        data = numpy.vstack((datetimes, data)).T
        df = pandas.DataFrame(data, columns=['datetimes', col_name])
        return df
    # Get list of string parameter names for tag model
    param_names = utils.get_tag_params(meta['tag_model'])
    # Pickle cache path; loaded unless `overwrite` forces a re-read
    pickle_file = os.path.join(path_dir, 'pydata_'+meta['experiment']+'.p')
    # Load or create pandas DataFrame with parameters associated with tag model
    if (os.path.exists(pickle_file)) and (overwrite is not True):
        data_df = pandas.read_pickle(pickle_file)
    else:
        first_col = True
        for name in param_names:
            next_df = _read_data_file(meta, path_dir, name)
            # Left-merge each parameter onto the first by timestamp
            if first_col == False:
                data_df = pandas.merge(data_df, next_df, on='datetimes', how='left')
            else:
                data_df = next_df
                first_col = False
            print('')
        # Convert columns to `datetime64` or `float64` types
        # NOTE(review): `errors='ignore'` is deprecated in newer pandas
        data_df = data_df.apply(lambda x: pandas.to_numeric(x, errors='ignore'))
        # Save file to pickle
        data_df.to_pickle(pickle_file)
    # Return DataFrame with every `sample_f`-th row
    return data_df.iloc[::sample_f,:]
|
ryanjdillon/pylleo | pylleo/lleoio.py | read_data | python | def read_data(meta, path_dir, sample_f=1, decimate=False, overwrite=False):
'''Read accelerometry data from leonardo txt files
Args
----
meta: dict
Dictionary of meta data from header lines of lleo data files
path_dir: str
Parent directory containing lleo data files
sample_f: int
Return every `sample_f` data points
Returns
-------
acc: pandas.DataFrame
Dataframe containing accelerometry data on x, y, z axes [m/s^2]
depth: pandas.DataFrame
Dataframe containing depth data [m]
prop: pandas.DataFrame
Dataframe containing speed data from propeller
temp: pandas.DataFrame
Dataframe containing temperature data
'''
import os
import pandas
from . import utils
def _generate_datetimes(date, time, interval_s, n_timestamps):
'''Generate list of datetimes from date/time with given interval'''
from datetime import datetime, timedelta
import pandas
# TODO problematic if both m/d d/m options
fmts = ['%Y/%m/%d %H%M%S',
'%d/%m/%Y %H%M%S',
'%m/%d/%Y %I%M%S %p',
'%d/%m/%Y %I%M%S %p',]
for fmt in fmts:
try:
start = pandas.to_datetime('{} {}'.format(date,time), format=fmt)
except:
print('Date format {:18} incorrect, '
'trying next...'.format(fmt))
else:
print('Date format {:18} correct.'.format(fmt))
break
# Create datetime array
datetimes = list()
for i in range(n_timestamps):
secs = interval_s*i
datetimes.append(start + timedelta(seconds=secs))
return datetimes
def _read_data_file(meta, path_dir, param_str):
'''Read single Little Leonardo txt data file'''
import numpy
import os
import pandas
from . import utils
# Get path of data file and associated pickle file
path_file = utils.find_file(path_dir, param_str, '.TXT')
col_name = utils.posix_string(param_str)
# Get number of header rows in file
enc = utils.predict_encoding(path_file, n_lines=20)
with open(path_file, 'r', encoding=enc) as f:
n_header = utils.get_n_header(f)
print('\nReading: {}'.format(col_name))
data = numpy.genfromtxt(path_file, skip_header=n_header)
interval_s = float(meta['parameters'][col_name]['Interval(Sec)'])
date = meta['parameters'][col_name]['Start date']
time = meta['parameters'][col_name]['Start time']
# TODO review
# Generate summed data if propeller sampling rate not 1
if (col_name == 'propeller') and (interval_s < 1):
print('Too high sampling interval, taking sums')
# Sampling rate
fs = int(1/interval_s)
print('data before', data.max())
# Drop elements to make divisible by fs for summing
data = data[:-int(len(data)%fs)]
# Reshape to 2D with columns `fs` in length to be summed
data = data.reshape(fs, int(len(data)/fs))
data = numpy.sum(data, axis=0)
interval_s = 1
print('data after', data.max())
datetimes = _generate_datetimes(date, time, interval_s, len(data))
data = numpy.vstack((datetimes, data)).T
df = pandas.DataFrame(data, columns=['datetimes', col_name])
return df
# Get list of string parameter names for tag model
param_names = utils.get_tag_params(meta['tag_model'])
# Load pickle file exists and code unchanged
pickle_file = os.path.join(path_dir, 'pydata_'+meta['experiment']+'.p')
# Load or create pandas DataFrame with parameters associated with tag model
if (os.path.exists(pickle_file)) and (overwrite is not True):
data_df = pandas.read_pickle(pickle_file)
else:
first_col = True
for name in param_names:
next_df = _read_data_file(meta, path_dir, name)
if first_col == False:
data_df = pandas.merge(data_df, next_df, on='datetimes', how='left')
else:
data_df = next_df
first_col = False
print('')
# Covert columns to `datetime64` or `float64` types
data_df = data_df.apply(lambda x: pandas.to_numeric(x, errors='ignore'))
# Save file to pickle
data_df.to_pickle(pickle_file)
# Return DataFrame with ever `sample_f` values
return data_df.iloc[::sample_f,:] | Read accelerometry data from leonardo txt files
Args
----
meta: dict
Dictionary of meta data from header lines of lleo data files
path_dir: str
Parent directory containing lleo data files
sample_f: int
Return every `sample_f` data points
Returns
-------
acc: pandas.DataFrame
Dataframe containing accelerometry data on x, y, z axes [m/s^2]
depth: pandas.DataFrame
Dataframe containing depth data [m]
prop: pandas.DataFrame
Dataframe containing speed data from propeller
temp: pandas.DataFrame
Dataframe containing temperature data | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleoio.py#L106-L240 | [
"def get_tag_params(tag_model):\n '''Load param strs and n_header based on model of tag model'''\n\n tag_model = tag_model.replace('-', '')\n tags = dict()\n tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',\n 'Depth', 'Propeller', 'Temperature']\n\n # Return tag parameters if found, else raise error\n if tag_model in tags:\n return tags[tag_model]\n else:\n raise KeyError('{} not found in tag dictionary'.format(tag_model))\n",
"def _read_data_file(meta, path_dir, param_str):\n '''Read single Little Leonardo txt data file'''\n import numpy\n import os\n import pandas\n\n from . import utils\n\n # Get path of data file and associated pickle file\n path_file = utils.find_file(path_dir, param_str, '.TXT')\n col_name = utils.posix_string(param_str)\n\n # Get number of header rows in file\n enc = utils.predict_encoding(path_file, n_lines=20)\n with open(path_file, 'r', encoding=enc) as f:\n n_header = utils.get_n_header(f)\n\n print('\\nReading: {}'.format(col_name))\n\n data = numpy.genfromtxt(path_file, skip_header=n_header)\n\n interval_s = float(meta['parameters'][col_name]['Interval(Sec)'])\n date = meta['parameters'][col_name]['Start date']\n time = meta['parameters'][col_name]['Start time']\n\n # TODO review\n # Generate summed data if propeller sampling rate not 1\n if (col_name == 'propeller') and (interval_s < 1):\n print('Too high sampling interval, taking sums')\n # Sampling rate\n fs = int(1/interval_s)\n\n print('data before', data.max())\n # Drop elements to make divisible by fs for summing\n data = data[:-int(len(data)%fs)]\n\n # Reshape to 2D with columns `fs` in length to be summed\n data = data.reshape(fs, int(len(data)/fs))\n data = numpy.sum(data, axis=0)\n interval_s = 1\n\n print('data after', data.max())\n\n datetimes = _generate_datetimes(date, time, interval_s, len(data))\n data = numpy.vstack((datetimes, data)).T\n df = pandas.DataFrame(data, columns=['datetimes', col_name])\n\n return df\n"
] |
def read_meta(path_dir, tag_model, tag_id):
'''Read meta data from Little Leonardo data header rows
Args
----
path_dir: str
Parent directory containing lleo data files
tag_model: str
Little Leonardo tag model name
tag_id: str, int
Little Leonardo tag ID number
Returns
-------
meta: dict
dictionary with meta data from header lines of lleo data files
'''
from collections import OrderedDict
import os
import yamlord
from . import utils
def _parse_meta_line(line):
'''Return key, value pair parsed from data header line'''
# Parse the key and its value from the line
key, val = line.replace(':', '').replace('"', '').split(',')
return key.strip(), val.strip()
def _read_meta_all(f, meta, n_header):
'''Read all meta data from header rows of data file'''
# Skip 'File name' line
f.seek(0)
_ = f.readline()
# Create child dictionary for channel / file
line = f.readline()
key_ch, val_ch = _parse_meta_line(line)
val_ch = utils.posix_string(val_ch)
meta['parameters'][val_ch] = OrderedDict()
# Write header values to channel dict
for _ in range(n_header-2):
line = f.readline()
key, val = _parse_meta_line(line)
meta['parameters'][val_ch][key] = val.strip()
return meta
def _create_meta(path_dir, tag_model, tag_id):
'''Create meta data dictionary'''
import datetime
from . import utils
param_strs = utils.get_tag_params(tag_model)
# Create dictionary of meta data
meta = OrderedDict()
# Create fields for the parameters in data directory name
exp_name = os.path.split(path_dir)[1]
params_tag = utils.parse_experiment_params(exp_name)
for key, value in params_tag.items():
meta[key] = value
fmt = "%Y-%m-%d %H:%M:%S"
meta['date_modified'] = datetime.datetime.now().strftime(fmt)
meta['parameters'] = OrderedDict()
for param_str in param_strs:
print('Create meta entry for {}'.format(param_str))
path_file = utils.find_file(path_dir, param_str, '.TXT')
# Get number of header rows
enc = utils.predict_encoding(path_file, n_lines=20)
with open(path_file, 'r', encoding=enc) as f:
n_header = utils.get_n_header(f)
f.seek(0)
meta = _read_meta_all(f, meta, n_header=n_header)
return meta
# Load meta data from YAML file if it already exists
meta_yaml_path = os.path.join(path_dir, 'meta.yml')
# Load file if exists else create
if os.path.isfile(meta_yaml_path):
meta = yamlord.read_yaml(meta_yaml_path)
# Else create meta dictionary and save to YAML
else:
meta = _create_meta(path_dir, tag_model, tag_id)
yamlord.write_yaml(meta, meta_yaml_path)
return meta
|
ryanjdillon/pylleo | pylleo/lleocal.py | get_cal_data | python | def get_cal_data(data_df, cal_dict, param):
'''Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields
'''
param = param.lower().replace(' ','_').replace('-','_')
idx_lower_start = cal_dict['parameters'][param]['lower']['start']
idx_lower_end = cal_dict['parameters'][param]['lower']['end']
idx_upper_start = cal_dict['parameters'][param]['upper']['start']
idx_upper_end = cal_dict['parameters'][param]['upper']['end']
idx_lower = (data_df.index >= idx_lower_start) & \
(data_df.index <= idx_lower_end)
idx_upper = (data_df.index >= idx_upper_start) & \
(data_df.index <= idx_upper_end)
return data_df[param][idx_lower], data_df[param][idx_upper] | Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L2-L38 | null |
def read_cal(cal_yaml_path):
'''Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data
'''
from collections import OrderedDict
import datetime
import os
import warnings
import yamlord
from . import utils
def __create_cal(cal_yaml_path):
cal_dict = OrderedDict()
# Add experiment name for calibration reference
base_path, _ = os.path.split(cal_yaml_path)
_, exp_name = os.path.split(base_path)
cal_dict['experiment'] = exp_name
return cal_dict
# Try reading cal file, else create
if os.path.isfile(cal_yaml_path):
cal_dict = yamlord.read_yaml(cal_yaml_path)
else:
cal_dict = __create_cal(cal_yaml_path)
cal_dict['parameters'] = OrderedDict()
for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():
cal_dict[key] = val
fmt = "%Y-%m-%d %H:%M:%S"
cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)
return cal_dict
def update(data_df, cal_dict, param, bound, start, end):
'''Update calibration times for give parameter and boundary'''
from collections import OrderedDict
if param not in cal_dict['parameters']:
cal_dict['parameters'][param] = OrderedDict()
if bound not in cal_dict['parameters'][param]:
cal_dict['parameters'][param][bound] = OrderedDict()
cal_dict['parameters'][param][bound]['start'] = start
cal_dict['parameters'][param][bound]['end'] = end
return cal_dict
def fit1d(lower, upper):
'''Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
'''
import numpy
# Get smallest size as index position for slicing
idx = min(len(lower), len(upper))
# Stack accelerometer count values for upper and lower bounds of curve
x = numpy.hstack((lower[:idx].values, upper[:idx].values))
x = x.astype(float)
# Make corresponding y array where all lower bound points equal -g
# and all upper bound points equal +g
y = numpy.zeros(len(x), dtype=float)
y[:idx] = -1.0 # negative gravity
y[idx:] = 1.0 # positive gravity
return numpy.polyfit(x, y, deg=1)
def calibrate_acc(data_df, cal_dict):
def apply_poly(data_df, cal_dict, param):
'''Apply poly fit to data array'''
import numpy
poly = cal_dict['parameters'][param]['poly']
a = numpy.polyval(poly, data_df[param])
return a.astype(float)
# Apply calibration and add as a new column to the dataframe
for ax in ['x', 'y', 'z']:
col = 'A{}_g'.format(ax)
col_cal = 'acceleration_{}'.format(ax)
data_df[col] = apply_poly(data_df, cal_dict, col_cal)
return data_df
def create_speed_csv(cal_fname, data):
import numpy
# Get a mask of values which contain a sample, assuming the propeller was
# not sampled at as high of a frequency as the accelerometer
notnan = ~numpy.isnan(data['propeller'])
# Read speed, start, and end times from csv
cal = pandas.read_csv(cal_fname)
# For each calibration in `speed_calibrations.csv`
for i in range(len(cal)):
start = cal.loc[i, 'start']
start = cal.loc[i, 'end']
dt0 = pylleo.utils.nearest(data['datetimes'][notnan], start)
dt1 = pylleo.utils.nearest(data['datetimes'][notnan], end)
cal_mask = (data['datetimes']>=dt0) & (data['datetimes']<=dt1)
count_avg = data['propeller'][cal_mask].mean()
cal.loc[i, 'count_average'] = count_avg
cal.to_csv(cal_fname)
return cal
def calibrate_propeller(data_df, cal_fname, plot=False):
def speed_calibration_average(cal_fname, plot):
'''Cacluate the coefficients for the mean fit of calibrations
Notes
-----
`cal_fname` should contain three columns:
date,est_speed,count_average
2014-04-18,2.012,30
'''
import datetime
import matplotlib.pyplot as plt
import numpy
import pandas
# Read calibration data
calibs = pandas.read_csv(cal_fname)
calibs['date'] = pandas.to_datetime(calibs['date'])
# Get unique dates to process fits for
udates = numpy.unique(calibs['date'])
# Create x data for samples and output array for y
n_samples = 1000
x = numpy.arange(n_samples)
fits = numpy.zeros((len(udates), n_samples), dtype=float)
# Calculate fit coefficients then store `n_samples number of samples
# Force intercept through zero (i.e. zero counts = zero speed)
# http://stackoverflow.com/a/9994484/943773
for i in range(len(udates)):
cal = calibs[calibs['date']==udates[i]]
xi = cal['count_average'].values[:, numpy.newaxis]
yi = cal['est_speed'].values
m, _, _, _ = numpy.linalg.lstsq(xi, yi)
fits[i, :] = m*x
# Add fit to plot if switch on
if plot:
plt.plot(x, fits[i,:], label='cal{}'.format(i))
# Calculate average of calibration samples
y_avg = numpy.mean(fits, axis=0)
# Add average fit to plot and show if switch on
if plot:
plt.plot(x, y_avg, label='avg')
plt.legend()
plt.show()
# Calculate fit coefficients for average samples
x_avg = x[:, numpy.newaxis]
m_avg, _, _, _ = numpy.linalg.lstsq(x_avg, y_avg)
return m_avg
m_avg = speed_calibration_average(cal_fname, plot=plot)
data_df['speed'] = m_avg * data_df['propeller']
return data_df
|
ryanjdillon/pylleo | pylleo/lleocal.py | read_cal | python | def read_cal(cal_yaml_path):
'''Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data
'''
from collections import OrderedDict
import datetime
import os
import warnings
import yamlord
from . import utils
def __create_cal(cal_yaml_path):
cal_dict = OrderedDict()
# Add experiment name for calibration reference
base_path, _ = os.path.split(cal_yaml_path)
_, exp_name = os.path.split(base_path)
cal_dict['experiment'] = exp_name
return cal_dict
# Try reading cal file, else create
if os.path.isfile(cal_yaml_path):
cal_dict = yamlord.read_yaml(cal_yaml_path)
else:
cal_dict = __create_cal(cal_yaml_path)
cal_dict['parameters'] = OrderedDict()
for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():
cal_dict[key] = val
fmt = "%Y-%m-%d %H:%M:%S"
cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)
return cal_dict | Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L41-L85 | [
"def parse_experiment_params(name_exp):\n '''Parse experiment parameters from the data directory name\n\n Args\n ----\n name_exp: str\n Name of data directory with experiment parameters\n\n Returns\n -------\n tag_params: dict of str\n Dictionary of parsed experiment parameters\n '''\n if ('/' in name_exp) or ('\\\\' in name_exp):\n raise ValueError(\"The path {} appears to be a path. Please pass \"\n \"only the data directory's name (i.e. the \"\n \"experiment name)\".format(name_exp))\n\n tag_params = dict()\n tag_params['experiment'] = name_exp\n tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')\n tag_params['tag_id'] = name_exp.split('_')[2]\n tag_params['animal'] = name_exp.split('_')[3]\n tag_params['notes'] = name_exp.split('_')[4]\n\n return tag_params\n",
"def __create_cal(cal_yaml_path):\n cal_dict = OrderedDict()\n\n # Add experiment name for calibration reference\n base_path, _ = os.path.split(cal_yaml_path)\n _, exp_name = os.path.split(base_path)\n cal_dict['experiment'] = exp_name\n\n return cal_dict\n"
] |
def get_cal_data(data_df, cal_dict, param):
'''Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields
'''
param = param.lower().replace(' ','_').replace('-','_')
idx_lower_start = cal_dict['parameters'][param]['lower']['start']
idx_lower_end = cal_dict['parameters'][param]['lower']['end']
idx_upper_start = cal_dict['parameters'][param]['upper']['start']
idx_upper_end = cal_dict['parameters'][param]['upper']['end']
idx_lower = (data_df.index >= idx_lower_start) & \
(data_df.index <= idx_lower_end)
idx_upper = (data_df.index >= idx_upper_start) & \
(data_df.index <= idx_upper_end)
return data_df[param][idx_lower], data_df[param][idx_upper]
def update(data_df, cal_dict, param, bound, start, end):
'''Update calibration times for give parameter and boundary'''
from collections import OrderedDict
if param not in cal_dict['parameters']:
cal_dict['parameters'][param] = OrderedDict()
if bound not in cal_dict['parameters'][param]:
cal_dict['parameters'][param][bound] = OrderedDict()
cal_dict['parameters'][param][bound]['start'] = start
cal_dict['parameters'][param][bound]['end'] = end
return cal_dict
def fit1d(lower, upper):
'''Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
'''
import numpy
# Get smallest size as index position for slicing
idx = min(len(lower), len(upper))
# Stack accelerometer count values for upper and lower bounds of curve
x = numpy.hstack((lower[:idx].values, upper[:idx].values))
x = x.astype(float)
# Make corresponding y array where all lower bound points equal -g
# and all upper bound points equal +g
y = numpy.zeros(len(x), dtype=float)
y[:idx] = -1.0 # negative gravity
y[idx:] = 1.0 # positive gravity
return numpy.polyfit(x, y, deg=1)
def calibrate_acc(data_df, cal_dict):
def apply_poly(data_df, cal_dict, param):
'''Apply poly fit to data array'''
import numpy
poly = cal_dict['parameters'][param]['poly']
a = numpy.polyval(poly, data_df[param])
return a.astype(float)
# Apply calibration and add as a new column to the dataframe
for ax in ['x', 'y', 'z']:
col = 'A{}_g'.format(ax)
col_cal = 'acceleration_{}'.format(ax)
data_df[col] = apply_poly(data_df, cal_dict, col_cal)
return data_df
def create_speed_csv(cal_fname, data):
import numpy
# Get a mask of values which contain a sample, assuming the propeller was
# not sampled at as high of a frequency as the accelerometer
notnan = ~numpy.isnan(data['propeller'])
# Read speed, start, and end times from csv
cal = pandas.read_csv(cal_fname)
# For each calibration in `speed_calibrations.csv`
for i in range(len(cal)):
start = cal.loc[i, 'start']
start = cal.loc[i, 'end']
dt0 = pylleo.utils.nearest(data['datetimes'][notnan], start)
dt1 = pylleo.utils.nearest(data['datetimes'][notnan], end)
cal_mask = (data['datetimes']>=dt0) & (data['datetimes']<=dt1)
count_avg = data['propeller'][cal_mask].mean()
cal.loc[i, 'count_average'] = count_avg
cal.to_csv(cal_fname)
return cal
def calibrate_propeller(data_df, cal_fname, plot=False):
def speed_calibration_average(cal_fname, plot):
'''Cacluate the coefficients for the mean fit of calibrations
Notes
-----
`cal_fname` should contain three columns:
date,est_speed,count_average
2014-04-18,2.012,30
'''
import datetime
import matplotlib.pyplot as plt
import numpy
import pandas
# Read calibration data
calibs = pandas.read_csv(cal_fname)
calibs['date'] = pandas.to_datetime(calibs['date'])
# Get unique dates to process fits for
udates = numpy.unique(calibs['date'])
# Create x data for samples and output array for y
n_samples = 1000
x = numpy.arange(n_samples)
fits = numpy.zeros((len(udates), n_samples), dtype=float)
# Calculate fit coefficients then store `n_samples number of samples
# Force intercept through zero (i.e. zero counts = zero speed)
# http://stackoverflow.com/a/9994484/943773
for i in range(len(udates)):
cal = calibs[calibs['date']==udates[i]]
xi = cal['count_average'].values[:, numpy.newaxis]
yi = cal['est_speed'].values
m, _, _, _ = numpy.linalg.lstsq(xi, yi)
fits[i, :] = m*x
# Add fit to plot if switch on
if plot:
plt.plot(x, fits[i,:], label='cal{}'.format(i))
# Calculate average of calibration samples
y_avg = numpy.mean(fits, axis=0)
# Add average fit to plot and show if switch on
if plot:
plt.plot(x, y_avg, label='avg')
plt.legend()
plt.show()
# Calculate fit coefficients for average samples
x_avg = x[:, numpy.newaxis]
m_avg, _, _, _ = numpy.linalg.lstsq(x_avg, y_avg)
return m_avg
m_avg = speed_calibration_average(cal_fname, plot=plot)
data_df['speed'] = m_avg * data_df['propeller']
return data_df
|
ryanjdillon/pylleo | pylleo/lleocal.py | update | python | def update(data_df, cal_dict, param, bound, start, end):
'''Update calibration times for give parameter and boundary'''
from collections import OrderedDict
if param not in cal_dict['parameters']:
cal_dict['parameters'][param] = OrderedDict()
if bound not in cal_dict['parameters'][param]:
cal_dict['parameters'][param][bound] = OrderedDict()
cal_dict['parameters'][param][bound]['start'] = start
cal_dict['parameters'][param][bound]['end'] = end
return cal_dict | Update calibration times for give parameter and boundary | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L88-L100 | null |
def get_cal_data(data_df, cal_dict, param):
'''Get data along specified axis during calibration intervals
Args
----
data_df: pandas.DataFrame
Pandas dataframe with lleo data
cal_dict: dict
Calibration dictionary
Returns
-------
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
See also
--------
lleoio.read_data: creates pandas dataframe `data_df`
read_cal: creates `cal_dict` and describes fields
'''
param = param.lower().replace(' ','_').replace('-','_')
idx_lower_start = cal_dict['parameters'][param]['lower']['start']
idx_lower_end = cal_dict['parameters'][param]['lower']['end']
idx_upper_start = cal_dict['parameters'][param]['upper']['start']
idx_upper_end = cal_dict['parameters'][param]['upper']['end']
idx_lower = (data_df.index >= idx_lower_start) & \
(data_df.index <= idx_lower_end)
idx_upper = (data_df.index >= idx_upper_start) & \
(data_df.index <= idx_upper_end)
return data_df[param][idx_lower], data_df[param][idx_upper]
def read_cal(cal_yaml_path):
'''Load calibration file if exists, else create
Args
----
cal_yaml_path: str
Path to calibration YAML file
Returns
-------
cal_dict: dict
Key value pairs of calibration meta data
'''
from collections import OrderedDict
import datetime
import os
import warnings
import yamlord
from . import utils
def __create_cal(cal_yaml_path):
cal_dict = OrderedDict()
# Add experiment name for calibration reference
base_path, _ = os.path.split(cal_yaml_path)
_, exp_name = os.path.split(base_path)
cal_dict['experiment'] = exp_name
return cal_dict
# Try reading cal file, else create
if os.path.isfile(cal_yaml_path):
cal_dict = yamlord.read_yaml(cal_yaml_path)
else:
cal_dict = __create_cal(cal_yaml_path)
cal_dict['parameters'] = OrderedDict()
for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():
cal_dict[key] = val
fmt = "%Y-%m-%d %H:%M:%S"
cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)
return cal_dict
def fit1d(lower, upper):
'''Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
'''
import numpy
# Get smallest size as index position for slicing
idx = min(len(lower), len(upper))
# Stack accelerometer count values for upper and lower bounds of curve
x = numpy.hstack((lower[:idx].values, upper[:idx].values))
x = x.astype(float)
# Make corresponding y array where all lower bound points equal -g
# and all upper bound points equal +g
y = numpy.zeros(len(x), dtype=float)
y[:idx] = -1.0 # negative gravity
y[idx:] = 1.0 # positive gravity
return numpy.polyfit(x, y, deg=1)
def calibrate_acc(data_df, cal_dict):
def apply_poly(data_df, cal_dict, param):
'''Apply poly fit to data array'''
import numpy
poly = cal_dict['parameters'][param]['poly']
a = numpy.polyval(poly, data_df[param])
return a.astype(float)
# Apply calibration and add as a new column to the dataframe
for ax in ['x', 'y', 'z']:
col = 'A{}_g'.format(ax)
col_cal = 'acceleration_{}'.format(ax)
data_df[col] = apply_poly(data_df, cal_dict, col_cal)
return data_df
def create_speed_csv(cal_fname, data):
import numpy
# Get a mask of values which contain a sample, assuming the propeller was
# not sampled at as high of a frequency as the accelerometer
notnan = ~numpy.isnan(data['propeller'])
# Read speed, start, and end times from csv
cal = pandas.read_csv(cal_fname)
# For each calibration in `speed_calibrations.csv`
for i in range(len(cal)):
start = cal.loc[i, 'start']
start = cal.loc[i, 'end']
dt0 = pylleo.utils.nearest(data['datetimes'][notnan], start)
dt1 = pylleo.utils.nearest(data['datetimes'][notnan], end)
cal_mask = (data['datetimes']>=dt0) & (data['datetimes']<=dt1)
count_avg = data['propeller'][cal_mask].mean()
cal.loc[i, 'count_average'] = count_avg
cal.to_csv(cal_fname)
return cal
def calibrate_propeller(data_df, cal_fname, plot=False):
def speed_calibration_average(cal_fname, plot):
'''Cacluate the coefficients for the mean fit of calibrations
Notes
-----
`cal_fname` should contain three columns:
date,est_speed,count_average
2014-04-18,2.012,30
'''
import datetime
import matplotlib.pyplot as plt
import numpy
import pandas
# Read calibration data
calibs = pandas.read_csv(cal_fname)
calibs['date'] = pandas.to_datetime(calibs['date'])
# Get unique dates to process fits for
udates = numpy.unique(calibs['date'])
# Create x data for samples and output array for y
n_samples = 1000
x = numpy.arange(n_samples)
fits = numpy.zeros((len(udates), n_samples), dtype=float)
# Calculate fit coefficients then store `n_samples number of samples
# Force intercept through zero (i.e. zero counts = zero speed)
# http://stackoverflow.com/a/9994484/943773
for i in range(len(udates)):
cal = calibs[calibs['date']==udates[i]]
xi = cal['count_average'].values[:, numpy.newaxis]
yi = cal['est_speed'].values
m, _, _, _ = numpy.linalg.lstsq(xi, yi)
fits[i, :] = m*x
# Add fit to plot if switch on
if plot:
plt.plot(x, fits[i,:], label='cal{}'.format(i))
# Calculate average of calibration samples
y_avg = numpy.mean(fits, axis=0)
# Add average fit to plot and show if switch on
if plot:
plt.plot(x, y_avg, label='avg')
plt.legend()
plt.show()
# Calculate fit coefficients for average samples
x_avg = x[:, numpy.newaxis]
m_avg, _, _, _ = numpy.linalg.lstsq(x_avg, y_avg)
return m_avg
m_avg = speed_calibration_average(cal_fname, plot=plot)
data_df['speed'] = m_avg * data_df['propeller']
return data_df
|
ryanjdillon/pylleo | pylleo/lleocal.py | fit1d | python | def fit1d(lower, upper):
'''Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0]
'''
import numpy
# Get smallest size as index position for slicing
idx = min(len(lower), len(upper))
# Stack accelerometer count values for upper and lower bounds of curve
x = numpy.hstack((lower[:idx].values, upper[:idx].values))
x = x.astype(float)
# Make corresponding y array where all lower bound points equal -g
# and all upper bound points equal +g
y = numpy.zeros(len(x), dtype=float)
y[:idx] = -1.0 # negative gravity
y[idx:] = 1.0 # positive gravity
return numpy.polyfit(x, y, deg=1) | Fit acceleration data at lower and upper boundaries of gravity
Args
----
lower: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
upper: pandas dataframe
slice of lleo datafram containing points at -1g calibration position
Returns
-------
p: ndarray
Polynomial coefficients, highest power first. If y was 2-D, the
coefficients for k-th data set are in p[:,k]. From `numpy.polyfit()`.
NOTE
----
This method should be compared agaist alternate linalg method, which allows
for 2d for 2d poly, see - http://stackoverflow.com/a/33966967/943773
A = numpy.vstack(lower, upper).transpose()
y = A[:,1]
m, c = numpy.linalg.lstsq(A, y)[0] | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/lleocal.py#L103-L143 | null |
def get_cal_data(data_df, cal_dict, param):
    '''Slice the data along one axis during its calibration intervals

    Args
    ----
    data_df: pandas.DataFrame
        Pandas dataframe with lleo data
    cal_dict: dict
        Calibration dictionary
    param: str
        Parameter name; normalized to lowercase with underscores

    Returns
    -------
    lower: pandas dataframe
        slice of lleo dataframe containing points at the -1g position
    upper: pandas dataframe
        slice of lleo dataframe containing points at the +1g position

    See also
    --------
    lleoio.read_data: creates pandas dataframe `data_df`
    read_cal: creates `cal_dict` and describes fields
    '''
    # Normalize parameter name (e.g. 'Acceleration-X' -> 'acceleration_x')
    key = param.lower().replace(' ', '_').replace('-', '_')
    bounds = cal_dict['parameters'][key]

    def _region_slice(region):
        # Boolean mask over the index for this region's recorded interval
        start = bounds[region]['start']
        end = bounds[region]['end']
        mask = (data_df.index >= start) & (data_df.index <= end)
        return data_df[key][mask]

    return _region_slice('lower'), _region_slice('upper')
def read_cal(cal_yaml_path):
    '''Load calibration file if it exists, else create a new one

    Args
    ----
    cal_yaml_path: str
        Path to calibration YAML file (inside the experiment directory)

    Returns
    -------
    cal_dict: dict
        Key value pairs of calibration meta data; always refreshed with
        the parameters parsed from the experiment directory name and a
        new `date_modified` timestamp.
    '''
    from collections import OrderedDict
    import datetime
    import os

    import yamlord

    from . import utils
    # NOTE: removed unused `import warnings` from the original

    def __create_cal(cal_yaml_path):
        cal_dict = OrderedDict()
        # Add experiment name for calibration reference; the experiment
        # name is the directory containing the YAML file
        base_path, _ = os.path.split(cal_yaml_path)
        _, exp_name = os.path.split(base_path)
        cal_dict['experiment'] = exp_name
        return cal_dict

    # Try reading cal file, else create a fresh one with empty parameters
    if os.path.isfile(cal_yaml_path):
        cal_dict = yamlord.read_yaml(cal_yaml_path)
    else:
        cal_dict = __create_cal(cal_yaml_path)
        cal_dict['parameters'] = OrderedDict()

    # Re-derive tag/animal fields from the experiment name on every load
    for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():
        cal_dict[key] = val

    fmt = "%Y-%m-%d %H:%M:%S"
    cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)

    return cal_dict
def update(data_df, cal_dict, param, bound, start, end):
    '''Record start/end indices for a parameter's calibration boundary

    Creates the nested `parameters/<param>/<bound>` dictionaries on
    demand and stores the given index positions. Returns the (mutated)
    `cal_dict` for chaining.
    '''
    from collections import OrderedDict

    params = cal_dict['parameters']
    region = params.setdefault(param, OrderedDict()).setdefault(bound,
                                                                OrderedDict())
    region['start'] = start
    region['end'] = end

    return cal_dict
def calibrate_acc(data_df, cal_dict):
    '''Calibrate raw accelerometer counts to gravity units

    For each axis, evaluates the polynomial stored in
    `cal_dict['parameters'][<axis>]['poly']` on the raw
    `acceleration_<ax>` column and writes the result to a new
    `A<ax>_g` column. Returns the mutated dataframe.
    '''
    import numpy

    for axis in ('x', 'y', 'z'):
        raw_col = 'acceleration_{}'.format(axis)
        cal_col = 'A{}_g'.format(axis)
        coeffs = cal_dict['parameters'][raw_col]['poly']
        # Evaluate the fit and coerce to float before storing
        data_df[cal_col] = numpy.polyval(coeffs, data_df[raw_col]).astype(float)

    return data_df
def create_speed_csv(cal_fname, data):
    '''Fill in average propeller counts for each calibration interval

    Args
    ----
    cal_fname: str
        Path to the speed-calibration CSV; must contain `start` and
        `end` columns delimiting each calibration interval
    data: dict-like
        Tag data with `propeller` and `datetimes` entries

    Returns
    -------
    cal: pandas.DataFrame
        Calibration table with `count_average` computed per interval;
        also written back to `cal_fname`.
    '''
    import numpy
    import pandas

    # Mask of entries that actually contain a propeller sample; the
    # propeller is assumed to be sampled at a lower frequency than the
    # accelerometer, so most entries are NaN
    notnan = ~numpy.isnan(data['propeller'])

    # Read speed, start, and end times from csv
    cal = pandas.read_csv(cal_fname)

    # For each calibration in `speed_calibrations.csv`
    for i in range(len(cal)):
        start = cal.loc[i, 'start']
        # BUGFIX: was `start = cal.loc[i, 'end']`, which clobbered
        # `start` and left `end` undefined (NameError two lines below)
        end = cal.loc[i, 'end']

        # Snap the interval bounds to the nearest sampled timestamps
        # NOTE(review): assumes `pylleo` is importable at module scope
        dt0 = pylleo.utils.nearest(data['datetimes'][notnan], start)
        dt1 = pylleo.utils.nearest(data['datetimes'][notnan], end)

        cal_mask = (data['datetimes'] >= dt0) & (data['datetimes'] <= dt1)
        count_avg = data['propeller'][cal_mask].mean()

        cal.loc[i, 'count_average'] = count_avg

    # Persist the filled-in table back to the same CSV
    cal.to_csv(cal_fname)

    return cal
def calibrate_propeller(data_df, cal_fname, plot=False):
    '''Derive a `speed` column from propeller counts via calibration fits

    Args
    ----
    data_df: pandas.DataFrame
        Tag data containing a `propeller` count column
    cal_fname: str
        Path to the speed-calibration CSV (see nested docstring)
    plot: bool
        If True, show each calibration fit and their average

    Returns
    -------
    data_df: pandas.DataFrame
        Input dataframe mutated with a `speed` column added
    '''

    def speed_calibration_average(cal_fname, plot):
        '''Calculate the coefficients for the mean fit of calibrations

        Notes
        -----
        `cal_fname` should contain three columns:

        date,est_speed,count_average
        2014-04-18,2.012,30
        '''
        import datetime
        import matplotlib.pyplot as plt
        import numpy
        import pandas

        # Read calibration data
        calibs = pandas.read_csv(cal_fname)
        calibs['date'] = pandas.to_datetime(calibs['date'])

        # Get unique dates to process fits for; one fit per calibration day
        udates = numpy.unique(calibs['date'])

        # Create x data for samples and output array for y
        n_samples = 1000
        x = numpy.arange(n_samples)
        fits = numpy.zeros((len(udates), n_samples), dtype=float)

        # Calculate fit coefficients then store `n_samples` number of samples
        # Force intercept through zero (i.e. zero counts = zero speed)
        # http://stackoverflow.com/a/9994484/943773
        for i in range(len(udates)):
            cal = calibs[calibs['date']==udates[i]]
            # Column vector of counts for the zero-intercept lstsq form
            xi = cal['count_average'].values[:, numpy.newaxis]
            yi = cal['est_speed'].values
            m, _, _, _ = numpy.linalg.lstsq(xi, yi)
            fits[i, :] = m*x

            # Add fit to plot if switch on
            if plot:
                plt.plot(x, fits[i,:], label='cal{}'.format(i))

        # Calculate average of calibration samples across all dates
        y_avg = numpy.mean(fits, axis=0)

        # Add average fit to plot and show if switch on
        if plot:
            plt.plot(x, y_avg, label='avg')
            plt.legend()
            plt.show()

        # Calculate fit coefficients for average samples (slope only,
        # again forced through the origin)
        x_avg = x[:, numpy.newaxis]
        m_avg, _, _, _ = numpy.linalg.lstsq(x_avg, y_avg)

        return m_avg

    # speed = m_avg * counts, using the averaged zero-intercept slope
    m_avg = speed_calibration_average(cal_fname, plot=plot)
    data_df['speed'] = m_avg * data_df['propeller']

    return data_df
|
ryanjdillon/pylleo | pylleo/calapp/main.py | plot_triaxial | python | def plot_triaxial(height, width, tools):
'''Plot pandas dataframe containing an x, y, and z column'''
import bokeh.plotting
p = bokeh.plotting.figure(x_axis_type='datetime',
plot_height=height,
plot_width=width,
title=' ',
toolbar_sticky=False,
tools=tools,
active_drag=BoxZoomTool(),
output_backend='webgl')
p.yaxis.axis_label = 'Acceleration (count)'
p.xaxis.axis_label = 'Time (timezone as programmed)'
# Plot accelerometry data as lines and scatter (for BoxSelectTool)
colors = ['#1b9e77', '#d95f02', '#7570b3']
axes = ['x', 'y', 'z']
lines = [None,]*3
scats = [None,]*3
for i, (ax, c) in enumerate(zip(axes, colors)):
lines[i] = p.line(y=ax, x='dt', color=c, legend=False, source=source)
scats[i] = p.scatter(y=ax, x='dt', color=c, legend=False, size=1,
source=source)
return p, lines, scats | Plot pandas dataframe containing an x, y, and z column | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L18-L43 | null | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch a window in your default browser to visually identify
the times at which the various axes of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def load_data(path_dir):
    '''Load data, directory parameters, and accelerometer parameter names

    Args
    ----
    path_dir: str
        Path to the data directory

    Returns
    -------
    data: pandas.DataFrame
        Experiment data
    params_tag: dict
        A dictionary of parameters parsed from the directory name
    params_data: list
        A list of the accelerometer parameter names
    '''
    import os
    import pylleo

    # Experiment parameters are encoded in the directory name itself
    exp_name = os.path.split(path_dir)[1]
    params_tag = pylleo.utils.parse_experiment_params(exp_name)

    # Load the Little Leonardo tag data, subsampled by the module-level
    # `sample_f` to keep plotting responsive
    meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
                                   params_tag['tag_id'])
    data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)

    # Keep only POSIX-formatted accelerometer parameter names
    raw_params = pylleo.utils.get_tag_params(params_tag['tag_model'])
    posix_params = (pylleo.utils.posix_string(p) for p in raw_params)
    params_data = [p for p in posix_params if p.startswith('acc')]

    return data, params_tag, params_data
def callback_parent(attr, old, new):
    '''Refresh the data-directory dropdown when the parent path changes'''
    import os

    # Remove accidental white space if copy/pasted
    new = new.strip()
    parent_input.value = new

    if not os.path.exists(new):
        msg = '''
              The parent path `{}` does not exist.
              Check that you have entered the absolute path.
              '''.format(new)
        output_window.text = output_template.format(msg)
        return None

    # Sorted list of subdirectories only; plain files are ignored
    subdirs = [d for d in os.listdir(new)
               if os.path.isdir(os.path.join(new, d))]
    options = sorted(subdirs)

    # Update dropdown list of available data directories, select the
    # first, and trigger a data load for it
    datadirs_select.options = options
    datadirs_select.value = options[0]
    callback_datadirs('value', options[0], options[0])

    return None
def callback_datadirs(attr, old, new):
    '''Update source and controls with data loaded from selected directory

    Loads the Little Leonardo data from `parent_input.value/new`, pushes
    it into the module-level ColumnDataSource `source`, and resets the
    widget state (axis checkboxes, parameter/region selects, start/end
    index inputs).
    '''
    import os

    # The loaded dataframe is shared with the save callbacks
    global data

    try:
        # Load data from new data directory
        path_dir = os.path.join(parent_input.value, new)
        data, params_tag, params_data = load_data(path_dir)

        # Make title with new data directory
        p.title.text = 'Calibrating {}'.format(params_tag['experiment'])

        # Update `source` data fields from dataframe; datetimes are also
        # pre-formatted as strings for the HoverTool display
        dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
        source.data = dict(x = list(data['acceleration_x']),
                           y = list(data['acceleration_y']),
                           z = list(data['acceleration_z']),
                           ind = list(data.index),
                           dt = list(data['datetimes']),
                           dt_str = dt_str)

        # Update values for control widgets: show all axes, select the
        # first parameter and the lower (-1g) region by default
        param_checkbox.active = [0, 1, 2]

        param_select.options = params_data
        param_select.value = params_data[0]

        regions = ['lower', 'upper']
        region_select.options = regions
        region_select.value = regions[0]

        # Default calibration interval spans the whole dataset
        start_input.value = str(data.index[0])
        end_input.value = str(data.index[-1])
    except Exception as e:
        # Surface load failures in the status window rather than
        # crashing the Bokeh session
        msg = '''
              Problem loading data directory `{}`.
              Please check that data exists in that directory.

              Details:
              {}
              '''.format(new, e)
        output_window.text = output_template.format(msg)

    return None
def callback_box_select(attr, old, new):
    '''Update TextInput start/end entries from BoxSelectTool selection'''
    # BUGFIX: the original dereferenced `new['1d']['indices']` BEFORE its
    # `new is None` guard (making the guard dead code), and it indexed
    # `ind[0]` even when the selection was empty (IndexError). Guard both
    # cases up front.
    ind = sorted(new['1d']['indices']) if new is not None else []

    if not ind:
        start_input.value = '0'
        end_input.value = '0'
    else:
        start_input.value = str(source.data['ind'][ind[0]])
        end_input.value = str(source.data['ind'][ind[-1]])
        msg = '''
              New start and end index values set.
              '''
        output_window.text = output_template.format(msg)

    return None
def callback_checkbox(attr, old, new):
    '''Toggle glyph visibility from the axes CheckboxButtonGroup

    Line and scatter glyphs for axis `i` are visible iff `i` is in
    `param_checkbox.active`.
    '''
    # NOTE: removed unused `import numpy` from the original
    active = set(param_checkbox.active)
    for i in range(len(lines)):
        visible = i in active
        lines[i].visible = visible
        scats[i].visible = visible
    return None
def callback_save_indices():
    '''Write the current start/end indices to the data dir's `cal.yml`'''
    import os

    import pylleo
    import yamlord
    # NOTE: removed unused `import datetime` from the original

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        cal_yaml_path = os.path.join(path_dir, 'cal.yml')

        # Normalize parameter name to posix style (e.g. `acceleration_x`)
        param = (param_select.value).lower().replace('-','_')
        region = region_select.value
        start = int(start_input.value)
        end = int(end_input.value)

        # BUGFIX: "star index" -> "start index" in the status message
        msg = '''
              Updated calibration times for:<br>
              <b>{}/{}</b>
              <br>
              <br>
              start index: {}<br>
              end index: {}<br>
              '''.format(param, region, start, end)
        output_window.text = output_template.format(msg)

        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)

        # Generalize for Class-ifying
        cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)

        yamlord.write_yaml(cal_dict, cal_yaml_path)
    else:
        msg = '''
              You must first load data and select indices for calibration
              regions before you can save the indices to `cal.yml`
              '''
        output_window.text = output_template.format(msg)

    return None
def callback_save_poly():
    '''Fit and save a calibration polynomial for the selected parameter

    Validates that both calibration regions have been recorded in
    `cal.yml` and that their start/end indices are in order, then fits a
    degree-1 polynomial between the -1g/+1g regions and writes the
    coefficients back to `cal.yml`.

    Globals: cal_fname, data (read-only, so no declaration)
    '''
    import pylleo
    import yamlord
    # NOTE: removed unused `import datetime` / `import itertools`

    def _check_param_regions(param, regions, cal_dict):
        '''Verify start/end entries exist for the parameter's regions'''
        missing = None
        if param not in cal_dict['parameters']:
            missing = param
        else:
            for region in regions:
                if region not in cal_dict['parameters'][param]:
                    missing = '{}/{}'.format(param, region)
                    break

        if missing is None:
            return True

        # BUGFIX: the original discarded the result of `msg.format(...)`
        # (str.format returns a new string) and set the status window
        # even on success; only warn on failure, with the value filled in
        msg = '''
              <b>{}</b> was not found in the calibration dictionary.
              Process that parameter and then try saving the polyfit again.
              '''.format(missing)
        output_window.text = output_template.format(msg)
        return False

    def _check_index_order(param, regions, cal_dict):
        '''Check that start comes before end for each calibration region'''
        indices_present = True
        for region in regions:
            start = cal_dict['parameters'][param][region]['start']
            end = cal_dict['parameters'][param][region]['end']
            # Check if start comes after end
            if int(start) > int(end):
                indices_present = False
                # BUGFIX: dropped a redundant no-op `msg.format(...)` call
                msg = '''
                      The start index ({}) comes after the end index ({}).
                      Please set new start/end indexes for <b>{}/{}</b>
                      '''.format(start, end, param, region)
                output_window.text = output_template.format(msg)

        return indices_present

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        cal_yaml_path = os.path.join(path_dir, cal_fname)
        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)

        # Get currently selected parameter
        param = param_select.value
        regions = region_select.options

        # BUGFIX: the original called `_check_index_order` here, leaving
        # `_check_param_regions` dead code — a missing region would then
        # raise a KeyError inside `_check_index_order`
        if not _check_param_regions(param, regions, cal_dict):
            return None

        # Check that index positions are in sequence
        if not _check_index_order(param, regions, cal_dict):
            return None

        param = (param_select.value).lower().replace('-','_')
        try:
            msg = '''
                  Saved polyfit for <b>{}</b> to <b>{}</b>.
                  '''.format(param, cal_fname)
            output_window.text = output_template.format(msg)

            lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
            poly = list(pylleo.lleocal.fit1d(lower, upper))
            # Cast numpy floats to plain floats for clean YAML output
            poly = [float(str(i)) for i in poly]

            cal_dict['parameters'][param]['poly'] = poly

            yamlord.write_yaml(cal_dict, cal_yaml_path)
        except Exception as e:
            msg = 'Problem saving polyfit: {}'.format(e)
            output_window.text = output_template.format(msg)
    else:
        msg = '''
              You must first load data and select indices for calibration
              regions before you can save to polyfit to `cal.yml`
              '''
        output_window.text = output_template.format(msg)

    return None
# ---------------------------------------------------------------------------
# Module-level application setup. The widgets, plot, and `source` created
# here are shared (as module globals) with the callbacks defined above.
# ---------------------------------------------------------------------------
import datetime
import numpy
import os
import sys
import subprocess

from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc

# DATA
#------------------------------------------------------------------------------
cal_fname = 'cal.yml'  # calibration YAML written into each data directory
sample_f = 30          # plot every 30th sample to keep the figure responsive
dt_fmt = '%H:%M'       # datetime display format used by the HoverTool

# Create Column Data Source that will be used by the plot
# use 6hr span to avoid strange xaxis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
                                      y = [0, 0],
                                      z = [0, 0],
                                      ind = [0, 0],
                                      dt = [t0, t1],
                                      dt_str = [t0.strftime(dt_fmt),
                                                t1.strftime(dt_fmt)],
                                      ))

# Input
#------------------------------------------------------------------------------

# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)

# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)

# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)

# Select which parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)

# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)

# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')

# Save the start end times selected with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)

# Perform a polyfit on the data points occurring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)

# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
                   'height:150px; padding: 10px; background-color:#f2f2f2; '
                   'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))

# Plotting
#------------------------------------------------------------------------------

# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
                            ('acc', '$y'),
                            ('time', '@dt_str'),
                            ])

# Define plots tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False

# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)

# Update start/end input text boxes with BoxSelectTool
for scat in scats:
    scat.data_source.on_change('selected', callback_box_select)

# Rendering
#------------------------------------------------------------------------------

# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
            start_input, end_input, button_save, button_poly)

# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)

# See `output_template` for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)

layout = column(p, row1, row2, width=1100)

# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | load_data | python | def load_data(path_dir):
'''Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names
'''
import os
import pylleo
exp_name = os.path.split(path_dir)[1]
params_tag = pylleo.utils.parse_experiment_params(exp_name)
# Load the Little Leonardo tag data
meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
params_tag['tag_id'])
data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
# Get and curate the parameter names of the loaded dataframe
params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
params_data = [pylleo.utils.posix_string(p) for p in params_data]
params_data = [p for p in params_data if p.startswith('acc')]
return data, params_tag, params_data | Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L46-L79 | [
"def parse_experiment_params(name_exp):\n '''Parse experiment parameters from the data directory name\n\n Args\n ----\n name_exp: str\n Name of data directory with experiment parameters\n\n Returns\n -------\n tag_params: dict of str\n Dictionary of parsed experiment parameters\n '''\n if ('/' in name_exp) or ('\\\\' in name_exp):\n raise ValueError(\"The path {} appears to be a path. Please pass \"\n \"only the data directory's name (i.e. the \"\n \"experiment name)\".format(name_exp))\n\n tag_params = dict()\n tag_params['experiment'] = name_exp\n tag_params['tag_model'] = (name_exp.split('_')[1]).replace('-','')\n tag_params['tag_id'] = name_exp.split('_')[2]\n tag_params['animal'] = name_exp.split('_')[3]\n tag_params['notes'] = name_exp.split('_')[4]\n\n return tag_params\n",
"def read_meta(path_dir, tag_model, tag_id):\n '''Read meta data from Little Leonardo data header rows\n\n Args\n ----\n path_dir: str\n Parent directory containing lleo data files\n tag_model: str\n Little Leonardo tag model name\n tag_id: str, int\n Little Leonardo tag ID number\n\n Returns\n -------\n meta: dict\n dictionary with meta data from header lines of lleo data files\n '''\n from collections import OrderedDict\n import os\n import yamlord\n\n from . import utils\n\n def _parse_meta_line(line):\n '''Return key, value pair parsed from data header line'''\n\n # Parse the key and its value from the line\n key, val = line.replace(':', '').replace('\"', '').split(',')\n\n return key.strip(), val.strip()\n\n\n def _read_meta_all(f, meta, n_header):\n '''Read all meta data from header rows of data file'''\n\n # Skip 'File name' line\n f.seek(0)\n _ = f.readline()\n\n # Create child dictionary for channel / file\n line = f.readline()\n key_ch, val_ch = _parse_meta_line(line)\n val_ch = utils.posix_string(val_ch)\n meta['parameters'][val_ch] = OrderedDict()\n\n # Write header values to channel dict\n for _ in range(n_header-2):\n line = f.readline()\n key, val = _parse_meta_line(line)\n meta['parameters'][val_ch][key] = val.strip()\n\n return meta\n\n\n def _create_meta(path_dir, tag_model, tag_id):\n '''Create meta data dictionary'''\n import datetime\n from . 
import utils\n\n param_strs = utils.get_tag_params(tag_model)\n\n # Create dictionary of meta data\n meta = OrderedDict()\n\n # Create fields for the parameters in data directory name\n exp_name = os.path.split(path_dir)[1]\n params_tag = utils.parse_experiment_params(exp_name)\n for key, value in params_tag.items():\n meta[key] = value\n\n fmt = \"%Y-%m-%d %H:%M:%S\"\n meta['date_modified'] = datetime.datetime.now().strftime(fmt)\n\n meta['parameters'] = OrderedDict()\n\n for param_str in param_strs:\n print('Create meta entry for {}'.format(param_str))\n\n path_file = utils.find_file(path_dir, param_str, '.TXT')\n # Get number of header rows\n enc = utils.predict_encoding(path_file, n_lines=20)\n with open(path_file, 'r', encoding=enc) as f:\n n_header = utils.get_n_header(f)\n f.seek(0)\n meta = _read_meta_all(f, meta, n_header=n_header)\n\n return meta\n\n\n # Load meta data from YAML file if it already exists\n meta_yaml_path = os.path.join(path_dir, 'meta.yml')\n\n # Load file if exists else create\n if os.path.isfile(meta_yaml_path):\n meta = yamlord.read_yaml(meta_yaml_path)\n\n # Else create meta dictionary and save to YAML\n else:\n meta = _create_meta(path_dir, tag_model, tag_id)\n yamlord.write_yaml(meta, meta_yaml_path)\n\n return meta\n",
"def get_tag_params(tag_model):\n '''Load param strs and n_header based on model of tag model'''\n\n tag_model = tag_model.replace('-', '')\n tags = dict()\n tags['W190PD3GT'] = ['Acceleration-X', 'Acceleration-Y', 'Acceleration-Z',\n 'Depth', 'Propeller', 'Temperature']\n\n # Return tag parameters if found, else raise error\n if tag_model in tags:\n return tags[tag_model]\n else:\n raise KeyError('{} not found in tag dictionary'.format(tag_model))\n",
"def read_data(meta, path_dir, sample_f=1, decimate=False, overwrite=False):\n '''Read accelerometry data from leonardo txt files\n\n Args\n ----\n meta: dict\n Dictionary of meta data from header lines of lleo data files\n path_dir: str\n Parent directory containing lleo data files\n sample_f: int\n Return every `sample_f` data points\n\n Returns\n -------\n acc: pandas.DataFrame\n Dataframe containing accelerometry data on x, y, z axes [m/s^2]\n depth: pandas.DataFrame\n Dataframe containing depth data [m]\n prop: pandas.DataFrame\n Dataframe containing speed data from propeller\n temp: pandas.DataFrame\n Dataframe containing temperature data\n '''\n import os\n import pandas\n\n from . import utils\n\n def _generate_datetimes(date, time, interval_s, n_timestamps):\n '''Generate list of datetimes from date/time with given interval'''\n from datetime import datetime, timedelta\n import pandas\n\n # TODO problematic if both m/d d/m options\n fmts = ['%Y/%m/%d %H%M%S',\n '%d/%m/%Y %H%M%S',\n '%m/%d/%Y %I%M%S %p',\n '%d/%m/%Y %I%M%S %p',]\n\n for fmt in fmts:\n try:\n start = pandas.to_datetime('{} {}'.format(date,time), format=fmt)\n except:\n print('Date format {:18} incorrect, '\n 'trying next...'.format(fmt))\n else:\n print('Date format {:18} correct.'.format(fmt))\n break\n\n # Create datetime array\n datetimes = list()\n for i in range(n_timestamps):\n secs = interval_s*i\n datetimes.append(start + timedelta(seconds=secs))\n\n return datetimes\n\n\n def _read_data_file(meta, path_dir, param_str):\n '''Read single Little Leonardo txt data file'''\n import numpy\n import os\n import pandas\n\n from . 
import utils\n\n # Get path of data file and associated pickle file\n path_file = utils.find_file(path_dir, param_str, '.TXT')\n col_name = utils.posix_string(param_str)\n\n # Get number of header rows in file\n enc = utils.predict_encoding(path_file, n_lines=20)\n with open(path_file, 'r', encoding=enc) as f:\n n_header = utils.get_n_header(f)\n\n print('\\nReading: {}'.format(col_name))\n\n data = numpy.genfromtxt(path_file, skip_header=n_header)\n\n interval_s = float(meta['parameters'][col_name]['Interval(Sec)'])\n date = meta['parameters'][col_name]['Start date']\n time = meta['parameters'][col_name]['Start time']\n\n # TODO review\n # Generate summed data if propeller sampling rate not 1\n if (col_name == 'propeller') and (interval_s < 1):\n print('Too high sampling interval, taking sums')\n # Sampling rate\n fs = int(1/interval_s)\n\n print('data before', data.max())\n # Drop elements to make divisible by fs for summing\n data = data[:-int(len(data)%fs)]\n\n # Reshape to 2D with columns `fs` in length to be summed\n data = data.reshape(fs, int(len(data)/fs))\n data = numpy.sum(data, axis=0)\n interval_s = 1\n\n print('data after', data.max())\n\n datetimes = _generate_datetimes(date, time, interval_s, len(data))\n data = numpy.vstack((datetimes, data)).T\n df = pandas.DataFrame(data, columns=['datetimes', col_name])\n\n return df\n\n # Get list of string parameter names for tag model\n param_names = utils.get_tag_params(meta['tag_model'])\n\n # Load pickle file exists and code unchanged\n pickle_file = os.path.join(path_dir, 'pydata_'+meta['experiment']+'.p')\n\n # Load or create pandas DataFrame with parameters associated with tag model\n if (os.path.exists(pickle_file)) and (overwrite is not True):\n data_df = pandas.read_pickle(pickle_file)\n else:\n first_col = True\n for name in param_names:\n next_df = _read_data_file(meta, path_dir, name)\n if first_col == False:\n data_df = pandas.merge(data_df, next_df, on='datetimes', how='left')\n else:\n data_df 
= next_df\n first_col = False\n print('')\n\n # Covert columns to `datetime64` or `float64` types\n data_df = data_df.apply(lambda x: pandas.to_numeric(x, errors='ignore'))\n\n # Save file to pickle\n data_df.to_pickle(pickle_file)\n\n # Return DataFrame with ever `sample_f` values\n return data_df.iloc[::sample_f,:]\n"
] | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch a window in your default browser to visually identify
the times at which the various axes of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
    '''Build the tri-axial accelerometer figure with line+scatter glyphs

    Plots the x/y/z columns of the module-level ColumnDataSource
    `source` against its `dt` datetimes. Scatter glyphs duplicate the
    lines so the BoxSelectTool has point targets to hit-test.
    '''
    import bokeh.plotting

    fig = bokeh.plotting.figure(x_axis_type='datetime',
                                plot_height=height,
                                plot_width=width,
                                title=' ',
                                toolbar_sticky=False,
                                tools=tools,
                                active_drag=BoxZoomTool(),
                                output_backend='webgl')
    fig.yaxis.axis_label = 'Acceleration (count)'
    fig.xaxis.axis_label = 'Time (timezone as programmed)'

    # One colorblind-safe color per axis
    palette = ['#1b9e77', '#d95f02', '#7570b3']
    lines = []
    scats = []
    for axis, color in zip(['x', 'y', 'z'], palette):
        lines.append(fig.line(y=axis, x='dt', color=color, legend=False,
                              source=source))
        scats.append(fig.scatter(y=axis, x='dt', color=color, legend=False,
                                 size=1, source=source))

    return fig, lines, scats
def callback_parent(attr, old, new):
'''Update data directories drop down with new parent directory'''
import os
# Remove accidental white space if copy/pasted
new = new.strip()
parent_input.value = new
# Verify new parent path exists and update `datadirs_select` widget
if os.path.exists(new):
# Create sorted list of data directories, ignore files
joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])
# Update dropdown list of available data directories and select first
datadirs_select.options = options
datadirs_select.value = options[0]
callback_datadirs('value', options[0], options[0])
else:
msg = '''
The parent path `{}` does not exist.
Check that you have entered the absolute path.
'''.format(new)
output_window.text = output_template.format(msg)
return None
def callback_datadirs(attr, old, new):
'''Update source and controls with data loaded from selected directory'''
import os
global data
try:
# Load data from new data directory
path_dir = os.path.join(parent_input.value, new)
data, params_tag, params_data = load_data(path_dir)
# Make title with new data directory
p.title.text = 'Calibrating {}'.format(params_tag['experiment'])
# Update `source` data fields from dataframe
dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
source.data = dict(x = list(data['acceleration_x']),
y = list(data['acceleration_y']),
z = list(data['acceleration_z']),
ind = list(data.index),
dt = list(data['datetimes']),
dt_str = dt_str)
# Update values for control widgets
param_checkbox.active = [0, 1, 2]
param_select.options = params_data
param_select.value = params_data[0]
regions = ['lower', 'upper']
region_select.options = regions
region_select.value = regions[0]
start_input.value = str(data.index[0])
end_input.value = str(data.index[-1])
except Exception as e:
msg = '''
Problem loading data directory `{}`.
Please check that data exists in that directory.
Details:
{}
'''.format(new, e)
output_window.text = output_template.format(msg)
return None
def callback_box_select(attr, old, new):
'''Update TextInput start/end entries from BoxSelectTool selection'''
# Get indices of selection
ind = sorted(new['1d']['indices'])
if new is None:
start_input.value = '0'
end_input.value = '0'
else:
start_input.value = str(source.data['ind'][ind[0]])
end_input.value = str(source.data['ind'][ind[-1]])
msg = '''
New start and end index values set.
'''
output_window.text = output_template.format(msg)
return None
def callback_checkbox(attr, old, new):
'''Update visible data from parameters selectin in the CheckboxSelect'''
import numpy
for i in range(len(lines)):
lines[i].visible = i in param_checkbox.active
scats[i].visible = i in param_checkbox.active
return None
def callback_save_indices():
    '''Save the currently entered start/end indices to `cal.yml`

    Reads the parameter/region selections and the start/end index text
    inputs, writes them into the calibration YAML of the selected data
    directory, and reports the result in the status window.
    '''
    import os
    import pylleo
    import yamlord

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        # Use the `cal_fname` module constant, consistent with
        # `callback_save_poly()` (was a hard-coded 'cal.yml')
        cal_yaml_path = os.path.join(path_dir, cal_fname)

        # Posix form of the parameter name is used as the `cal.yml` key
        param = (param_select.value).lower().replace('-','_')
        region = region_select.value
        start = int(start_input.value)
        end = int(end_input.value)
        # Fixed user-facing typo: 'star index' -> 'start index'
        msg = '''
        Updated calibration times for:<br>
        <b>{}/{}</b>
        <br>
        <br>
        start index: {}<br>
        end index: {}<br>
        '''.format(param, region, start, end)
        output_window.text = output_template.format(msg)

        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
        # Generalize for Class-ifying
        cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
        yamlord.write_yaml(cal_dict, cal_yaml_path)
    else:
        msg = '''
        You must first load data and select indices for calibration
        regions before you can save the indices to `cal.yml`
        '''
        output_window.text = output_template.format(msg)

    return None
def callback_save_poly():
    '''Fit a calibration polynomial for the selected parameter and save it

    Validates that both calibration regions and ordered start/end indices
    exist in `cal.yml`, fits the lower/upper region data with
    `pylleo.lleocal.fit1d`, and writes the coefficients back to `cal.yml`.

    Globals: cal_fname, data (read-only, so no declaration)
    '''
    import os
    import pylleo
    import yamlord

    def _check_param_regions(param, regions, cal_dict):
        '''Check that each calibration region exists for `param`'''
        params_present = True
        missing = param
        if param not in cal_dict['parameters']:
            params_present = False
        else:
            for region in regions:
                if region not in cal_dict['parameters'][param]:
                    params_present = False
                    missing = '{}/{}'.format(param, region)
        if not params_present:
            # Only overwrite the status window on failure (the original
            # clobbered it even when all regions were present)
            msg = '''
            <b>{}</b> was not found in the calibration dictionary.
            Process that parameter and then try saving the polyfit again.
            '''.format(missing)
            output_window.text = output_template.format(msg)
        return params_present

    def _check_index_order(param, regions, cal_dict):
        '''Check that the start index precedes the end index per region'''
        indices_present = True
        for region in regions:
            start = cal_dict['parameters'][param][region]['start']
            end = cal_dict['parameters'][param][region]['end']
            # Check if start comes after end
            if int(start) > int(end):
                indices_present = False
                msg = '''
                The start index ({}) comes after the end index ({}).
                Please set new start/end indexes for <b>{}/{}</b>
                '''.format(start, end, param, region)
                output_window.text = output_template.format(msg)
        return indices_present

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        cal_yaml_path = os.path.join(path_dir, cal_fname)
        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)

        # Lower the parameter name BEFORE validating: the `cal.yml` keys
        # written by `callback_save_indices()` use the posix form, so the
        # original's checks against the display name could never match
        param = (param_select.value).lower().replace('-','_')
        regions = region_select.options

        # Check that the parameter/regions have been recorded in `cal.yml`
        # (the original mistakenly called `_check_index_order` here and
        # never used `_check_param_regions`)
        if not _check_param_regions(param, regions, cal_dict):
            return None
        # Check that index positions are in sequence
        if not _check_index_order(param, regions, cal_dict):
            return None

        try:
            msg = '''
            Saved polyfit for <b>{}</b> to <b>{}</b>.
            '''.format(param, cal_fname)
            output_window.text = output_template.format(msg)
            lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
            poly = list(pylleo.lleocal.fit1d(lower, upper))
            # Round-trip through str() to force plain floats for YAML output
            poly = [float(str(i)) for i in poly]
            cal_dict['parameters'][param]['poly'] = poly
            yamlord.write_yaml(cal_dict, cal_yaml_path)
        except Exception as e:
            msg = 'Problem saving polyfit: {}'.format(e)
            output_window.text = output_template.format(msg)
    else:
        msg = '''
        You must first load data and select indices for calibration
        regions before you can save to polyfit to `cal.yml`
        '''
        output_window.text = output_template.format(msg)

    return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------
cal_fname = 'cal.yml'  # calibration YAML written into each data directory
sample_f = 30  # sampling parameter passed to `pylleo.lleoio.read_data` — confirm units
dt_fmt = '%H:%M'  # time format for hover-tool labels
# Create Column Data Source that will be used by the plot
# use a 6 hr span to avoid strange x-axis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
                                      y = [0, 0],
                                      z = [0, 0],
                                      ind = [0, 0],
                                      dt = [t0, t1],
                                      dt_str = [t0.strftime(dt_fmt),
                                                t1.strftime(dt_fmt)],
                                      ))
# Input
#------------------------------------------------------------------------------
# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)
# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)
# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)
# Select with parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)
# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)
# User input start/end indices, saved to the calibration YAML (`cal.yml`)
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')
# Save the start/end indices selected with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)
# Perform a polyfit on the data points occurring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)
# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
'height:150px; padding: 10px; background-color:#f2f2f2; '
'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))
# Plotting
#------------------------------------------------------------------------------
# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
('acc', '$y'),
('time', '@dt_str'),
])
# Define plots tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False
# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)
# Update start/end input text boxes with BoxSelectTool
for scat in scats:
scat.data_source.on_change('selected', callback_box_select)
# Rendering
#------------------------------------------------------------------------------
# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
start_input, end_input, button_save, button_poly)
# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)
# See `output_template for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)
# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | callback_parent | python | def callback_parent(attr, old, new):
'''Update data directories drop down with new parent directory'''
import os
# Remove accidental white space if copy/pasted
new = new.strip()
parent_input.value = new
# Verify new parent path exists and update `datadirs_select` widget
if os.path.exists(new):
# Create sorted list of data directories, ignore files
joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])
# Update dropdown list of available data directories and select first
datadirs_select.options = options
datadirs_select.value = options[0]
callback_datadirs('value', options[0], options[0])
else:
msg = '''
The parent path `{}` does not exist.
Check that you have entered the absolute path.
'''.format(new)
output_window.text = output_template.format(msg)
return None | Update data directories drop down with new parent directory | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L82-L109 | [
"def callback_datadirs(attr, old, new):\n '''Update source and controls with data loaded from selected directory'''\n import os\n\n global data\n\n try:\n # Load data from new data directory\n path_dir = os.path.join(parent_input.value, new)\n data, params_tag, params_data = load_data(path_dir)\n\n # Make title with new data directory\n p.title.text = 'Calibrating {}'.format(params_tag['experiment'])\n\n # Update `source` data fields from dataframe\n dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]\n source.data = dict(x = list(data['acceleration_x']),\n y = list(data['acceleration_y']),\n z = list(data['acceleration_z']),\n ind = list(data.index),\n dt = list(data['datetimes']),\n dt_str = dt_str)\n\n # Update values for control widgets\n param_checkbox.active = [0, 1, 2]\n param_select.options = params_data\n param_select.value = params_data[0]\n regions = ['lower', 'upper']\n region_select.options = regions\n region_select.value = regions[0]\n start_input.value = str(data.index[0])\n end_input.value = str(data.index[-1])\n except Exception as e:\n msg = '''\n Problem loading data directory `{}`.\n\n Please check that data exists in that directory.\n\n Details:\n {}\n '''.format(new, e)\n output_window.text = output_template.format(msg)\n\n\n return None\n"
] | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch a window in your default browser to visually identify
the times at which various axes of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
    '''Plot pandas dataframe containing an x, y, and z column'''
    import bokeh.plotting

    fig = bokeh.plotting.figure(x_axis_type='datetime',
                                plot_height=height,
                                plot_width=width,
                                title=' ',
                                toolbar_sticky=False,
                                tools=tools,
                                active_drag=BoxZoomTool(),
                                output_backend='webgl')
    fig.yaxis.axis_label = 'Acceleration (count)'
    fig.xaxis.axis_label = 'Time (timezone as programmed)'

    # One line + scatter glyph pair per axis (scatter required for the
    # BoxSelectTool to register point selections)
    lines = []
    scats = []
    for ax, c in zip(['x', 'y', 'z'], ['#1b9e77', '#d95f02', '#7570b3']):
        lines.append(fig.line(y=ax, x='dt', color=c, legend=False,
                              source=source))
        scats.append(fig.scatter(y=ax, x='dt', color=c, legend=False,
                                 size=1, source=source))

    return fig, lines, scats
def load_data(path_dir):
    '''Load data, directory parameters, and accelerometer parameter names

    Args
    ----
    path_dir: str
        Path to the data directory

    Returns
    -------
    data: pandas.DataFrame
        Experiment data
    params_tag: dict
        A dictionary of parameters parsed from the directory name
    params_data: list
        A list of the accelerometer parameter names
    '''
    import os
    import pylleo

    # Experiment parameters are encoded in the directory's own name
    params_tag = pylleo.utils.parse_experiment_params(
            os.path.split(path_dir)[1])

    # Read the tag's meta information, then the logged data themselves
    meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
                                   params_tag['tag_id'])
    data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)

    # Keep only posix-formatted accelerometer parameter names
    tag_params = pylleo.utils.get_tag_params(params_tag['tag_model'])
    posix_names = (pylleo.utils.posix_string(p) for p in tag_params)
    params_data = [name for name in posix_names if name.startswith('acc')]

    return data, params_tag, params_data
def callback_datadirs(attr, old, new):
'''Update source and controls with data loaded from selected directory'''
import os
global data
try:
# Load data from new data directory
path_dir = os.path.join(parent_input.value, new)
data, params_tag, params_data = load_data(path_dir)
# Make title with new data directory
p.title.text = 'Calibrating {}'.format(params_tag['experiment'])
# Update `source` data fields from dataframe
dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
source.data = dict(x = list(data['acceleration_x']),
y = list(data['acceleration_y']),
z = list(data['acceleration_z']),
ind = list(data.index),
dt = list(data['datetimes']),
dt_str = dt_str)
# Update values for control widgets
param_checkbox.active = [0, 1, 2]
param_select.options = params_data
param_select.value = params_data[0]
regions = ['lower', 'upper']
region_select.options = regions
region_select.value = regions[0]
start_input.value = str(data.index[0])
end_input.value = str(data.index[-1])
except Exception as e:
msg = '''
Problem loading data directory `{}`.
Please check that data exists in that directory.
Details:
{}
'''.format(new, e)
output_window.text = output_template.format(msg)
return None
def callback_box_select(attr, old, new):
'''Update TextInput start/end entries from BoxSelectTool selection'''
# Get indices of selection
ind = sorted(new['1d']['indices'])
if new is None:
start_input.value = '0'
end_input.value = '0'
else:
start_input.value = str(source.data['ind'][ind[0]])
end_input.value = str(source.data['ind'][ind[-1]])
msg = '''
New start and end index values set.
'''
output_window.text = output_template.format(msg)
return None
def callback_checkbox(attr, old, new):
'''Update visible data from parameters selectin in the CheckboxSelect'''
import numpy
for i in range(len(lines)):
lines[i].visible = i in param_checkbox.active
scats[i].visible = i in param_checkbox.active
return None
def callback_save_indices():
'''Save index from bokeh textinput'''
import datetime
import os
import pylleo
import yamlord
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, 'cal.yml')
param = (param_select.value).lower().replace('-','_')
region = region_select.value
start = int(start_input.value)
end = int(end_input.value)
msg = '''
Updated calibration times for:<br>
<b>{}/{}</b>
<br>
<br>
star index: {}<br>
end index: {}<br>
'''.format(param, region, start, end)
output_window.text = output_template.format(msg)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Generalize for Class-ifying
cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
yamlord.write_yaml(cal_dict, cal_yaml_path)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save the indices to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
def callback_save_poly():
'''Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
'''
import datetime
import pylleo
import yamlord
import itertools
def _check_param_regions(param, regions, cal_dict):
msg = '''
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
'''.format(param)
params_present = True
if param not in cal_dict['parameters']:
params_present = False
msg.format(param)
else:
for region in regions:
if region not in cal_dict['parameters'][param]:
params_present = False
msg.format('{}/{}'.format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
'''Check that index positions exist for each calibration region'''
indices_present = True
for region in regions:
start = cal_dict['parameters'][param][region]['start']
end = cal_dict['parameters'][param][region]['end']
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = '''
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
'''.format(start, end, param, region)
msg.format(start, end, param, region)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
# Check that index positions have been recorded in `cal.yml`
if not _check_index_order(param, regions, cal_dict):
return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace('-','_')
try:
msg = '''
Saved polyfit for <b>{}</b> to <b>{}</b>.
'''.format(param, cal_fname)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict['parameters'][param]['poly'] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = 'Problem saving polyfit: {}'.format(e)
output_window.text = output_template.format(msg)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------
cal_fname = 'cal.yml'
sample_f = 30
dt_fmt = '%H:%M'
# Create Column Data Source that will be used by the plot
# use 6hr span to avoid straing xaxis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
y = [0, 0],
z = [0, 0],
ind = [0, 0],
dt = [t0, t1],
dt_str = [t0.strftime(dt_fmt),
t1.strftime(dt_fmt)],
))
# Input
#------------------------------------------------------------------------------
# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)
# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)
# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)
# Select with parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)
# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)
# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')
# Save the start end times selcted with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)
# Perform a polyfit on the data points occuring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)
# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
'height:150px; padding: 10px; background-color:#f2f2f2; '
'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))
# Plotting
#------------------------------------------------------------------------------
# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
('acc', '$y'),
('time', '@dt_str'),
])
# Define plots tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False
# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)
# Update start/end input text boxes with BoxSelectTool
for scat in scats:
scat.data_source.on_change('selected', callback_box_select)
# Rendering
#------------------------------------------------------------------------------
# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
start_input, end_input, button_save, button_poly)
# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)
# See `output_template for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)
# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | callback_datadirs | python | def callback_datadirs(attr, old, new):
'''Update source and controls with data loaded from selected directory'''
import os
global data
try:
# Load data from new data directory
path_dir = os.path.join(parent_input.value, new)
data, params_tag, params_data = load_data(path_dir)
# Make title with new data directory
p.title.text = 'Calibrating {}'.format(params_tag['experiment'])
# Update `source` data fields from dataframe
dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
source.data = dict(x = list(data['acceleration_x']),
y = list(data['acceleration_y']),
z = list(data['acceleration_z']),
ind = list(data.index),
dt = list(data['datetimes']),
dt_str = dt_str)
# Update values for control widgets
param_checkbox.active = [0, 1, 2]
param_select.options = params_data
param_select.value = params_data[0]
regions = ['lower', 'upper']
region_select.options = regions
region_select.value = regions[0]
start_input.value = str(data.index[0])
end_input.value = str(data.index[-1])
except Exception as e:
msg = '''
Problem loading data directory `{}`.
Please check that data exists in that directory.
Details:
{}
'''.format(new, e)
output_window.text = output_template.format(msg)
return None | Update source and controls with data loaded from selected directory | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L112-L156 | [
"def load_data(path_dir):\n '''Load data, directory parameters, and accelerometer parameter names\n\n Args\n ----\n path_dir: str\n Path to the data directory\n\n Returns\n -------\n data: pandas.DataFrame\n Experiment data\n params_tag: dict\n A dictionary of parameters parsed from the directory name\n params_data: list\n A list of the accelerometer parameter names\n '''\n import os\n import pylleo\n\n exp_name = os.path.split(path_dir)[1]\n params_tag = pylleo.utils.parse_experiment_params(exp_name)\n\n # Load the Little Leonardo tag data\n meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],\n params_tag['tag_id'])\n data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)\n\n # Get and curate the parameter names of the loaded dataframe\n params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])\n params_data = [pylleo.utils.posix_string(p) for p in params_data]\n params_data = [p for p in params_data if p.startswith('acc')]\n\n return data, params_tag, params_data\n"
] | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch an window in your default broweser to visually identify
the times at which various axis of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
'''Plot pandas dataframe containing an x, y, and z column'''
import bokeh.plotting
p = bokeh.plotting.figure(x_axis_type='datetime',
plot_height=height,
plot_width=width,
title=' ',
toolbar_sticky=False,
tools=tools,
active_drag=BoxZoomTool(),
output_backend='webgl')
p.yaxis.axis_label = 'Acceleration (count)'
p.xaxis.axis_label = 'Time (timezone as programmed)'
# Plot accelerometry data as lines and scatter (for BoxSelectTool)
colors = ['#1b9e77', '#d95f02', '#7570b3']
axes = ['x', 'y', 'z']
lines = [None,]*3
scats = [None,]*3
for i, (ax, c) in enumerate(zip(axes, colors)):
lines[i] = p.line(y=ax, x='dt', color=c, legend=False, source=source)
scats[i] = p.scatter(y=ax, x='dt', color=c, legend=False, size=1,
source=source)
return p, lines, scats
def load_data(path_dir):
'''Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names
'''
import os
import pylleo
exp_name = os.path.split(path_dir)[1]
params_tag = pylleo.utils.parse_experiment_params(exp_name)
# Load the Little Leonardo tag data
meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
params_tag['tag_id'])
data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
# Get and curate the parameter names of the loaded dataframe
params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
params_data = [pylleo.utils.posix_string(p) for p in params_data]
params_data = [p for p in params_data if p.startswith('acc')]
return data, params_tag, params_data
def callback_parent(attr, old, new):
    '''Update data directories drop down with new parent directory'''
    import os

    # Strip accidental whitespace from a copy/pasted path
    new = new.strip()
    parent_input.value = new

    # Verify new parent path exists and update `datadirs_select` widget
    if not os.path.exists(new):
        msg = '''
        The parent path `{}` does not exist.
        Check that you have entered the absolute path.
        '''.format(new)
        output_window.text = output_template.format(msg)
    else:
        # Sorted sub-directories only; plain files are skipped
        options = sorted(d for d in os.listdir(new)
                         if os.path.isdir(os.path.join(new, d)))
        # Refresh the dropdown and immediately load the first directory
        datadirs_select.options = options
        datadirs_select.value = options[0]
        callback_datadirs('value', options[0], options[0])

    return None
def callback_box_select(attr, old, new):
'''Update TextInput start/end entries from BoxSelectTool selection'''
# Get indices of selection
ind = sorted(new['1d']['indices'])
if new is None:
start_input.value = '0'
end_input.value = '0'
else:
start_input.value = str(source.data['ind'][ind[0]])
end_input.value = str(source.data['ind'][ind[-1]])
msg = '''
New start and end index values set.
'''
output_window.text = output_template.format(msg)
return None
def callback_checkbox(attr, old, new):
'''Update visible data from parameters selectin in the CheckboxSelect'''
import numpy
for i in range(len(lines)):
lines[i].visible = i in param_checkbox.active
scats[i].visible = i in param_checkbox.active
return None
def callback_save_indices():
'''Save index from bokeh textinput'''
import datetime
import os
import pylleo
import yamlord
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, 'cal.yml')
param = (param_select.value).lower().replace('-','_')
region = region_select.value
start = int(start_input.value)
end = int(end_input.value)
msg = '''
Updated calibration times for:<br>
<b>{}/{}</b>
<br>
<br>
star index: {}<br>
end index: {}<br>
'''.format(param, region, start, end)
output_window.text = output_template.format(msg)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Generalize for Class-ifying
cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
yamlord.write_yaml(cal_dict, cal_yaml_path)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save the indices to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
def callback_save_poly():
'''Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
'''
import datetime
import pylleo
import yamlord
import itertools
def _check_param_regions(param, regions, cal_dict):
msg = '''
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
'''.format(param)
params_present = True
if param not in cal_dict['parameters']:
params_present = False
msg.format(param)
else:
for region in regions:
if region not in cal_dict['parameters'][param]:
params_present = False
msg.format('{}/{}'.format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
'''Check that index positions exist for each calibration region'''
indices_present = True
for region in regions:
start = cal_dict['parameters'][param][region]['start']
end = cal_dict['parameters'][param][region]['end']
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = '''
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
'''.format(start, end, param, region)
msg.format(start, end, param, region)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
# Check that index positions have been recorded in `cal.yml`
if not _check_index_order(param, regions, cal_dict):
return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace('-','_')
try:
msg = '''
Saved polyfit for <b>{}</b> to <b>{}</b>.
'''.format(param, cal_fname)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict['parameters'][param]['poly'] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = 'Problem saving polyfit: {}'.format(e)
output_window.text = output_template.format(msg)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------
cal_fname = 'cal.yml'
sample_f = 30
dt_fmt = '%H:%M'
# Create Column Data Source that will be used by the plot
# use 6hr span to avoid straing xaxis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
y = [0, 0],
z = [0, 0],
ind = [0, 0],
dt = [t0, t1],
dt_str = [t0.strftime(dt_fmt),
t1.strftime(dt_fmt)],
))
# Input
#------------------------------------------------------------------------------
# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)
# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)
# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)
# Select with parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)
# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)
# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')
# Save the start end times selcted with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)
# Perform a polyfit on the data points occuring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)
# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
'height:150px; padding: 10px; background-color:#f2f2f2; '
'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))
# Plotting
#------------------------------------------------------------------------------
# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
('acc', '$y'),
('time', '@dt_str'),
])
# Define plots tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False
# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)
# Update start/end input text boxes with BoxSelectTool
for scat in scats:
scat.data_source.on_change('selected', callback_box_select)
# Rendering
#------------------------------------------------------------------------------
# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
start_input, end_input, button_save, button_poly)
# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)
# See `output_template for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)
# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | callback_box_select | python | def callback_box_select(attr, old, new):
'''Update TextInput start/end entries from BoxSelectTool selection'''
# Get indices of selection
ind = sorted(new['1d']['indices'])
if new is None:
start_input.value = '0'
end_input.value = '0'
else:
start_input.value = str(source.data['ind'][ind[0]])
end_input.value = str(source.data['ind'][ind[-1]])
msg = '''
New start and end index values set.
'''
output_window.text = output_template.format(msg)
return None | Update TextInput start/end entries from BoxSelectTool selection | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L159-L176 | null | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch an window in your default broweser to visually identify
the times at which various axis of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
'''Plot pandas dataframe containing an x, y, and z column'''
import bokeh.plotting
p = bokeh.plotting.figure(x_axis_type='datetime',
plot_height=height,
plot_width=width,
title=' ',
toolbar_sticky=False,
tools=tools,
active_drag=BoxZoomTool(),
output_backend='webgl')
p.yaxis.axis_label = 'Acceleration (count)'
p.xaxis.axis_label = 'Time (timezone as programmed)'
# Plot accelerometry data as lines and scatter (for BoxSelectTool)
colors = ['#1b9e77', '#d95f02', '#7570b3']
axes = ['x', 'y', 'z']
lines = [None,]*3
scats = [None,]*3
for i, (ax, c) in enumerate(zip(axes, colors)):
lines[i] = p.line(y=ax, x='dt', color=c, legend=False, source=source)
scats[i] = p.scatter(y=ax, x='dt', color=c, legend=False, size=1,
source=source)
return p, lines, scats
def load_data(path_dir):
'''Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names
'''
import os
import pylleo
exp_name = os.path.split(path_dir)[1]
params_tag = pylleo.utils.parse_experiment_params(exp_name)
# Load the Little Leonardo tag data
meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
params_tag['tag_id'])
data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
# Get and curate the parameter names of the loaded dataframe
params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
params_data = [pylleo.utils.posix_string(p) for p in params_data]
params_data = [p for p in params_data if p.startswith('acc')]
return data, params_tag, params_data
def callback_parent(attr, old, new):
'''Update data directories drop down with new parent directory'''
import os
# Remove accidental white space if copy/pasted
new = new.strip()
parent_input.value = new
# Verify new parent path exists and update `datadirs_select` widget
if os.path.exists(new):
# Create sorted list of data directories, ignore files
joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])
# Update dropdown list of available data directories and select first
datadirs_select.options = options
datadirs_select.value = options[0]
callback_datadirs('value', options[0], options[0])
else:
msg = '''
The parent path `{}` does not exist.
Check that you have entered the absolute path.
'''.format(new)
output_window.text = output_template.format(msg)
return None
def callback_datadirs(attr, old, new):
'''Update source and controls with data loaded from selected directory'''
import os
global data
try:
# Load data from new data directory
path_dir = os.path.join(parent_input.value, new)
data, params_tag, params_data = load_data(path_dir)
# Make title with new data directory
p.title.text = 'Calibrating {}'.format(params_tag['experiment'])
# Update `source` data fields from dataframe
dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
source.data = dict(x = list(data['acceleration_x']),
y = list(data['acceleration_y']),
z = list(data['acceleration_z']),
ind = list(data.index),
dt = list(data['datetimes']),
dt_str = dt_str)
# Update values for control widgets
param_checkbox.active = [0, 1, 2]
param_select.options = params_data
param_select.value = params_data[0]
regions = ['lower', 'upper']
region_select.options = regions
region_select.value = regions[0]
start_input.value = str(data.index[0])
end_input.value = str(data.index[-1])
except Exception as e:
msg = '''
Problem loading data directory `{}`.
Please check that data exists in that directory.
Details:
{}
'''.format(new, e)
output_window.text = output_template.format(msg)
return None
def callback_checkbox(attr, old, new):
'''Update visible data from parameters selectin in the CheckboxSelect'''
import numpy
for i in range(len(lines)):
lines[i].visible = i in param_checkbox.active
scats[i].visible = i in param_checkbox.active
return None
def callback_save_indices():
'''Save index from bokeh textinput'''
import datetime
import os
import pylleo
import yamlord
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, 'cal.yml')
param = (param_select.value).lower().replace('-','_')
region = region_select.value
start = int(start_input.value)
end = int(end_input.value)
msg = '''
Updated calibration times for:<br>
<b>{}/{}</b>
<br>
<br>
star index: {}<br>
end index: {}<br>
'''.format(param, region, start, end)
output_window.text = output_template.format(msg)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Generalize for Class-ifying
cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
yamlord.write_yaml(cal_dict, cal_yaml_path)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save the indices to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
def callback_save_poly():
'''Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
'''
import datetime
import pylleo
import yamlord
import itertools
def _check_param_regions(param, regions, cal_dict):
msg = '''
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
'''.format(param)
params_present = True
if param not in cal_dict['parameters']:
params_present = False
msg.format(param)
else:
for region in regions:
if region not in cal_dict['parameters'][param]:
params_present = False
msg.format('{}/{}'.format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
'''Check that index positions exist for each calibration region'''
indices_present = True
for region in regions:
start = cal_dict['parameters'][param][region]['start']
end = cal_dict['parameters'][param][region]['end']
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = '''
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
'''.format(start, end, param, region)
msg.format(start, end, param, region)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
# Check that index positions have been recorded in `cal.yml`
if not _check_index_order(param, regions, cal_dict):
return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace('-','_')
try:
msg = '''
Saved polyfit for <b>{}</b> to <b>{}</b>.
'''.format(param, cal_fname)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict['parameters'][param]['poly'] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = 'Problem saving polyfit: {}'.format(e)
output_window.text = output_template.format(msg)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------
cal_fname = 'cal.yml'
sample_f = 30
dt_fmt = '%H:%M'
# Create Column Data Source that will be used by the plot
# use 6hr span to avoid straing xaxis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
y = [0, 0],
z = [0, 0],
ind = [0, 0],
dt = [t0, t1],
dt_str = [t0.strftime(dt_fmt),
t1.strftime(dt_fmt)],
))
# Input
#------------------------------------------------------------------------------
# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)
# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)
# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)
# Select with parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)
# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)
# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')
# Save the start end times selcted with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)
# Perform a polyfit on the data points occuring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)
# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
'height:150px; padding: 10px; background-color:#f2f2f2; '
'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))
# Plotting
#------------------------------------------------------------------------------
# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
('acc', '$y'),
('time', '@dt_str'),
])
# Define plots tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False
# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)
# Update start/end input text boxes with BoxSelectTool
for scat in scats:
scat.data_source.on_change('selected', callback_box_select)
# Rendering
#------------------------------------------------------------------------------
# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
start_input, end_input, button_save, button_poly)
# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)
# See `output_template for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)
# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | callback_checkbox | python | def callback_checkbox(attr, old, new):
'''Update visible data from parameters selectin in the CheckboxSelect'''
import numpy
for i in range(len(lines)):
lines[i].visible = i in param_checkbox.active
scats[i].visible = i in param_checkbox.active
return None | Update visible data from parameters selectin in the CheckboxSelect | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L179-L187 | null | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch an window in your default broweser to visually identify
the times at which various axis of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
'''Plot pandas dataframe containing an x, y, and z column'''
import bokeh.plotting
p = bokeh.plotting.figure(x_axis_type='datetime',
plot_height=height,
plot_width=width,
title=' ',
toolbar_sticky=False,
tools=tools,
active_drag=BoxZoomTool(),
output_backend='webgl')
p.yaxis.axis_label = 'Acceleration (count)'
p.xaxis.axis_label = 'Time (timezone as programmed)'
# Plot accelerometry data as lines and scatter (for BoxSelectTool)
colors = ['#1b9e77', '#d95f02', '#7570b3']
axes = ['x', 'y', 'z']
lines = [None,]*3
scats = [None,]*3
for i, (ax, c) in enumerate(zip(axes, colors)):
lines[i] = p.line(y=ax, x='dt', color=c, legend=False, source=source)
scats[i] = p.scatter(y=ax, x='dt', color=c, legend=False, size=1,
source=source)
return p, lines, scats
def load_data(path_dir):
'''Load data, directory parameters, and accelerometer parameter names
Args
----
path_dir: str
Path to the data directory
Returns
-------
data: pandas.DataFrame
Experiment data
params_tag: dict
A dictionary of parameters parsed from the directory name
params_data: list
A list of the accelerometer parameter names
'''
import os
import pylleo
exp_name = os.path.split(path_dir)[1]
params_tag = pylleo.utils.parse_experiment_params(exp_name)
# Load the Little Leonardo tag data
meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
params_tag['tag_id'])
data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)
# Get and curate the parameter names of the loaded dataframe
params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
params_data = [pylleo.utils.posix_string(p) for p in params_data]
params_data = [p for p in params_data if p.startswith('acc')]
return data, params_tag, params_data
def callback_parent(attr, old, new):
'''Update data directories drop down with new parent directory'''
import os
# Remove accidental white space if copy/pasted
new = new.strip()
parent_input.value = new
# Verify new parent path exists and update `datadirs_select` widget
if os.path.exists(new):
# Create sorted list of data directories, ignore files
joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])
# Update dropdown list of available data directories and select first
datadirs_select.options = options
datadirs_select.value = options[0]
callback_datadirs('value', options[0], options[0])
else:
msg = '''
The parent path `{}` does not exist.
Check that you have entered the absolute path.
'''.format(new)
output_window.text = output_template.format(msg)
return None
def callback_datadirs(attr, old, new):
'''Update source and controls with data loaded from selected directory'''
import os
global data
try:
# Load data from new data directory
path_dir = os.path.join(parent_input.value, new)
data, params_tag, params_data = load_data(path_dir)
# Make title with new data directory
p.title.text = 'Calibrating {}'.format(params_tag['experiment'])
# Update `source` data fields from dataframe
dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
source.data = dict(x = list(data['acceleration_x']),
y = list(data['acceleration_y']),
z = list(data['acceleration_z']),
ind = list(data.index),
dt = list(data['datetimes']),
dt_str = dt_str)
# Update values for control widgets
param_checkbox.active = [0, 1, 2]
param_select.options = params_data
param_select.value = params_data[0]
regions = ['lower', 'upper']
region_select.options = regions
region_select.value = regions[0]
start_input.value = str(data.index[0])
end_input.value = str(data.index[-1])
except Exception as e:
msg = '''
Problem loading data directory `{}`.
Please check that data exists in that directory.
Details:
{}
'''.format(new, e)
output_window.text = output_template.format(msg)
return None
def callback_box_select(attr, old, new):
'''Update TextInput start/end entries from BoxSelectTool selection'''
# Get indices of selection
ind = sorted(new['1d']['indices'])
if new is None:
start_input.value = '0'
end_input.value = '0'
else:
start_input.value = str(source.data['ind'][ind[0]])
end_input.value = str(source.data['ind'][ind[-1]])
msg = '''
New start and end index values set.
'''
output_window.text = output_template.format(msg)
return None
def callback_save_indices():
'''Save index from bokeh textinput'''
import datetime
import os
import pylleo
import yamlord
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, 'cal.yml')
param = (param_select.value).lower().replace('-','_')
region = region_select.value
start = int(start_input.value)
end = int(end_input.value)
msg = '''
Updated calibration times for:<br>
<b>{}/{}</b>
<br>
<br>
star index: {}<br>
end index: {}<br>
'''.format(param, region, start, end)
output_window.text = output_template.format(msg)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Generalize for Class-ifying
cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
yamlord.write_yaml(cal_dict, cal_yaml_path)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save the indices to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
def callback_save_poly():
'''Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
'''
import datetime
import pylleo
import yamlord
import itertools
def _check_param_regions(param, regions, cal_dict):
msg = '''
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
'''.format(param)
params_present = True
if param not in cal_dict['parameters']:
params_present = False
msg.format(param)
else:
for region in regions:
if region not in cal_dict['parameters'][param]:
params_present = False
msg.format('{}/{}'.format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
'''Check that index positions exist for each calibration region'''
indices_present = True
for region in regions:
start = cal_dict['parameters'][param][region]['start']
end = cal_dict['parameters'][param][region]['end']
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = '''
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
'''.format(start, end, param, region)
msg.format(start, end, param, region)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
# Check that index positions have been recorded in `cal.yml`
if not _check_index_order(param, regions, cal_dict):
return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace('-','_')
try:
msg = '''
Saved polyfit for <b>{}</b> to <b>{}</b>.
'''.format(param, cal_fname)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict['parameters'][param]['poly'] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = 'Problem saving polyfit: {}'.format(e)
output_window.text = output_template.format(msg)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------
cal_fname = 'cal.yml'
sample_f = 30
dt_fmt = '%H:%M'
# Create Column Data Source that will be used by the plot
# use 6hr span to avoid straing xaxis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
y = [0, 0],
z = [0, 0],
ind = [0, 0],
dt = [t0, t1],
dt_str = [t0.strftime(dt_fmt),
t1.strftime(dt_fmt)],
))
# Input
#------------------------------------------------------------------------------
# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)
# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)
# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)
# Select with parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)
# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)
# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')
# Save the start end times selcted with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)
# Perform a polyfit on the data points occuring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)
# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
'height:150px; padding: 10px; background-color:#f2f2f2; '
'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))
# Plotting
#------------------------------------------------------------------------------
# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
('acc', '$y'),
('time', '@dt_str'),
])
# Define plots tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False
# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)
# Update start/end input text boxes with BoxSelectTool
for scat in scats:
scat.data_source.on_change('selected', callback_box_select)
# Rendering
#------------------------------------------------------------------------------
# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
start_input, end_input, button_save, button_poly)
# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)
# See `output_template for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)
# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | callback_save_indices | python | def callback_save_indices():
'''Save index from bokeh textinput'''
import datetime
import os
import pylleo
import yamlord
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, 'cal.yml')
param = (param_select.value).lower().replace('-','_')
region = region_select.value
start = int(start_input.value)
end = int(end_input.value)
msg = '''
Updated calibration times for:<br>
<b>{}/{}</b>
<br>
<br>
star index: {}<br>
end index: {}<br>
'''.format(param, region, start, end)
output_window.text = output_template.format(msg)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Generalize for Class-ifying
cal_dict = pylleo.lleocal.update(data, cal_dict, param, region, start, end)
yamlord.write_yaml(cal_dict, cal_yaml_path)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save the indices to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None | Save index from bokeh textinput | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L190-L227 | [
"def update(data_df, cal_dict, param, bound, start, end):\n '''Update calibration times for give parameter and boundary'''\n from collections import OrderedDict\n\n if param not in cal_dict['parameters']:\n cal_dict['parameters'][param] = OrderedDict()\n if bound not in cal_dict['parameters'][param]:\n cal_dict['parameters'][param][bound] = OrderedDict()\n\n cal_dict['parameters'][param][bound]['start'] = start\n cal_dict['parameters'][param][bound]['end'] = end\n\n return cal_dict\n",
"def read_cal(cal_yaml_path):\n '''Load calibration file if exists, else create\n\n Args\n ----\n cal_yaml_path: str\n Path to calibration YAML file\n\n Returns\n -------\n cal_dict: dict\n Key value pairs of calibration meta data\n '''\n from collections import OrderedDict\n import datetime\n import os\n import warnings\n import yamlord\n\n from . import utils\n\n def __create_cal(cal_yaml_path):\n cal_dict = OrderedDict()\n\n # Add experiment name for calibration reference\n base_path, _ = os.path.split(cal_yaml_path)\n _, exp_name = os.path.split(base_path)\n cal_dict['experiment'] = exp_name\n\n return cal_dict\n\n # Try reading cal file, else create\n if os.path.isfile(cal_yaml_path):\n cal_dict = yamlord.read_yaml(cal_yaml_path)\n else:\n cal_dict = __create_cal(cal_yaml_path)\n cal_dict['parameters'] = OrderedDict()\n\n for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():\n cal_dict[key] = val\n\n fmt = \"%Y-%m-%d %H:%M:%S\"\n cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)\n\n return cal_dict\n"
] | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch an window in your default broweser to visually identify
the times at which various axis of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
    '''Build the datetime figure with line + scatter glyphs for x, y, z.

    One line and one scatter renderer are created per axis; both read
    from the module-level ColumnDataSource ``source``. The scatter
    renderers exist so the BoxSelectTool has points to hit-test against.

    Returns the figure and the lists of line and scatter renderers.
    '''
    import bokeh.plotting

    fig = bokeh.plotting.figure(x_axis_type='datetime',
                                plot_height=height,
                                plot_width=width,
                                title=' ',
                                toolbar_sticky=False,
                                tools=tools,
                                active_drag=BoxZoomTool(),
                                output_backend='webgl')
    fig.yaxis.axis_label = 'Acceleration (count)'
    fig.xaxis.axis_label = 'Time (timezone as programmed)'

    # One colour per accelerometer axis
    palette = ('#1b9e77', '#d95f02', '#7570b3')
    line_glyphs = []
    scat_glyphs = []
    for axis_name, colour in zip(('x', 'y', 'z'), palette):
        line_glyphs.append(
            fig.line(y=axis_name, x='dt', color=colour, legend=False,
                     source=source))
        scat_glyphs.append(
            fig.scatter(y=axis_name, x='dt', color=colour, legend=False,
                        size=1, source=source))

    return fig, line_glyphs, scat_glyphs
def load_data(path_dir):
    '''Load tag data plus directory and accelerometer parameter names.

    Args
    ----
    path_dir: str
        Path to the data directory

    Returns
    -------
    data: pandas.DataFrame
        Experiment data
    params_tag: dict
        A dictionary of parameters parsed from the directory name
    params_data: list
        A list of the accelerometer parameter names
    '''
    import os
    import pylleo

    # The experiment name is the final component of the data directory path
    exp_name = os.path.split(path_dir)[1]
    params_tag = pylleo.utils.parse_experiment_params(exp_name)

    # Read the Little Leonardo meta data, then the (sub-sampled) data itself
    meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
                                   params_tag['tag_id'])
    data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)

    # Keep only POSIX-safe parameter names that are acceleration channels
    tag_params = pylleo.utils.get_tag_params(params_tag['tag_model'])
    params_data = [pylleo.utils.posix_string(name) for name in tag_params]
    params_data = [name for name in params_data if name.startswith('acc')]

    return data, params_tag, params_data
def callback_parent(attr, old, new):
    '''Refresh the data-directory dropdown when the parent path changes.'''
    import os

    # Strip accidental whitespace from copy/pasted paths
    new = new.strip()
    parent_input.value = new

    if not os.path.exists(new):
        msg = '''
        The parent path `{}` does not exist.
        Check that you have entered the absolute path.
        '''.format(new)
        output_window.text = output_template.format(msg)
        return None

    # Sorted list of sub-directories only; plain files are ignored
    options = sorted(entry for entry in os.listdir(new)
                     if os.path.isdir(os.path.join(new, entry)))

    # Update the dropdown, select the first directory, and load it now
    datadirs_select.options = options
    datadirs_select.value = options[0]
    callback_datadirs('value', options[0], options[0])

    return None
def callback_datadirs(attr, old, new):
    '''Update source and controls with data loaded from selected directory'''
    import os

    # `data` is rebound here and read later by the save/polyfit callbacks
    global data

    try:
        # Load data from new data directory
        path_dir = os.path.join(parent_input.value, new)
        data, params_tag, params_data = load_data(path_dir)

        # Make title with new data directory
        p.title.text = 'Calibrating {}'.format(params_tag['experiment'])

        # Update `source` data fields from dataframe
        dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
        source.data = dict(x = list(data['acceleration_x']),
                           y = list(data['acceleration_y']),
                           z = list(data['acceleration_z']),
                           ind = list(data.index),
                           dt = list(data['datetimes']),
                           dt_str = dt_str)

        # Update values for control widgets
        param_checkbox.active = [0, 1, 2]
        param_select.options = params_data
        param_select.value = params_data[0]
        regions = ['lower', 'upper']
        region_select.options = regions
        region_select.value = regions[0]
        start_input.value = str(data.index[0])
        end_input.value = str(data.index[-1])
    except Exception as e:
        # Surface load/widget failures in the status window rather than
        # crashing the Bokeh server session
        msg = '''
        Problem loading data directory `{}`.
        Please check that data exists in that directory.
        Details:
        {}
        '''.format(new, e)
        output_window.text = output_template.format(msg)

    return None
def callback_box_select(attr, old, new):
    '''Update TextInput start/end entries from BoxSelectTool selection.

    Bugfix: the original indexed `new['1d']['indices']` before checking
    `new is None`, and indexed `ind[0]` even when the selection was
    empty, raising TypeError/IndexError on a cleared selection.
    '''
    if new is None or not new['1d']['indices']:
        # No selection (cleared or empty box): reset to defaults
        start_input.value = '0'
        end_input.value = '0'
    else:
        # Map the first/last selected points back to dataframe indices
        ind = sorted(new['1d']['indices'])
        start_input.value = str(source.data['ind'][ind[0]])
        end_input.value = str(source.data['ind'][ind[-1]])

    msg = '''
    New start and end index values set.
    '''
    output_window.text = output_template.format(msg)

    return None
def callback_checkbox(attr, old, new):
    '''Toggle glyph visibility to match the active axis checkboxes.'''
    # Glyph index i corresponds to checkbox button i (x=0, y=1, z=2)
    active = param_checkbox.active
    for idx, (line_glyph, scat_glyph) in enumerate(zip(lines, scats)):
        shown = idx in active
        line_glyph.visible = shown
        scat_glyph.visible = shown
    return None
def callback_save_poly():
    '''Perform polyfit once regions selected.

    Fits a 1d polynomial between the saved lower (-g) and upper (+g)
    calibration regions of the selected parameter and writes the
    coefficients to `cal.yml`.

    Globals: cal_fname, data (read-only, so no declaration)
    '''
    import os
    import pylleo
    import yamlord

    def _check_param_regions(param, regions, cal_dict):
        '''Check that start/end entries exist for the parameter's regions'''
        missing = None
        if param not in cal_dict['parameters']:
            missing = param
        else:
            for region in regions:
                if region not in cal_dict['parameters'][param]:
                    missing = '{}/{}'.format(param, region)
                    break
        if missing is not None:
            # Report the failure; `str.format` returns a new string, so the
            # original's bare `msg.format(...)` calls did nothing.
            msg = '''
            <b>{}</b> was not found in the calibration dictionary.
            Process that parameter and then try saving the polyfit again.
            '''.format(missing)
            output_window.text = output_template.format(msg)
        return missing is None

    def _check_index_order(param, regions, cal_dict):
        '''Check that the start index precedes the end index per region'''
        indices_present = True
        for region in regions:
            start = cal_dict['parameters'][param][region]['start']
            end = cal_dict['parameters'][param][region]['end']
            # Check if start comes after end
            if int(start) > int(end):
                indices_present = False
                msg = '''
                The start index ({}) comes after the end index ({}).
                Please set new start/end indexes for <b>{}/{}</b>
                '''.format(start, end, param, region)
                output_window.text = output_template.format(msg)
        return indices_present

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        cal_yaml_path = os.path.join(path_dir, cal_fname)
        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)

        # Get currently selected parameter
        param = param_select.value
        regions = region_select.options

        # Check that index positions have been recorded in `cal.yml`.
        # Bugfix: the original called `_check_index_order` twice and never
        # called `_check_param_regions`, so a missing parameter raised a
        # KeyError instead of showing a helpful message.
        if not _check_param_regions(param, regions, cal_dict):
            return None

        # Check that index positions are in sequence
        if not _check_index_order(param, regions, cal_dict):
            return None

        param = (param_select.value).lower().replace('-', '_')
        try:
            lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
            poly = list(pylleo.lleocal.fit1d(lower, upper))
            poly = [float(str(i)) for i in poly]
            cal_dict['parameters'][param]['poly'] = poly
            yamlord.write_yaml(cal_dict, cal_yaml_path)

            # Report success only after the fit and file write succeed
            # (the original wrote the success message before fitting)
            msg = '''
            Saved polyfit for <b>{}</b> to <b>{}</b>.
            '''.format(param, cal_fname)
            output_window.text = output_template.format(msg)
        except Exception as e:
            msg = 'Problem saving polyfit: {}'.format(e)
            output_window.text = output_template.format(msg)
    else:
        msg = '''
        You must first load data and select indices for calibration
        regions before you can save to polyfit to `cal.yml`
        '''
        output_window.text = output_template.format(msg)

    return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------

cal_fname = 'cal.yml'   # calibration filename written inside each data dir
sample_f = 30           # sub-sampling factor passed to pylleo.lleoio.read_data
dt_fmt = '%H:%M'        # display format for hover-tool timestamps

# Create Column Data Source that will be used by the plot
# use 6hr span to avoid strange x-axis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
                                      y = [0, 0],
                                      z = [0, 0],
                                      ind = [0, 0],
                                      dt = [t0, t1],
                                      dt_str = [t0.strftime(dt_fmt),
                                                t1.strftime(dt_fmt)],
                                      ))

# Input
#------------------------------------------------------------------------------

# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)

# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)

# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)

# Select which parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)

# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)

# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')

# Save the start end times selected with BoxSelectTool (or manually entered)
# NOTE(review): `callback_save_indices` is not defined in this chunk of the
# file -- confirm it is defined earlier in the module before this line runs
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)

# Perform a polyfit on the data points occurring between the start/end points
# for the parameter and region selected from the dropdown menus
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)

# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
                   'height:150px; padding: 10px; background-color:#f2f2f2; '
                   'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))

# Plotting
#------------------------------------------------------------------------------

# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
                            ('acc', '$y'),
                            ('time', '@dt_str'),
                            ])

# Define plot tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False

# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)

# Update start/end input text boxes with BoxSelectTool
for scat in scats:
    scat.data_source.on_change('selected', callback_box_select)

# Rendering
#------------------------------------------------------------------------------

# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
            start_input, end_input, button_save, button_poly)

# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)

# See `output_template` for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)

# Generate document from layout
curdoc().add_root(layout)
|
ryanjdillon/pylleo | pylleo/calapp/main.py | callback_save_poly | python | def callback_save_poly():
'''Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration)
'''
import datetime
import pylleo
import yamlord
import itertools
def _check_param_regions(param, regions, cal_dict):
msg = '''
<b>{}</b> was not found in the calibration dictionary.
Process that parameter and then try saving the polyfit again.
'''.format(param)
params_present = True
if param not in cal_dict['parameters']:
params_present = False
msg.format(param)
else:
for region in regions:
if region not in cal_dict['parameters'][param]:
params_present = False
msg.format('{}/{}'.format(param, region))
output_window.text = output_template.format(msg)
return params_present
def _check_index_order(param, regions, cal_dict):
'''Check that index positions exist for each calibration region'''
indices_present = True
for region in regions:
start = cal_dict['parameters'][param][region]['start']
end = cal_dict['parameters'][param][region]['end']
# Check if start comes after end
if int(start) > int(end):
indices_present = False
msg = '''
The start index ({}) comes after the end index ({}).
Please set new start/end indexes for <b>{}/{}</b>
'''.format(start, end, param, region)
msg.format(start, end, param, region)
output_window.text = output_template.format(msg)
return indices_present
if datadirs_select.value != 'None':
path_dir = os.path.join(parent_input.value, datadirs_select.value)
cal_yaml_path = os.path.join(path_dir, cal_fname)
cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
# Get currently selected parameter
param = param_select.value
regions = region_select.options
# Check that index positions have been recorded in `cal.yml`
if not _check_index_order(param, regions, cal_dict):
return None
# Check that index positions are in sequence
if not _check_index_order(param, regions, cal_dict):
return None
param = (param_select.value).lower().replace('-','_')
try:
msg = '''
Saved polyfit for <b>{}</b> to <b>{}</b>.
'''.format(param, cal_fname)
output_window.text = output_template.format(msg)
lower, upper = pylleo.lleocal.get_cal_data(data, cal_dict, param)
poly = list(pylleo.lleocal.fit1d(lower, upper))
poly = [float(str(i)) for i in poly]
cal_dict['parameters'][param]['poly'] = poly
yamlord.write_yaml(cal_dict, cal_yaml_path)
except Exception as e:
msg = 'Problem saving polyfit: {}'.format(e)
output_window.text = output_template.format(msg)
else:
msg = '''
You must first load data and select indices for calibration
regions before you can save to polyfit to `cal.yml`
'''
output_window.text = output_template.format(msg)
return None | Perform polyfit once regions selected
Globals: cal_fname, data (read-only, so no declaration) | train | https://github.com/ryanjdillon/pylleo/blob/b9b999fef19eaeccce4f207ab1b6198287c1bfec/pylleo/calapp/main.py#L230-L323 | [
"def read_cal(cal_yaml_path):\n '''Load calibration file if exists, else create\n\n Args\n ----\n cal_yaml_path: str\n Path to calibration YAML file\n\n Returns\n -------\n cal_dict: dict\n Key value pairs of calibration meta data\n '''\n from collections import OrderedDict\n import datetime\n import os\n import warnings\n import yamlord\n\n from . import utils\n\n def __create_cal(cal_yaml_path):\n cal_dict = OrderedDict()\n\n # Add experiment name for calibration reference\n base_path, _ = os.path.split(cal_yaml_path)\n _, exp_name = os.path.split(base_path)\n cal_dict['experiment'] = exp_name\n\n return cal_dict\n\n # Try reading cal file, else create\n if os.path.isfile(cal_yaml_path):\n cal_dict = yamlord.read_yaml(cal_yaml_path)\n else:\n cal_dict = __create_cal(cal_yaml_path)\n cal_dict['parameters'] = OrderedDict()\n\n for key, val in utils.parse_experiment_params(cal_dict['experiment']).items():\n cal_dict[key] = val\n\n fmt = \"%Y-%m-%d %H:%M:%S\"\n cal_dict['date_modified'] = datetime.datetime.now().strftime(fmt)\n\n return cal_dict\n",
"def _check_index_order(param, regions, cal_dict):\n '''Check that index positions exist for each calibration region'''\n\n indices_present = True\n for region in regions:\n start = cal_dict['parameters'][param][region]['start']\n end = cal_dict['parameters'][param][region]['end']\n # Check if start comes after end\n if int(start) > int(end):\n indices_present = False\n msg = '''\n The start index ({}) comes after the end index ({}).\n\n Please set new start/end indexes for <b>{}/{}</b>\n '''.format(start, end, param, region)\n msg.format(start, end, param, region)\n output_window.text = output_template.format(msg)\n\n return indices_present\n"
] | '''
LeoADCA
Little Leonardo Accelerometer Data Calibration Application
This app will launch a window in your default browser to visually identify
the times at which various axes of the lleo tag have been placed into +/-g
orientations.
Enter the start and end times of these orientation periods, then click 'save'
to write those to a calibration YAML file (cal.yml) in the data directory
Example
-------
bokeh serve --show bokeh_calibration.py
'''
def plot_triaxial(height, width, tools):
    '''Plot pandas dataframe containing an x, y, and z column

    Builds a datetime-axis figure and draws one line plus one scatter
    renderer per axis, all reading from the module-level `source`.
    Returns the figure and the lists of line and scatter renderers.
    '''
    import bokeh.plotting

    p = bokeh.plotting.figure(x_axis_type='datetime',
                              plot_height=height,
                              plot_width=width,
                              title=' ',
                              toolbar_sticky=False,
                              tools=tools,
                              active_drag=BoxZoomTool(),
                              output_backend='webgl')
    p.yaxis.axis_label = 'Acceleration (count)'
    p.xaxis.axis_label = 'Time (timezone as programmed)'

    # Plot accelerometry data as lines and scatter (for BoxSelectTool)
    colors = ['#1b9e77', '#d95f02', '#7570b3']   # one colour per axis
    axes = ['x', 'y', 'z']
    lines = [None,]*3
    scats = [None,]*3
    for i, (ax, c) in enumerate(zip(axes, colors)):
        lines[i] = p.line(y=ax, x='dt', color=c, legend=False, source=source)
        scats[i] = p.scatter(y=ax, x='dt', color=c, legend=False, size=1,
                             source=source)

    return p, lines, scats
def load_data(path_dir):
    '''Load data, directory parameters, and accelerometer parameter names

    Args
    ----
    path_dir: str
        Path to the data directory

    Returns
    -------
    data: pandas.DataFrame
        Experiment data
    params_tag: dict
        A dictionary of parameters parsed from the directory name
    params_data: list
        A list of the accelerometer parameter names
    '''
    import os
    import pylleo

    # The experiment name is the final component of the data directory path
    exp_name = os.path.split(path_dir)[1]
    params_tag = pylleo.utils.parse_experiment_params(exp_name)

    # Load the Little Leonardo tag data (sub-sampled by module-level sample_f)
    meta = pylleo.lleoio.read_meta(path_dir, params_tag['tag_model'],
                                   params_tag['tag_id'])
    data = pylleo.lleoio.read_data(meta, path_dir, sample_f=sample_f)

    # Get and curate the parameter names of the loaded dataframe,
    # keeping only the acceleration channels
    params_data = pylleo.utils.get_tag_params(params_tag['tag_model'])
    params_data = [pylleo.utils.posix_string(p) for p in params_data]
    params_data = [p for p in params_data if p.startswith('acc')]

    return data, params_tag, params_data
def callback_parent(attr, old, new):
    '''Update data directories drop down with new parent directory'''
    import os

    # Remove accidental white space if copy/pasted
    new = new.strip()
    parent_input.value = new

    # Verify new parent path exists and update `datadirs_select` widget
    if os.path.exists(new):
        # Create sorted list of data directories, ignore files
        joinisdir = lambda parent, d: os.path.isdir(os.path.join(parent, d))
        options = sorted([d for d in os.listdir(new) if joinisdir(new, d)])

        # Update dropdown list of available data directories and select first
        datadirs_select.options = options
        datadirs_select.value = options[0]
        # Trigger a data load for the first directory immediately
        callback_datadirs('value', options[0], options[0])
    else:
        msg = '''
        The parent path `{}` does not exist.
        Check that you have entered the absolute path.
        '''.format(new)
        output_window.text = output_template.format(msg)

    return None
def callback_datadirs(attr, old, new):
    '''Update source and controls with data loaded from selected directory'''
    import os

    # `data` is rebound here and read later by the save/polyfit callbacks
    global data

    try:
        # Load data from new data directory
        path_dir = os.path.join(parent_input.value, new)
        data, params_tag, params_data = load_data(path_dir)

        # Make title with new data directory
        p.title.text = 'Calibrating {}'.format(params_tag['experiment'])

        # Update `source` data fields from dataframe
        dt_str = [dt.strftime(dt_fmt) for dt in data['datetimes']]
        source.data = dict(x = list(data['acceleration_x']),
                           y = list(data['acceleration_y']),
                           z = list(data['acceleration_z']),
                           ind = list(data.index),
                           dt = list(data['datetimes']),
                           dt_str = dt_str)

        # Update values for control widgets
        param_checkbox.active = [0, 1, 2]
        param_select.options = params_data
        param_select.value = params_data[0]
        regions = ['lower', 'upper']
        region_select.options = regions
        region_select.value = regions[0]
        start_input.value = str(data.index[0])
        end_input.value = str(data.index[-1])
    except Exception as e:
        # Surface load/widget failures in the status window rather than
        # crashing the Bokeh server session
        msg = '''
        Problem loading data directory `{}`.
        Please check that data exists in that directory.
        Details:
        {}
        '''.format(new, e)
        output_window.text = output_template.format(msg)

    return None
def callback_box_select(attr, old, new):
    '''Update TextInput start/end entries from BoxSelectTool selection.

    Bugfix: the original indexed `new['1d']['indices']` before checking
    `new is None`, and indexed `ind[0]` even when the selection was
    empty, raising TypeError/IndexError on a cleared selection.
    '''
    if new is None or not new['1d']['indices']:
        # No selection (cleared or empty box): reset to defaults
        start_input.value = '0'
        end_input.value = '0'
    else:
        # Map the first/last selected points back to dataframe indices
        ind = sorted(new['1d']['indices'])
        start_input.value = str(source.data['ind'][ind[0]])
        end_input.value = str(source.data['ind'][ind[-1]])

    msg = '''
    New start and end index values set.
    '''
    output_window.text = output_template.format(msg)

    return None
def callback_checkbox(attr, old, new):
    '''Update visible data from parameters selected in the CheckboxSelect'''
    import numpy

    # Glyph index i corresponds to checkbox button i (x=0, y=1, z=2)
    for i in range(len(lines)):
        lines[i].visible = i in param_checkbox.active
        scats[i].visible = i in param_checkbox.active

    return None
def callback_save_indices():
    '''Save the start/end indices from the TextInput widgets to `cal.yml`.

    Reads the currently selected parameter and region, the start/end
    indices entered (or box-selected), and writes them into the
    calibration YAML file via pylleo.lleocal.update().
    '''
    import os
    import pylleo
    import yamlord

    if datadirs_select.value != 'None':
        path_dir = os.path.join(parent_input.value, datadirs_select.value)
        # Consistency: use the module-level `cal_fname` instead of a
        # second hard-coded 'cal.yml' literal
        cal_yaml_path = os.path.join(path_dir, cal_fname)

        param = (param_select.value).lower().replace('-', '_')
        region = region_select.value
        start = int(start_input.value)
        end = int(end_input.value)

        # Typo fix: message previously read "star index:"
        msg = '''
        Updated calibration times for:<br>
        <b>{}/{}</b>
        <br>
        <br>
        start index: {}<br>
        end index: {}<br>
        '''.format(param, region, start, end)
        output_window.text = output_template.format(msg)

        cal_dict = pylleo.lleocal.read_cal(cal_yaml_path)
        # Generalize for Class-ifying
        cal_dict = pylleo.lleocal.update(data, cal_dict, param, region,
                                         start, end)
        yamlord.write_yaml(cal_dict, cal_yaml_path)
    else:
        msg = '''
        You must first load data and select indices for calibration
        regions before you can save the indices to `cal.yml`
        '''
        output_window.text = output_template.format(msg)

    return None
import datetime
import numpy
import os
import sys
import subprocess
from bokeh.layouts import widgetbox, column, row
from bokeh.models import ColumnDataSource
from bokeh.models import PanTool, WheelZoomTool, BoxZoomTool, HoverTool
from bokeh.models import BoxSelectTool
from bokeh.models.widgets import Div, PreText, CheckboxButtonGroup
from bokeh.models.widgets import Select, TextInput, Button
from bokeh.io import curdoc
# DATA
#------------------------------------------------------------------------------

cal_fname = 'cal.yml'   # calibration filename written inside each data dir
sample_f = 30           # sub-sampling factor passed to pylleo.lleoio.read_data
dt_fmt = '%H:%M'        # display format for hover-tool timestamps

# Create Column Data Source that will be used by the plot
# use 6hr span to avoid strange x-axis labels
t0 = datetime.datetime.now()
t1 = t0 + datetime.timedelta(hours=6)
source = ColumnDataSource(data = dict(x = [0, 0],
                                      y = [0, 0],
                                      z = [0, 0],
                                      ind = [0, 0],
                                      dt = [t0, t1],
                                      dt_str = [t0.strftime(dt_fmt),
                                                t1.strftime(dt_fmt)],
                                      ))

# Input
#------------------------------------------------------------------------------

# Path for entering the parent directory of data directories
title = 'Parent directory:'
css = ['widthfix']
parent_input = TextInput(value='', title=title, css_classes=css)
parent_input.on_change('value', callback_parent)

# Dropdown list of data directories in parent to load data from
data_dirs = ['None']
title = 'Data directories:'
datadirs_select = Select(title=title, value=data_dirs[0], options=data_dirs)
datadirs_select.on_change('value', callback_datadirs)

# Select which axes to select calibration start/end points
param_checkbox_pre = PreText(text='Axes to display')
labels_ax = ['x', 'y', 'z']
active_ax = []
param_checkbox = CheckboxButtonGroup(labels=labels_ax, active=active_ax)
param_checkbox.on_change('active', callback_checkbox)

# Select which parameter to collect start/end times for and perform a data fit
params_data = ['None']
title = 'Parameter to calibrate:'
param_select = Select(title=title, value=params_data[0], options=params_data)

# Select upper or lower acceleration region to calibrate
regions = ['None']
title = 'Bound (lower = -g; upper = +g):'
region_select = Select(title=title, value=regions[0], options=regions)

# User input start end times, save to cal
start_input = TextInput(value='0', title='Start index:')
end_input = TextInput(value='0', title='End index:')

# Save the start end times selected with BoxSelectTool (or manually entered)
button_save = Button(label='Save Index Values', button_type='success')
button_save.on_click(callback_save_indices)

# Perform a polyfit on the data points occurring between the start/end points
# for the parameter and region selected from the dropdown menus
# NOTE(review): `callback_save_poly` is not defined in this chunk of the
# file -- confirm it is defined earlier in the module before this line runs
button_poly = Button(label='Perform Polyfit', button_type='success')
button_poly.on_click(callback_save_poly)

# Print text output from callback/button routines in styled div container
output_template = ('<div style="display:inline-block; width:300px; '
                   'height:150px; padding: 10px; background-color:#f2f2f2; '
                   'border-radius:10px; overflow:scroll">{}</div>')
output_window = Div(text=output_template.format('Status updates display here'))

# Plotting
#------------------------------------------------------------------------------

# Format data to display when HoverTool activated
hover = HoverTool(tooltips=[('index', '@ind'),
                            ('acc', '$y'),
                            ('time', '@dt_str'),
                            ])

# Define plot tools and create plot object and glyph objects
tools = [PanTool(), WheelZoomTool(), BoxSelectTool(), BoxZoomTool(), hover]
p, lines, scats = plot_triaxial(height=300, width=800, tools=tools)
p.select(BoxSelectTool).select_every_mousemove = False

# Force run of callback to make dummy line not visible at init
callback_checkbox('active', active_ax, active_ax)

# Update start/end input text boxes with BoxSelectTool
for scat in scats:
    scat.data_source.on_change('selected', callback_box_select)

# Rendering
#------------------------------------------------------------------------------

# Bundle controls for inserting into the layout
controls = (param_checkbox_pre, param_checkbox, param_select, region_select,
            start_input, end_input, button_save, button_poly)

# Create layout
row1 = row(column(widgetbox(parent_input, datadirs_select)))
col1 = column(widgetbox(*controls), width=350)

# See `output_template` for css sizing of window
vbuffer = row([], height=35)
col2 = column(vbuffer, widgetbox(output_window))
row2 = row(col1, col2)
layout = column(p, row1, row2, width=1100)

# Generate document from layout
curdoc().add_root(layout)
|
lwcook/horsetail-matching | horsetailmatching/hm.py | _extalg | python | def _extalg(xarr, alpha=100, axis=None):
'''Given an array xarr of values, smoothly return the max/min'''
return (np.sum(xarr * np.exp(alpha*xarr), axis=axis, keepdims=True)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True)) | Given an array xarr of values, smoothly return the max/min | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L834-L837 | null | import pdb
import time
import math
import copy
import warnings
import numpy as np
class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetil matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
        ftarget=None, jac=False, method=None,
        samples_prob=100, samples_int=50, integration_points=None,
        kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
        surrogate=None, surrogate_points=None, surrogate_jac=False,
        reuse_samples=True, verbose=False):
    # NOTE: several attributes below are properties whose setters read other
    # attributes, so the assignment order here matters:
    #   - the `method` setter reads self.jac, hence jac is assigned first
    #   - the `samples_prob`/`samples_int` setters read the uncertainty lists
    #   - the `u_samples` setter reads the sample counts
    self.fqoi = fqoi
    # self.uncertain_parameters = uncertain_parameters
    self.prob_uncertainties = prob_uncertainties
    self.int_uncertainties = int_uncertainties
    self.ftarget = ftarget
    self.jac = jac
    self.method = method # Must be done after setting jac
    self.samples_prob = samples_prob
    self.samples_int = samples_int
    self.integration_points = integration_points
    self.kernel_bandwidth = kernel_bandwidth
    self.kernel_type = kernel_type
    self.alpha = alpha  # sharpness of the smooth min/max used for envelopes
    self.reuse_samples = reuse_samples
    self.u_samples = None  # cached uncertainty samples (see u_samples property)
    self.surrogate = surrogate
    self.surrogate_points = surrogate_points
    self.surrogate_jac = surrogate_jac
    self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
@property
def prob_uncertainties(self):
    # List of probabilistic uncertainties (always stored as a list).
    return self._prob_uncertainties

@prob_uncertainties.setter
def prob_uncertainties(self, params):
    # Accept a single parameter or an iterable; normalize to a list.
    self._prob_uncertainties = _makeIter(params)

@property
def int_uncertainties(self):
    # List of interval uncertainties (always stored as a list).
    return self._int_uncertainties

@int_uncertainties.setter
def int_uncertainties(self, params):
    # Accept a single parameter or an iterable; normalize to a list.
    self._int_uncertainties = _makeIter(params)

@property
def samples_prob(self):
    # Number of samples drawn from the probabilistic uncertainties
    # (the inner loop of the nested sampling).
    return self._samples_prob

@samples_prob.setter
def samples_prob(self, value):
    # With no probabilistic uncertainties a single (dummy) sample suffices.
    if len(self.prob_uncertainties) > 0:
        self._samples_prob = value
    else:
        self._samples_prob = 1

@property
def samples_int(self):
    # Number of samples drawn from the interval uncertainties
    # (the outer loop of the nested sampling).
    return self._samples_int

@samples_int.setter
def samples_int(self, value):
    # With no interval uncertainties a single (dummy) sample suffices.
    if len(self.int_uncertainties) > 0:
        self._samples_int = value
    else:
        self._samples_int = 1
@property
def method(self):
    # Metric evaluation method: 'empirical' or 'kernel'.
    return self._method

@method.setter
def method(self, value):
    # Default depends on whether gradients are used: the empirical metric
    # is not differentiable, so when jac is provided default to kernels.
    if value is None:
        if self.jac is False:
            self._method = 'empirical'
        else:
            self._method = 'kernel'
    else:
        self._method = value
@property
def ftarget(self):
    # Target inverse CDF t(h): either a single callable used for both
    # horsetail curves, or an (upper, lower) pair of callables.
    return self._ftarget

@ftarget.setter
def ftarget(self, value):
    def standardTarget(h):
        # Default target: t(h) = 0 for all h.
        return 0
    try:
        # A pair of targets given as (upper, lower).
        iter(value)
        self._ftarg_u = value[0]
        self._ftarg_l = value[1]
        self._ftarget = value
    except (TypeError, IndexError, KeyError):
        # Not an indexable pair: treat as a single callable (or None for
        # the default), used for both the upper and lower curves.
        # Narrowed from a bare `except:` which could mask unrelated errors
        # raised inside a user-supplied object.
        if value is None:
            self._ftarget = standardTarget
        else:
            self._ftarget = value
        self._ftarg_u = self._ftarget
        self._ftarg_l = self._ftarget
@property
def u_samples(self):
    # Cached samples of the uncertain parameters with shape
    # (samples_int, samples_prob, num_uncertainties), or None.
    return self._u_samples

@u_samples.setter
def u_samples(self, samples):
    if samples is not None:
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        # Enforce the exact 3D layout expected by _evalSamples.
        if (not isinstance(samples, np.ndarray) or
                samples.shape != (self.samples_int, self.samples_prob, N_u)):
            raise TypeError('u_samples should be a np.array of size'
                    '(samples_int, samples_prob, num_uncertanities)')
    self._u_samples = samples
@property
def kernel_type(self):
    # Kernel used by the kernel-based metric:
    # 'gaussian', 'uniform' or 'triangle'.
    return self._kernel_type

@kernel_type.setter
def kernel_type(self, value):
    allowed_types = ['gaussian', 'uniform', 'triangle']
    if value not in allowed_types:
        # Fixed message spacing: previously rendered as
        # "...must be one ofgaussian, uniform, triangle".
        raise ValueError('Kernel type must be one of ' +
                ', '.join([str(t) for t in allowed_types]))
    else:
        self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
def evalSamples(self, x):
    '''Evaluates the samples of quantity of interest and its gradient
    (if supplied) at the given values of the design variables

    :param iterable x: values of the design variables, this is passed as
        the first argument to the function fqoi

    :return: (values of the quantity of interest, values of the gradient)
    :rtype: Tuple
    '''

    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()

    self._N_dv = len(_makeIter(x))  # number of design variables

    if self.verbose:
        print('Evaluating surrogate')
    if self.surrogate is None:
        # No surrogate: close over x and evaluate the true QoI/gradient.
        def fqoi(u):
            return self.fqoi(x, u)
        def fgrad(u):
            return self.jac(x, u)
        jac = self.jac
    else:
        # Fit surrogates at this design point and sample those instead.
        fqoi, fgrad, surr_jac = self._makeSurrogates(x)
        jac = surr_jac

    u_samples = self._getParameterSamples()

    if self.verbose:
        print('Evaluating quantity of interest at samples')
    q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)

    return q_samples, grad_samples
def evalMetric(self, x, method=None):
    '''Evaluates the horsetail matching metric at given values of the
    design variables.

    Samples the quantity of interest (and gradient, when available) at
    the design point x, then evaluates the metric from those samples.

    :param iterable x: values of the design variables, this is passed as
        the first argument to the function fqoi
    :param str method: method to use to evaluate the metric ('empirical'
        or 'kernel'); defaults to the instance's method attribute

    :return: metric_value - value of the metric evaluated at the design
        point given by x
    :rtype: float
    '''
    if self.verbose:
        print('----------')
        print('At design: ' + str(x))

    qoi_samples, gradient_samples = self.evalSamples(x)

    if self.verbose:
        print('Evaluating metric')

    return self.evalMetricFromSamples(qoi_samples, gradient_samples, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
    '''Evaluates the horsetail matching metric from given samples of the quantity
    of interest and gradient instead of evaluating them at a design.

    :param np.ndarray q_samples: samples of the quantity of interest,
        size (M_int, M_prob)
    :param np.ndarray grad_samples: samples of the gradient,
        size (M_int, M_prob, n_x)

    :return: metric_value - value of the metric
    :rtype: float

    :raises ValueError: if sample shapes are inconsistent with
        samples_int/samples_prob, or the method is unrecognized
    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()

    q_samples = np.array(q_samples)
    if not (q_samples.shape[0] == self.samples_int and
            q_samples.shape[1] == self.samples_prob):
        raise ValueError('Shape of q_samples should be [M_int, M_prob]')

    if grad_samples is not None:
        grad_samples = np.array(grad_samples)
        if not (grad_samples.shape[0] == self.samples_int and
                grad_samples.shape[1] == self.samples_prob):
            raise ValueError('''Shape of grad_samples
                    should be [M_int, M_prob, n_dv]''')

    if method is None:
        method = self.method  # fall back to the instance-level default

    # Dispatch to the chosen evaluation scheme.
    if method.lower() == 'empirical':
        return self._evalMetricEmpirical(q_samples, grad_samples)
    elif method.lower() == 'kernel':
        return self._evalMetricKernel(q_samples, grad_samples)
    else:
        raise ValueError('Unsupported metric evalation method')
def getHorsetail(self):
    '''Function that gets vectors of the horsetail plot at the last design
    evaluated.

    :return: upper_curve, lower_curve, CDFs - returns three parameters,
        the first two are tuples containing pairs of x/y vectors of the
        upper and lower bounds on the CDFs (the horsetail plot). The
        third parameter is a list of x/y tuples for individual CDFs
        propagated at each sampled value of the interval uncertainties

    :raises ValueError: if no metric evaluation has been performed yet

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
        >>> matplotlib.pyplot(x1, y1, 'b')
        >>> matplotlib.pyplot(x2, y2, 'b')
        >>> for (x, y) in CDFs:
        ...     matplotlib.pyplot(x, y, 'k:')
        >>> matplotlib.pyplot.show()
    '''
    # _ql etc. are stashed by the last _evalMetricEmpirical/_evalMetricKernel.
    if hasattr(self, '_ql'):
        ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
        qh, hh = self._qh, self._hh
        if self._qis is not None:
            # Kernel method: pad curves with flat segments so the plotted
            # CDFs span h = 0 to h = 1 over the integration-point range.
            ql, hl = _appendPlotArrays(ql, hl, self._qis)
            qu, hu = _appendPlotArrays(qu, hu, self._qis)

        CDFs = []
        # NOTE(review): for the kernel method _qh/_hh are stored with shape
        # (N_quad, M_int); zipping rows here yields N_quad pairs rather than
        # one curve per interval sample -- verify intended orientation.
        for qi, hi in zip(qh, hh):
            CDFs.append((qi, hi))

        upper_target = [self._ftarg_u(h) for h in hu]
        upper_curve = (qu, hu, upper_target)
        lower_target = [self._ftarg_l(h) for h in hl]
        lower_curve = (ql, hl, lower_target)
        return upper_curve, lower_curve, CDFs
    else:
        raise ValueError('''The metric has not been evaluated at any
                design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
def _evalMetricEmpirical(self, q_samples, grad_samples=None):
    # Evaluate the horsetail matching metric (and, optionally, its
    # gradient) from empirical CDFs of the QoI samples.
    #   q_samples: (M_int, M_prob); grad_samples: (M_int, M_prob, n_dv) or None.
    M_prob = self.samples_prob
    M_int = self.samples_int

    if M_int > 1:
        alpha = self.alpha  # sharpness of smooth min/max over interval dim
    else:
        alpha = 1  # single CDF: the smooth extremum degenerates to identity

    h_htail = np.zeros([M_int, M_prob])  # ECDF heights per interval sample
    q_htail = np.zeros([M_int, M_prob])  # sorted QoI values per interval sample
    q_l = np.zeros(M_prob)  # lower horsetail curve
    q_u = np.zeros(M_prob)  # upper horsetail curve
    if grad_samples is not None:
        g_htail = np.zeros([M_int, M_prob, self._N_dv])
        g_l = np.zeros([M_prob, self._N_dv])
        g_u = np.zeros([M_prob, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    for ii in np.arange(M_int):
        # Get empirical CDF by sorting samples at each value of intervals
        sortinds = np.argsort(q_samples[ii, :])
        q_htail[ii, :] = q_samples[ii, sortinds]
        M = q_samples.shape[1]
        # Midpoint plotting positions: h_j = (j + 0.5)/M
        h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]

        if grad_samples is not None:
            # Reorder gradients consistently with the sorted QoI values.
            for ix in np.arange(self._N_dv):
                g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]

    for jj in np.arange(M_prob):
        # Envelope over the interval dimension: at a given h level the
        # upper curve is the smallest q and the lower curve the largest.
        q_u[jj] = min(q_htail[:, jj])
        q_l[jj] = max(q_htail[:, jj])
        if grad_samples is not None:
            # Differentiable smooth min/max replaces the hard extrema
            # (overwrites the values just computed above).
            q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
            q_l[jj] = _extalg(q_htail[:, jj], alpha)
            for ix in np.arange(self._N_dv):
                gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                gtemp = _extgrad(q_htail[:, jj], alpha)
                g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])

    h_u, h_l = h_htail[0], h_htail[0]  # h is same for all ECDFs
    t_u = [self._ftarg_u(hi) for hi in h_u]
    t_l = [self._ftarg_l(hi) for hi in h_u]

    # Stash curves for getHorsetail().
    self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
    self._qh, self._hh = q_htail, h_htail
    self._tl, self._tu = t_l, t_u
    self._qis = None  # no kernel integration points for the empirical method

    # Mean squared mismatch between each horsetail curve and its target.
    Du = (1./M_prob)*sum((q_u - t_u)**2)
    Dl = (1./M_prob)*sum((q_l - t_l)**2)

    dhat = np.sqrt(Du + Dl)
    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Chain rule through the smooth envelopes.
        for ix in np.arange(self._N_dv):
            Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
            Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])

        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad
    else:
        return dhat
def _getKernelParameters(self, q_samples):
    # Determine the kernel bandwidth and integration points for the
    # kernel-based metric. Both are cached on the instance the first time.
    # Returns (qis, bw): the trimmed integration grid and the bandwidth.

    # If kernel bandwidth not specified, find it using Scott's rule
    if self.kernel_bandwidth is None:
        if len(self.prob_uncertainties) > 0:
            if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                # Near-constant samples: avoid a zero bandwidth.
                bw = 1e-6
            else:
                bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                        *np.std(q_samples[0,:]))
        else:
            bw = 1e-3  # interval-only problems: small fixed bandwidth
        self.kernel_bandwidth = bw  # cache for subsequent evaluations
    else:
        bw = self.kernel_bandwidth

    ## Initalize arrays and prepare calculation
    q_min = np.amin(q_samples)
    q_max = np.amax(q_samples)

    if self.integration_points is None:
        # Default grid: 10000 points spread over 3x the sample range.
        q_range = q_max - q_min
        qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
        self.integration_points = qis_full
    else:
        qis_full = np.array(self.integration_points)

    # Trim the grid to the sample range (+/- 20 bandwidths) so kernels are
    # only evaluated where the smoothed CDF is not trivially 0 or 1.
    ii_low, ii_high = 0, len(qis_full)
    try:
        ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                qi > q_max + 20*bw)
    except StopIteration:
        warnings.warn('Sample found higher than range of integration points')

    try:
        # Scan from the top end downwards for the lower cut-off.
        iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                qi < q_min - 20*bw)
        ii_low = len(qis_full) - (iiN_low+1)
    except StopIteration:
        warnings.warn('Sample found lower than range of integration points')

    qis = qis_full[ii_low:ii_high+1]  # Only evaluate over range of samples
    self._qis = qis  # stored for getHorsetail() plotting
    return qis, bw
def _evalMetricKernel(self, q_samples, grad_samples=None):
    # Kernel-smoothed evaluation of the horsetail matching metric (and
    # gradient): a differentiable alternative to the empirical method.
    qis, bw = self._getKernelParameters(q_samples)

    N_quad = len(qis)
    M_prob = self.samples_prob
    M_int = self.samples_int

    if M_int > 1:
        alpha = self.alpha  # sharpness of smooth min/max over interval dim
    else:
        alpha = 1

    fhtail = np.zeros([N_quad, M_int])  # smoothed CDF values per interval sample
    qhtail = np.zeros([N_quad, M_int])  # integration points (same each column)
    if grad_samples is not None:
        fht_grad = np.zeros([N_quad, M_int, self._N_dv])
        hu_grad = np.zeros([N_quad, self._N_dv])
        hl_grad = np.zeros([N_quad, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    # ALGORITHM 1 from publication
    # Evaluate all individual CDFs and their gradients
    for mm in np.arange(M_int):
        qjs = q_samples[mm, :]
        # rmat[i, j] = (integration point i) - (sample j)
        rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
        if grad_samples is not None:
            Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                    ktype=self.kernel_type, bGrad=True)
            for ix in np.arange(self._N_dv):
                grad_js = grad_samples[mm, :, ix]
                fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
        else:
            Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                    bGrad=False)

        # Sum kernel contributions over the samples to get the CDF.
        fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
        qhtail[:, mm] = qis

    # ALGORITHM 2 from publication
    # Find horsetail curves - envelope of the CDFs and their gradients
    # In Matrix form
    if grad_samples is None:
        hu = np.max(fhtail, axis=1).flatten()
        hl = np.min(fhtail, axis=1).flatten()
    else:
        # Smooth extremes so the envelope is differentiable.
        hu = _extalg(fhtail, alpha, axis=1).flatten()
        hl = _extalg(fhtail, -1*alpha, axis=1).flatten()

        Su_prime = _extgrad(fhtail, alpha, axis=1)
        Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
        for kx in np.arange(self._N_dv):
            fis_grad = fht_grad[:, :, kx]
            for ii in np.arange(N_quad):
                hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])

    # ALGORITHM 3 from publication
    # Evaluate overall metric and gradient using matrix multipliation
    tu = np.array([self._ftarg_u(hi) for hi in hu])
    tl = np.array([self._ftarg_l(hi) for hi in hl])
    Du = _matrix_integration(qis, hu, tu)
    Dl = _matrix_integration(qis, hl, tl)
    dhat = float(np.sqrt(Du + Dl))

    # Stash curves for getHorsetail().
    self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
    self._qh, self._hh = qhtail, fhtail
    self._tl, self._tu = tl, tu

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Finite-difference the targets w.r.t. h for the chain rule.
        tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
        tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
        for kx in np.arange(self._N_dv):
            Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
            Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)

        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))

        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad
    else:
        return dhat
def _makeSurrogates(self, x):
    # Build surrogate models of the QoI (and its gradient) at design x,
    # fitted to evaluations at the surrogate/quadrature points. Returns
    # (fqoi, fgrad, jac) with the same calling conventions as evalSamples
    # expects for the true functions.

    # Get quadrature points
    if self.surrogate_points is None:
        # Default: 5-point tensor-product grid on [-1, 1] per dimension.
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
                copy=False)
        u_sparse = np.vstack([m.flatten() for m in mesh]).T
    else:
        u_sparse = self.surrogate_points

    N_sparse = u_sparse.shape[0]
    q_sparse = np.zeros(N_sparse)

    # Get surrogates in correct form
    if not self.jac:
        # No gradients: fit a surrogate to the QoI only.
        for iu, u in enumerate(u_sparse):
            q_sparse[iu] = self.fqoi(x, u)

        surr_qoi = self.surrogate(u_sparse, q_sparse)

        def fqoi(u):
            return surr_qoi(u)

        fgrad = False
        surr_jac = False

    else:
        g_sparse = np.zeros([N_sparse, self._N_dv])
        for iu, u in enumerate(u_sparse):
            if isinstance(self.jac, bool) and self.jac:
                # jac=True means fqoi returns (q, grad) together.
                q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
            else:
                q_sparse[iu] = self.fqoi(x, u)
                g_sparse[iu, :] = self.jac(x, u)

        if not self.surrogate_jac:
            # Fit an independent surrogate to each gradient component.
            fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            for k in np.arange(self._N_dv):
                fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])
            def surr_grad(u):
                return [f(u) for f in fpartial]
        else:
            if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                # surrogate() fits QoI and gradient simultaneously.
                surr_qoi, surr_grad = self.surrogate(
                        u_sparse, q_sparse, g_sparse)
            else:
                # Separate user-supplied surrogate for the gradient.
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                surr_grad = self.surrogate_jac(u_sparse, g_sparse)

        def fqoi(u):
            return(surr_qoi(u))
        def fgrad(u):
            return(surr_grad(u))
        surr_jac = fgrad

    return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
    """Return samples of the uncertain parameters, shape
    (samples_int, samples_prob, N_u), interval columns first.

    Cached samples are re-used when reuse_samples is True and the
    required shape has not changed since they were generated.
    """
    N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)

    get_new = True
    if self.reuse_samples and self.u_samples is not None:
        if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
            if self.verbose:
                print('''Stored samples do not match current dimensions,
                        getting new samples''')
        else:
            get_new = False

    if not get_new:
        if self.verbose:
            print('Re-using stored samples')
        return self.u_samples

    if self.verbose:
        print('Getting uncertain parameter samples')

    # --- Interval uncertainties: one value per outer-loop sample ---
    u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
    for kk, uk in enumerate(self.int_uncertainties):
        if callable(uk):
            samps = np.array(uk()).flatten()
            # Bug fix: these samples fill a column of length samples_int
            # (the previous check compared against samples_prob, which
            # broke whenever the two counts differed).
            if len(samps) != self.samples_int:
                raise Exception('Number of samples returned not equal ' +
                    'to specified number of samples: please set number of ' +
                    'samples with samples_int attribute')
            else:
                u_ints[:, kk] = samps
        elif isinstance(uk, (tuple, list)):  ## See if given as tuple/list of bounds
            # Uniform draws with the endpoints forced in, so the envelope
            # always covers the full interval.
            lb, ub = uk[0], uk[1]
            u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
            u_ints[0, kk] = lb
            u_ints[-1, kk] = ub
        elif hasattr(uk, 'getSample'):
            for ii in np.arange(self.samples_int):
                u_ints[ii, kk] = uk.getSample()
        else:
            raise TypeError('Unsupported interval uncertainty type')
    # Repeat each interval sample across the probabilistic dimension.
    u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))

    # --- Probabilistic uncertainties: same draw reused in each outer loop ---
    u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
    for kk, uk in enumerate(self.prob_uncertainties):
        if callable(uk):
            samps = np.array(uk()).flatten()
            if len(samps) != self.samples_prob:
                raise Exception('Number of samples returned not equal ' +
                    'to specified number of samples: please set number of ' +
                    'samples with samples_prob attribute')
            else:
                u_probs[:, kk] = samps
        elif hasattr(uk, 'getSample'):
            for jj in np.arange(self.samples_prob):
                u_probs[jj, kk] = uk.getSample()
        else:
            raise TypeError('Unsupported probabilistic uncertainty type')
    u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))

    # Interval columns first, then probabilistic, matching N_u above.
    u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
    self.u_samples = u_samples
    return u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
    # Evaluate the QoI (and gradients when jac is truthy) at every
    # uncertainty sample. Returns (q_samples, grad_samples) of shapes
    # (M_int, M_prob) and (M_int, M_prob, n_dv); grad_samples is None
    # when no gradients are requested.

    # Array of shape (M_int, M_prob)
    grad_samples = None
    q_samples = np.zeros([self.samples_int, self.samples_prob])
    if not jac:
        for ii in np.arange(q_samples.shape[0]):
            for jj in np.arange(q_samples.shape[1]):
                q_samples[ii, jj] = fqoi(u_samples[ii, jj])
    else:
        grad_samples = np.zeros([self.samples_int, self.samples_prob,
                self._N_dv])
        for ii in np.arange(q_samples.shape[0]):
            for jj in np.arange(q_samples.shape[1]):
                if isinstance(jac, bool) and jac:
                    # jac=True: fqoi returns (q, grad) together.
                    (q, grad) = fqoi(u_samples[ii, jj])
                    q_samples[ii, jj] = float(q)
                    grad_samples[ii, jj, :] = [_ for _ in grad]
                else:
                    # Separate gradient function supplied.
                    q_samples[ii, jj] = fqoi(u_samples[ii, jj])
                    grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])

    # Cache the raw samples for inspection/debugging.
    self.grad_samples = grad_samples
    self.q_samples = q_samples

    return q_samples, grad_samples
##############################################################################
## Private functions
##############################################################################
def _extgrad(xarr, alpha=100, axis=None):
    '''Gradient of the smooth min/max _extalg(xarr, alpha) with respect to
    each entry of xarr (softmax weighting times a correction term).'''
    exp_x = np.exp(alpha*xarr)
    weights = exp_x/np.sum(exp_x, axis=axis, keepdims=True)
    correction = 1 + alpha*(xarr - _extalg(xarr, alpha, axis=axis))
    return weights*correction
def _ramp(x, width):
    # Unit ramp clamped to [0, 1]: rises linearly over the given width.
    scaled = (x - width/2)*(1/width)
    return _minsmooth(1, _maxsmooth(0, scaled))
def _trint(x, width):
    # Integrated triangle kernel of the given base width: a smooth CDF
    # rising from 0 at x = -width/2 to 1 at x = +width/2.
    w = width/2.
    xb = _maxsmooth(-w, _minsmooth(x, w))  # clamp x to [-w, w]
    # Quadratic pieces of the triangle CDF; y1 covers the lower half,
    # y2 the upper half -- the min/max below select the active piece.
    # (presumably: verify against the triangle-kernel derivation)
    y1 = 0.5 + xb/w + xb**2/(2*w**2)
    y2 = xb/w - xb**2/(2*w**2)
    return _minsmooth(y1, 0.5) + _maxsmooth(y2, 0.0)
def _minsmooth(a, b, eps=0.0000):
return 0.5*(a + b - np.sqrt((a-b)**2 + eps**2))
def _maxsmooth(a, b, eps=0.0000):
return 0.5*(a + b + np.sqrt((a-b)**2 + eps**2))
def _step(x):
return 1 * (x > 0)
def _erf(r):
## Numerical implementation of the error function for matrix comptibility
# save the sign of x
sign = np.sign(r)
x = np.absolute(r)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def _kernel(points, M, bw, ktype='gauss', bGrad=False):
    '''Kernel CDF matrix (and optionally the kernel density matrix) for the
    kernel-smoothed empirical CDF.

    :param points: matrix of (integration point - sample) differences
    :param int M: number of samples; each kernel carries weight 1/M
    :param float bw: kernel bandwidth
    :param str ktype: 'gaussian'/'gauss', 'uniform'/'uni' or 'triangle'/'tri'
    :param bool bGrad: if True, also return the kernel density matrix
        used for gradient propagation

    :return: KernelMat, or (KernelMat, KernelGradMat) when bGrad is True
    :raises ValueError: for an unrecognized kernel type (previously an
        unknown ktype fell through and raised a confusing NameError)
    '''
    if ktype == 'gauss' or ktype == 'gaussian':
        # Gaussian CDF via the error function.
        KernelMat = (1./M)*((1 + _erf((points/bw)/np.sqrt(2.)))/2.)
    elif ktype == 'uniform' or ktype == 'uni':
        # Uniform kernel of equivalent variance: CDF is a linear ramp.
        KernelMat = (1./M)*_ramp(points, width=bw*np.sqrt(12))
    elif ktype == 'triangle' or ktype == 'tri':
        # Triangle kernel of equivalent variance: CDF is _trint.
        KernelMat = (1./M)*_trint(points, width=bw*2.*np.sqrt(6))
    else:
        raise ValueError('Unsupported kernel type: ' + str(ktype))

    if bGrad:
        if ktype == 'gauss' or ktype == 'gaussian':
            # Gaussian density.
            const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
            KernelGradMat = const_term * np.exp(-(1./2.) * (points/bw)**2)
        elif ktype == 'uniform' or ktype == 'uni':
            # Boxcar density over the kernel support.
            width = bw*np.sqrt(12)
            const = (1./M)*(1./width)
            KernelGradMat = const*(_step(points+width/2) -
                    _step(points-width/2))
        elif ktype == 'triangle' or ktype == 'tri':
            # Triangle density as a difference of two ramps.
            width = bw*2.*np.sqrt(6)
            const = (1./M)*(2./width)
            KernelGradMat = const*(_ramp(points+width/4, width/2) -
                    _ramp(points-width/4, width/2))

        return KernelMat, KernelGradMat
    else:
        return KernelMat
def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp
def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad
def _appendPlotArrays(q, h, integration_points):
q = np.insert(q, 0, q[0])
h = np.insert(h, 0, 0)
q = np.insert(q, 0, min(integration_points))
h = np.insert(h, 0, 0)
q = np.append(q, q[-1])
h = np.append(h, 1)
q = np.append(q, max(integration_points))
h = np.append(h, 1)
return q, h
def _finDiff(fobj, dv, f0=None, eps=10**-6):
if f0 is None:
f0 = fobj(dv)
fbase = copy.copy(f0)
fnew = fobj(dv + eps)
return float((fnew - fbase)/eps)
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
def _intervalSample(returned_samples, bounds):
if len(returned_samples) < 1:
return bounds[0]
elif len(returned_samples) < 2:
return bounds[1]
else:
return np.random.uniform(bounds[0], bounds[1])
|
lwcook/horsetail-matching | horsetailmatching/hm.py | _extgrad | python | def _extgrad(xarr, alpha=100, axis=None):
'''Given an array xarr of values, return the gradient of the smooth min/max
swith respect to each entry in the array'''
term1 = (np.exp(alpha*xarr)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
term2 = 1 + alpha*(xarr - _extalg(xarr, alpha, axis=axis))
return term1*term2 | Given an array xarr of values, return the gradient of the smooth min/max
swith respect to each entry in the array | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L839-L846 | null | import pdb
import time
import math
import copy
import warnings
import numpy as np
class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetil matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
ftarget=None, jac=False, method=None,
samples_prob=100, samples_int=50, integration_points=None,
kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False):
self.fqoi = fqoi
# self.uncertain_parameters = uncertain_parameters
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = int_uncertainties
self.ftarget = ftarget
self.jac = jac
self.method = method # Must be done after setting jac
self.samples_prob = samples_prob
self.samples_int = samples_int
self.integration_points = integration_points
self.kernel_bandwidth = kernel_bandwidth
self.kernel_type = kernel_type
self.alpha = alpha
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
    @property
    def prob_uncertainties(self):
        """List of probabilistic uncertainties (parameter objects or
        sampling callables); always stored as a list via _makeIter."""
        return self._prob_uncertainties

    @prob_uncertainties.setter
    def prob_uncertainties(self, params):
        # Accept either a single uncertainty or an iterable of them.
        self._prob_uncertainties = _makeIter(params)

    @property
    def int_uncertainties(self):
        """List of interval uncertainties (parameter objects, bound
        tuples/lists, or sampling callables)."""
        return self._int_uncertainties

    @int_uncertainties.setter
    def int_uncertainties(self, params):
        self._int_uncertainties = _makeIter(params)

    @property
    def samples_prob(self):
        """Number of samples drawn from the probabilistic uncertainties."""
        return self._samples_prob

    @samples_prob.setter
    def samples_prob(self, value):
        # With no probabilistic uncertainties a single (dummy) sample is
        # enough, regardless of what the caller requested.
        if len(self.prob_uncertainties) > 0:
            self._samples_prob = value
        else:
            self._samples_prob = 1

    @property
    def samples_int(self):
        """Number of samples drawn from the interval uncertainties."""
        return self._samples_int

    @samples_int.setter
    def samples_int(self, value):
        # Same collapse to 1 when there are no interval uncertainties.
        if len(self.int_uncertainties) > 0:
            self._samples_int = value
        else:
            self._samples_int = 1

    @property
    def method(self):
        """Metric evaluation method: 'empirical' or 'kernel'."""
        return self._method

    @method.setter
    def method(self, value):
        # Default depends on whether gradients are propagated: the
        # empirical method is not differentiable, so with gradients the
        # kernel method is used.
        if value is None:
            if self.jac is False:
                self._method = 'empirical'
            else:
                self._method = 'kernel'
        else:
            self._method = value

    @property
    def ftarget(self):
        """Target inverse CDF: a single callable t(h), or a pair of
        callables (upper, lower) under mixed uncertainties."""
        return self._ftarget

    @ftarget.setter
    def ftarget(self, value):
        def standardTarget(h):
            # Default target: t(h) = 0 for all h.
            return 0
        # EAFP: if value is iterable it is treated as an (upper, lower)
        # pair of target functions; otherwise fall through to the single
        # target (or the default when value is None).
        # NOTE(review): the bare except also hides unrelated errors
        # raised while indexing value — confirm intended.
        try:
            iter(value)
            self._ftarg_u = value[0]
            self._ftarg_l = value[1]
            self._ftarget = value
        except:
            if value is None:
                self._ftarget = standardTarget
            else:
                self._ftarget = value
            self._ftarg_u = self._ftarget
            self._ftarg_l = self._ftarget

    @property
    def u_samples(self):
        """Stored uncertainty samples of shape
        (samples_int, samples_prob, n_uncertainties), or None."""
        return self._u_samples

    @u_samples.setter
    def u_samples(self, samples):
        if samples is not None:
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            # Validate the shape so the nested sampling loops downstream
            # can rely on it.
            if (not isinstance(samples, np.ndarray) or
                    samples.shape != (self.samples_int, self.samples_prob, N_u)):
                raise TypeError('u_samples should be a np.array of size'
                        '(samples_int, samples_prob, num_uncertanities)')
        self._u_samples = samples

    @property
    def kernel_type(self):
        """Kernel shape used by the 'kernel' method: 'gaussian',
        'uniform' or 'triangle'."""
        return self._kernel_type

    @kernel_type.setter
    def kernel_type(self, value):
        allowed_types = ['gaussian', 'uniform', 'triangle']
        if value not in allowed_types:
            raise ValueError('Kernel type must be one of'+
                    ', '.join([str(t) for t in allowed_types]))
        else:
            self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
    def evalSamples(self, x):
        '''Evaluates the samples of quantity of interest and its gradient
        (if supplied) at the given values of the design variables

        :param iterable x: values of the design variables, this is passed as
            the first argument to the function fqoi

        :return: (values of the quantity of interest, values of the gradient)
        :rtype: Tuple
        '''
        # Make sure dimensions are correct
        # u_sample_dimensions = self._processDimensions()

        # Number of design variables; used to size gradient arrays later.
        self._N_dv = len(_makeIter(x))

        if self.verbose:
            print('Evaluating surrogate')
        if self.surrogate is None:
            # No surrogate: evaluate the true qoi/gradient directly, with
            # the design point x bound into single-argument closures.
            def fqoi(u):
                return self.fqoi(x, u)
            def fgrad(u):
                return self.jac(x, u)
            jac = self.jac
        else:
            # Fit surrogates at this design point and sample those instead.
            fqoi, fgrad, surr_jac = self._makeSurrogates(x)
            jac = surr_jac

        u_samples = self._getParameterSamples()

        if self.verbose:
            print('Evaluating quantity of interest at samples')
        q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)

        return q_samples, grad_samples
def evalMetric(self, x, method=None):
'''Evaluates the horsetail matching metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param str method: method to use to evaluate the metric ('empirical' or
'kernel')
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theHM.evalMetric(x0)
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
if self.verbose:
print('----------')
print('At design: ' + str(x))
q_samples, grad_samples = self.evalSamples(x)
if self.verbose:
print('Evaluating metric')
return self.evalMetricFromSamples(q_samples, grad_samples, method)
    def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
        '''Evaluates the horsetail matching metric from given samples of the quantity
        of interest and gradient instead of evaluating them at a design.

        :param np.ndarray q_samples: samples of the quantity of interest,
            size (M_int, M_prob)
        :param np.ndarray grad_samples: samples of the gradient,
            size (M_int, M_prob, n_x)
        :param str method: overrides self.method when given ('empirical'
            or 'kernel')

        :return: metric_value - value of the metric
        :rtype: float
        '''
        # Make sure dimensions are correct
        # u_sample_dimensions = self._processDimensions()

        q_samples = np.array(q_samples)
        # Shapes must match the configured sample counts so the nested
        # loops in the evaluation methods line up.
        if not (q_samples.shape[0] == self.samples_int and
                q_samples.shape[1] == self.samples_prob):
            raise ValueError('Shape of q_samples should be [M_int, M_prob]')

        if grad_samples is not None:
            grad_samples = np.array(grad_samples)
            if not (grad_samples.shape[0] == self.samples_int and
                    grad_samples.shape[1] == self.samples_prob):
                raise ValueError('''Shape of grad_samples
                    should be [M_int, M_prob, n_dv]''')

        if method is None:
            method = self.method

        # Dispatch to the requested evaluation method.
        if method.lower() == 'empirical':
            return self._evalMetricEmpirical(q_samples, grad_samples)
        elif method.lower() == 'kernel':
            return self._evalMetricKernel(q_samples, grad_samples)
        else:
            raise ValueError('Unsupported metric evalation method')
    def getHorsetail(self):
        '''Function that gets vectors of the horsetail plot at the last design
        evaluated.

        :return: upper_curve, lower_curve, CDFs - returns three parameters,
            the first two are tuples containing pairs of x/y vectors of the
            upper and lower bounds on the CDFs (the horsetail plot). The
            third parameter is a list of x/y tuples for individual CDFs
            propagated at each sampled value of the interval uncertainties

        *Example Usage*::

            >>> def myFunc(x, u): return x[0]*x[1] + u
            >>> u = UniformParameter()
            >>> theHM = HorsetailMatching(myFunc, u)
            >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
            >>> matplotlib.pyplot(x1, y1, 'b')
            >>> matplotlib.pyplot(x2, y2, 'b')
            >>> for (x, y) in CDFs:
            ...     matplotlib.pyplot(x, y, 'k:')
            >>> matplotlib.pyplot.show()
        '''
        # _ql etc. only exist once a metric evaluation has stored them.
        if hasattr(self, '_ql'):
            ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
            qh, hh = self._qh, self._hh
            if self._qis is not None:
                # Kernel method: pad curves with flat segments so they
                # span the full integration range with h in [0, 1].
                ql, hl = _appendPlotArrays(ql, hl, self._qis)
                qu, hu = _appendPlotArrays(qu, hu, self._qis)

            # NOTE(review): after the empirical method _qh/_hh are
            # (M_int, M_prob) so rows are individual CDFs; after the
            # kernel method they are stored as (N_quad, M_int), so this
            # iteration yields quadrature slices instead — confirm the
            # intended orientation.
            CDFs = []
            for qi, hi in zip(qh, hh):
                CDFs.append((qi, hi))

            upper_target = [self._ftarg_u(h) for h in hu]
            upper_curve = (qu, hu, upper_target)
            lower_target = [self._ftarg_l(h) for h in hl]
            lower_curve = (ql, hl, lower_target)
            return upper_curve, lower_curve, CDFs
        else:
            raise ValueError('''The metric has not been evaluated at any
                design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
    def _evalMetricEmpirical(self, q_samples, grad_samples=None):
        """Evaluate the metric (and gradient) from samples using
        empirical CDFs.

        q_samples has shape (samples_int, samples_prob); grad_samples,
        if given, has shape (samples_int, samples_prob, N_dv).  Returns
        the metric value, or a (metric, gradient) tuple when
        grad_samples is provided.
        """
        M_prob = self.samples_prob
        M_int = self.samples_int

        # With a single interval sample there is nothing to smooth over,
        # so the smoothing sharpness is irrelevant; use alpha = 1.
        if M_int > 1:
            alpha = self.alpha
        else:
            alpha = 1

        h_htail = np.zeros([M_int, M_prob])
        q_htail = np.zeros([M_int, M_prob])
        q_l = np.zeros(M_prob)
        q_u = np.zeros(M_prob)
        if grad_samples is not None:
            g_htail = np.zeros([M_int, M_prob, self._N_dv])
            g_l = np.zeros([M_prob, self._N_dv])
            g_u = np.zeros([M_prob, self._N_dv])
            Du_grad = np.zeros(self._N_dv)
            Dl_grad = np.zeros(self._N_dv)

        for ii in np.arange(M_int):
            # Get empirical CDF by sorting samples at each value of intervals
            sortinds = np.argsort(q_samples[ii, :])
            q_htail[ii, :] = q_samples[ii, sortinds]
            M = q_samples.shape[1]
            # Midpoint plotting positions: h_j = (j + 0.5)/M.
            h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]

            if grad_samples is not None:
                # Re-order gradients consistently with the sorted qoi.
                for ix in np.arange(self._N_dv):
                    g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]

        for jj in np.arange(M_prob):
            # Envelope over the interval samples at each CDF height: the
            # upper curve takes the smallest q, the lower the largest.
            q_u[jj] = min(q_htail[:, jj])
            q_l[jj] = max(q_htail[:, jj])
            if grad_samples is not None:
                # When gradients are needed, replace the exact extremes
                # with smooth approximations so they are differentiable.
                q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
                q_l[jj] = _extalg(q_htail[:, jj], alpha)
                for ix in np.arange(self._N_dv):
                    # _extgrad: sensitivity of the smooth extreme w.r.t.
                    # each sample (defined elsewhere in this module).
                    gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                    g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                    gtemp = _extgrad(q_htail[:, jj], alpha)
                    g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])

        h_u, h_l = h_htail[0], h_htail[0]  # h is same for all ECDFs
        t_u = [self._ftarg_u(hi) for hi in h_u]
        t_l = [self._ftarg_l(hi) for hi in h_u]

        # Store the curves so getHorsetail() can plot them later.
        self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
        self._qh, self._hh = q_htail, h_htail
        self._tl, self._tu = t_l, t_u
        self._qis = None

        # Mean squared distance of each horsetail curve to its target.
        Du = (1./M_prob)*sum((q_u - t_u)**2)
        Dl = (1./M_prob)*sum((q_l - t_l)**2)

        dhat = np.sqrt(Du + Dl)
        if self.verbose:
            print('Metric: ' + str(dhat))

        if grad_samples is not None:
            # Chain rule through the squared differences.
            for ix in np.arange(self._N_dv):
                Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
                Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])
            dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
            if self.verbose:
                print('Gradient: ' + str([g for g in dhat_grad]))
            return dhat, dhat_grad
        else:
            return dhat
    def _getKernelParameters(self, q_samples):
        """Return (integration points, bandwidth) for the kernel metric,
        computing and caching defaults on first use.

        The bandwidth defaults to a scaled Scott's-rule estimate from the
        first row of q_samples; the integration points default to 10000
        points spanning three times the sample range, then get trimmed to
        the region within 20 bandwidths of the samples.
        """
        # If kernel bandwidth not specified, find it using Scott's rule
        if self.kernel_bandwidth is None:
            if len(self.prob_uncertainties) > 0:
                if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                    # Near-constant samples: fall back to a tiny bandwidth.
                    bw = 1e-6
                else:
                    bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                            *np.std(q_samples[0,:]))
            else:
                bw = 1e-3
            self.kernel_bandwidth = bw
        else:
            bw = self.kernel_bandwidth

        ## Initalize arrays and prepare calculation
        q_min = np.amin(q_samples)
        q_max = np.amax(q_samples)
        if self.integration_points is None:
            q_range = q_max - q_min
            qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
            self.integration_points = qis_full
        else:
            qis_full = np.array(self.integration_points)

        # Trim the integration points to the region that actually
        # contributes: within 20 bandwidths of the sample range.
        ii_low, ii_high = 0, len(qis_full)
        try:
            ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                    qi > q_max + 20*bw)
        except StopIteration:
            warnings.warn('Sample found higher than range of integration points')

        try:
            # Scan from the top downwards for the last point below range.
            iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                    qi < q_min - 20*bw)
            ii_low = len(qis_full) - (iiN_low+1)
        except StopIteration:
            warnings.warn('Sample found lower than range of integration points')

        qis = qis_full[ii_low:ii_high+1]  # Only evaluate over range of samples
        self._qis = qis
        return qis, bw
    def _evalMetricKernel(self, q_samples, grad_samples=None):
        """Evaluate the metric (and gradient) using kernel-smoothed CDFs.

        Implements Algorithms 1-3 of the horsetail matching publication:
        build a kernel-estimated CDF for each interval sample, take a
        (smooth) envelope over them, then integrate the squared distance
        to the targets.  Returns the metric, or a (metric, gradient)
        tuple when grad_samples is given.
        """
        qis, bw = self._getKernelParameters(q_samples)
        N_quad = len(qis)
        M_prob = self.samples_prob
        M_int = self.samples_int

        # Smoothing sharpness is irrelevant with one interval sample.
        if M_int > 1:
            alpha = self.alpha
        else:
            alpha = 1

        fhtail = np.zeros([N_quad, M_int])
        qhtail = np.zeros([N_quad, M_int])
        if grad_samples is not None:
            fht_grad = np.zeros([N_quad, M_int, self._N_dv])
            hu_grad = np.zeros([N_quad, self._N_dv])
            hl_grad = np.zeros([N_quad, self._N_dv])
            Du_grad = np.zeros(self._N_dv)
            Dl_grad = np.zeros(self._N_dv)

        # ALGORITHM 1 from publication
        # Evaluate all individual CDFs and their gradients
        for mm in np.arange(M_int):
            qjs = q_samples[mm, :]
            # rmat[i, j] = qis[i] - qjs[j]: argument of each kernel.
            rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
            if grad_samples is not None:
                Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                        ktype=self.kernel_type, bGrad=True)
                for ix in np.arange(self._N_dv):
                    grad_js = grad_samples[mm, :, ix]
                    fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
            else:
                Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                        bGrad=False)
            # Sum the kernel contributions to obtain this CDF estimate.
            fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
            qhtail[:, mm] = qis

        # ALGORITHM 2 from publication
        # Find horsetail curves - envelope of the CDFs and their gradients
        # In Matrix form
        if grad_samples is None:
            hu = np.max(fhtail, axis=1).flatten()
            hl = np.min(fhtail, axis=1).flatten()
        else:
            # Smooth extremes keep the envelope differentiable.
            hu = _extalg(fhtail, alpha, axis=1).flatten()
            hl = _extalg(fhtail, -1*alpha, axis=1).flatten()
            Su_prime = _extgrad(fhtail, alpha, axis=1)
            Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
            for kx in np.arange(self._N_dv):
                fis_grad = fht_grad[:, :, kx]
                for ii in np.arange(N_quad):
                    hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                    hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])

        # ALGORITHM 3 from publication
        # Evaluate overall metric and gradient using matrix multipliation
        tu = np.array([self._ftarg_u(hi) for hi in hu])
        tl = np.array([self._ftarg_l(hi) for hi in hl])
        Du = _matrix_integration(qis, hu, tu)
        Dl = _matrix_integration(qis, hl, tl)
        dhat = float(np.sqrt(Du + Dl))

        # Store the curves so getHorsetail() can plot them later.
        self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
        self._qh, self._hh = qhtail, fhtail
        self._tl, self._tu = tl, tu

        if self.verbose:
            print('Metric: ' + str(dhat))

        if grad_samples is not None:
            # Target sensitivities via finite differences of t(h).
            tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
            tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
            for kx in np.arange(self._N_dv):
                Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
                Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)
            dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
            if self.verbose:
                print('Gradient: ' + str([g for g in dhat_grad]))
            return dhat, dhat_grad
        else:
            return dhat
    def _makeSurrogates(self, x):
        """Fit surrogate models of the qoi (and gradient) at design x.

        Evaluates the true fqoi (and jacobian) at the surrogate_points
        quadrature samples of the uncertainties, fits self.surrogate to
        the results, and returns closures (fqoi, fgrad, surr_jac) with
        the same calling conventions as the true functions.
        """
        # Get quadrature points
        if self.surrogate_points is None:
            # Default: 5-point tensor grid on [-1, 1] per uncertainty.
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
                    copy=False)
            u_sparse = np.vstack([m.flatten() for m in mesh]).T
        else:
            u_sparse = self.surrogate_points

        N_sparse = u_sparse.shape[0]
        q_sparse = np.zeros(N_sparse)

        # Get surrogates in correct form
        if not self.jac:
            # No gradients: fit the qoi surrogate only.
            for iu, u in enumerate(u_sparse):
                q_sparse[iu] = self.fqoi(x, u)
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            def fqoi(u):
                return surr_qoi(u)
            fgrad = False
            surr_jac = False
        else:
            g_sparse = np.zeros([N_sparse, self._N_dv])
            for iu, u in enumerate(u_sparse):
                if isinstance(self.jac, bool) and self.jac:
                    # jac=True: fqoi returns (q, grad) together.
                    q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
                else:
                    q_sparse[iu] = self.fqoi(x, u)
                    g_sparse[iu, :] = self.jac(x, u)

            if not self.surrogate_jac:
                # Fit a separate scalar surrogate to each gradient
                # component using the same surrogate constructor.
                fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                for k in np.arange(self._N_dv):
                    fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])
                def surr_grad(u):
                    return [f(u) for f in fpartial]
            else:
                if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                    # One call fits qoi and gradient surrogates together.
                    surr_qoi, surr_grad = self.surrogate(
                            u_sparse, q_sparse, g_sparse)
                else:
                    surr_qoi = self.surrogate(u_sparse, q_sparse)
                    surr_grad = self.surrogate_jac(u_sparse, g_sparse)

            def fqoi(u):
                return(surr_qoi(u))
            def fgrad(u):
                return(surr_grad(u))
            surr_jac = fgrad

        return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
get_new = True
if self.reuse_samples and self.u_samples is not None:
if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
if self.verbose:
print('''Stored samples do not match current dimensions,
getting new samples''')
else:
get_new = False
if get_new:
if self.verbose:
print('Getting uncertain parameter samples')
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
N_prob = len(self.prob_uncertainties)
N_int = len(self.int_uncertainties)
# u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])
u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
len(self.prob_uncertainties)])
u_samples_int = np.zeros([self.samples_int, self.samples_prob,
len(self.int_uncertainties)])
u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
for kk, uk in enumerate(self.int_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_ints[:, kk] = samps
elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds
lb, ub = uk[0], uk[1]
u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
u_ints[0, kk] = lb
u_ints[-1, kk] = ub
elif hasattr(uk, 'getSample'):
for ii in np.arange(self.samples_int):
u_ints[ii, kk] = uk.getSample()
else:
raise TypeError('Unsupported interval uncertainty type')
u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))
u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
for kk, uk in enumerate(self.prob_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_probs[:, kk] = samps
elif hasattr(uk, 'getSample'):
for jj in np.arange(self.samples_prob):
u_probs[jj, kk] = uk.getSample()
else:
raise TypeError('Unsupported probabilistic uncertainty type')
u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))
u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
self.u_samples = u_samples
return u_samples
else:
if self.verbose:
print('Re-using stored samples')
return self.u_samples
    def _evalSamples(self, u_samples, fqoi, fgrad, jac):
        """Evaluate fqoi (and gradients) at every uncertainty sample.

        u_samples has shape (samples_int, samples_prob, n_u).  jac
        controls how gradients are obtained: False for none, True when
        fqoi returns a (q, grad) pair, or a callable in which case fgrad
        is evaluated separately.  Returns (q_samples, grad_samples),
        where grad_samples is None when jac is False.
        """
        # Array of shape (M_int, M_prob)
        grad_samples = None
        q_samples = np.zeros([self.samples_int, self.samples_prob])
        if not jac:
            for ii in np.arange(q_samples.shape[0]):
                for jj in np.arange(q_samples.shape[1]):
                    q_samples[ii, jj] = fqoi(u_samples[ii, jj])
        else:
            grad_samples = np.zeros([self.samples_int, self.samples_prob,
                    self._N_dv])
            for ii in np.arange(q_samples.shape[0]):
                for jj in np.arange(q_samples.shape[1]):
                    if isinstance(jac, bool) and jac:
                        # fqoi returns the qoi and its gradient together.
                        (q, grad) = fqoi(u_samples[ii, jj])
                        q_samples[ii, jj] = float(q)
                        grad_samples[ii, jj, :] = [_ for _ in grad]
                    else:
                        q_samples[ii, jj] = fqoi(u_samples[ii, jj])
                        grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])

        # Cache the raw samples on the instance for later inspection.
        self.grad_samples = grad_samples
        self.q_samples = q_samples

        return q_samples, grad_samples
##############################################################################
## Private functions
##############################################################################
def _extalg(xarr, alpha=100, axis=None):
'''Given an array xarr of values, smoothly return the max/min'''
return (np.sum(xarr * np.exp(alpha*xarr), axis=axis, keepdims=True)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
def _ramp(x, width):
    """CDF of a uniform kernel of the given width centred on zero.

    Clamps (x + width/2)/width to [0, 1]: 0 at x <= -width/2, rising
    linearly to 1 at x >= +width/2.  The smooth min/max helpers are exact
    here because they are called with their default eps = 0.

    BUG FIX: this previously used (x - width/2), i.e. the CDF of a
    uniform kernel supported on [width/2, 3*width/2].  That was
    inconsistent with the centred uniform density used as its gradient in
    _kernel (_step(points + width/2) - _step(points - width/2)) and with
    the centred triangle CDF in _trint, shifting the uniform-kernel CDF
    estimate by a full kernel width.
    """
    return _minsmooth(1, _maxsmooth(0, (x + width/2)*(1/width)))
def _trint(x, width):
    # CDF of a symmetric triangle kernel of total width `width` centred
    # on zero: 0 at x <= -width/2, 0.5 at x = 0, 1 at x >= +width/2.
    w = width/2.
    # Clamp x into the kernel support [-w, w].
    xb = _maxsmooth(-w, _minsmooth(x, w))
    # Quadratic pieces of the integrated triangle density: y1 (capped at
    # 0.5) supplies the lower half of the CDF, y2 (floored at 0) the
    # part above 0.5.
    y1 = 0.5 + xb/w + xb**2/(2*w**2)
    y2 = xb/w - xb**2/(2*w**2)
    return _minsmooth(y1, 0.5) + _maxsmooth(y2, 0.0)
def _minsmooth(a, b, eps=0.0000):
return 0.5*(a + b - np.sqrt((a-b)**2 + eps**2))
def _maxsmooth(a, b, eps=0.0000):
return 0.5*(a + b + np.sqrt((a-b)**2 + eps**2))
def _step(x):
return 1 * (x > 0)
def _erf(r):
## Numerical implementation of the error function for matrix comptibility
# save the sign of x
sign = np.sign(r)
x = np.absolute(r)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def _kernel(points, M, bw, ktype='gauss', bGrad=False):
    """Kernel CDF matrix (and optionally its derivative matrix).

    :param np.ndarray points: matrix of differences q_i - q_j at which
        to evaluate the integrated kernel
    :param int M: number of probabilistic samples (normalisation factor)
    :param float bw: kernel bandwidth
    :param str ktype: 'gauss'/'gaussian', 'uni'/'uniform' or
        'tri'/'triangle'
    :param bool bGrad: if True also return the kernel density matrix
        (the derivative of the CDF matrix)
    :raises ValueError: if ktype is not recognised (previously an
        unrecognised type fell through and raised UnboundLocalError)
    """
    if ktype == 'gauss' or ktype == 'gaussian':
        # Integrated Gaussian kernel: normal CDF via the error function.
        KernelMat = (1./M)*((1 + _erf((points/bw)/np.sqrt(2.)))/2.)
    elif ktype == 'uniform' or ktype == 'uni':
        # Width chosen so the uniform kernel has standard deviation bw.
        KernelMat = (1./M)*_ramp(points, width=bw*np.sqrt(12))
    elif ktype == 'triangle' or ktype == 'tri':
        # Width chosen so the triangle kernel has standard deviation bw.
        KernelMat = (1./M)*_trint(points, width=bw*2.*np.sqrt(6))
    else:
        raise ValueError('Unsupported kernel type: ' + str(ktype))

    if not bGrad:
        return KernelMat

    if ktype == 'gauss' or ktype == 'gaussian':
        # Gaussian density.
        const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
        KernelGradMat = const_term * np.exp(-(1./2.) * (points/bw)**2)
    elif ktype == 'uniform' or ktype == 'uni':
        # Uniform density as a difference of step functions.
        width = bw*np.sqrt(12)
        const = (1./M)*(1./width)
        KernelGradMat = const*(_step(points+width/2) -
                _step(points-width/2))
    else:
        # Triangle density as a difference of shifted ramps.
        width = bw*2.*np.sqrt(6)
        const = (1./M)*(2./width)
        KernelGradMat = const*(_ramp(points+width/4, width/2) -
                _ramp(points-width/4, width/2))

    return KernelMat, KernelGradMat
def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp
def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad
def _appendPlotArrays(q, h, integration_points):
q = np.insert(q, 0, q[0])
h = np.insert(h, 0, 0)
q = np.insert(q, 0, min(integration_points))
h = np.insert(h, 0, 0)
q = np.append(q, q[-1])
h = np.append(h, 1)
q = np.append(q, max(integration_points))
h = np.append(h, 1)
return q, h
def _finDiff(fobj, dv, f0=None, eps=10**-6):
if f0 is None:
f0 = fobj(dv)
fbase = copy.copy(f0)
fnew = fobj(dv + eps)
return float((fnew - fbase)/eps)
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
def _intervalSample(returned_samples, bounds):
if len(returned_samples) < 1:
return bounds[0]
elif len(returned_samples) < 2:
return bounds[1]
else:
return np.random.uniform(bounds[0], bounds[1])
|
lwcook/horsetail-matching | horsetailmatching/hm.py | _matrix_integration | python | def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp | Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L921-L936 | null | import pdb
import time
import math
import copy
import warnings
import numpy as np
class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetil matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
ftarget=None, jac=False, method=None,
samples_prob=100, samples_int=50, integration_points=None,
kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False):
self.fqoi = fqoi
# self.uncertain_parameters = uncertain_parameters
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = int_uncertainties
self.ftarget = ftarget
self.jac = jac
self.method = method # Must be done after setting jac
self.samples_prob = samples_prob
self.samples_int = samples_int
self.integration_points = integration_points
self.kernel_bandwidth = kernel_bandwidth
self.kernel_type = kernel_type
self.alpha = alpha
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
@property
def prob_uncertainties(self):
return self._prob_uncertainties
@prob_uncertainties.setter
def prob_uncertainties(self, params):
self._prob_uncertainties = _makeIter(params)
@property
def int_uncertainties(self):
return self._int_uncertainties
@int_uncertainties.setter
def int_uncertainties(self, params):
self._int_uncertainties = _makeIter(params)
@property
def samples_prob(self):
return self._samples_prob
@samples_prob.setter
def samples_prob(self, value):
if len(self.prob_uncertainties) > 0:
self._samples_prob = value
else:
self._samples_prob = 1
@property
def samples_int(self):
return self._samples_int
@samples_int.setter
def samples_int(self, value):
if len(self.int_uncertainties) > 0:
self._samples_int = value
else:
self._samples_int = 1
@property
def method(self):
return self._method
@method.setter
def method(self, value):
if value is None:
if self.jac is False:
self._method = 'empirical'
else:
self._method = 'kernel'
else:
self._method = value
@property
def ftarget(self):
return self._ftarget
@ftarget.setter
def ftarget(self, value):
def standardTarget(h):
return 0
try:
iter(value)
self._ftarg_u = value[0]
self._ftarg_l = value[1]
self._ftarget = value
except:
if value is None:
self._ftarget = standardTarget
else:
self._ftarget = value
self._ftarg_u = self._ftarget
self._ftarg_l = self._ftarget
@property
def u_samples(self):
return self._u_samples
@u_samples.setter
def u_samples(self, samples):
if samples is not None:
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
if (not isinstance(samples, np.ndarray) or
samples.shape != (self.samples_int, self.samples_prob, N_u)):
raise TypeError('u_samples should be a np.array of size'
'(samples_int, samples_prob, num_uncertanities)')
self._u_samples = samples
@property
def kernel_type(self):
return self._kernel_type
@kernel_type.setter
def kernel_type(self, value):
allowed_types = ['gaussian', 'uniform', 'triangle']
if value not in allowed_types:
raise ValueError('Kernel type must be one of'+
', '.join([str(t) for t in allowed_types]))
else:
self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
def evalSamples(self, x):
'''Evalautes the samples of quantity of interest and its gradient
(if supplied) at the given values of the design variables
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: (values of the quantity of interest, values of the gradient)
:rtype: Tuple
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose:
print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
return q_samples, grad_samples
    def evalMetric(self, x, method=None):
        '''Evaluates the horsetail matching metric at given values of the
        design variables.
        :param iterable x: values of the design variables, this is passed as
            the first argument to the function fqoi
        :param str method: method to use to evaluate the metric ('empirical' or
            'kernel')
        :return: metric_value - value of the metric evaluated at the design
            point given by x
        :rtype: float
        *Example Usage*::
            >>> def myFunc(x, u): return x[0]*x[1] + u
            >>> u1 = UniformParameter()
            >>> theHM = HorsetailMatching(myFunc, u)
            >>> x0 = [1, 2]
            >>> theHM.evalMetric(x0)
        '''
        # Make sure dimensions are correct
        # u_sample_dimensions = self._processDimensions()
        if self.verbose:
            print('----------')
            print('At design: ' + str(x))
        # Sample the qoi (and gradient) at this design, then delegate the
        # actual metric computation to evalMetricFromSamples.
        q_samples, grad_samples = self.evalSamples(x)
        if self.verbose:
            print('Evaluating metric')
        return self.evalMetricFromSamples(q_samples, grad_samples, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradien,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
raise ValueError('Unsupported metric evalation method')
    def getHorsetail(self):
        '''Function that gets vectors of the horsetail plot at the last design
        evaluated.
        :return: upper_curve, lower_curve, CDFs - returns three parameters,
            the first two are tuples containing pairs of x/y vectors of the
            upper and lower bounds on the CDFs (the horsetail plot). The
            third parameter is a list of x/y tuples for individual CDFs
            propagated at each sampled value of the interval uncertainties
        *Example Usage*::
            >>> def myFunc(x, u): return x[0]*x[1] + u
            >>> u = UniformParameter()
            >>> theHM = HorsetailMatching(myFunc, u)
            >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
            >>> matplotlib.pyplot(x1, y1, 'b')
            >>> matplotlib.pyplot(x2, y2, 'b')
            >>> for (x, y) in CDFs:
            ...     matplotlib.pyplot(x, y, 'k:')
            >>> matplotlib.pyplot.show()
        '''
        # _ql/_qu/_hl/_hu/_qh/_hh are stored by _evalMetricEmpirical and
        # _evalMetricKernel, so the horsetail only exists after a metric
        # evaluation.
        if hasattr(self, '_ql'):
            ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
            qh, hh = self._qh, self._hh
            # _qis is non-None only for the kernel method; pad the curves
            # with flat tails across the integration range for plotting.
            if self._qis is not None:
                ql, hl = _appendPlotArrays(ql, hl, self._qis)
                qu, hu = _appendPlotArrays(qu, hu, self._qis)
            CDFs = []
            for qi, hi in zip(qh, hh):
                CDFs.append((qi, hi))
            # Evaluate the target inverse-CDFs alongside each bound
            upper_target = [self._ftarg_u(h) for h in hu]
            upper_curve = (qu, hu, upper_target)
            lower_target = [self._ftarg_l(h) for h in hl]
            lower_curve = (ql, hl, lower_target)
            return upper_curve, lower_curve, CDFs
        else:
            raise ValueError('''The metric has not been evaluated at any
                design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
    def _evalMetricEmpirical(self, q_samples, grad_samples=None):
        '''Evaluate the metric (and its gradient when grad_samples is
        given) from empirical CDFs obtained by sorting the samples at each
        interval-uncertainty realization.
        :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
        :param np.ndarray grad_samples: gradient samples of shape
            (M_int, M_prob, N_dv), or None
        :return: dhat, or (dhat, dhat_grad) when gradients are available
        '''
        M_prob = self.samples_prob
        M_int = self.samples_int
        # alpha controls the sharpness of the smooth min/max used below;
        # with a single interval sample no envelope is needed.
        if M_int > 1:
            alpha = self.alpha
        else:
            alpha = 1
        h_htail = np.zeros([M_int, M_prob])
        q_htail = np.zeros([M_int, M_prob])
        q_l = np.zeros(M_prob)
        q_u = np.zeros(M_prob)
        if grad_samples is not None:
            g_htail = np.zeros([M_int, M_prob, self._N_dv])
            g_l = np.zeros([M_prob, self._N_dv])
            g_u = np.zeros([M_prob, self._N_dv])
            Du_grad = np.zeros(self._N_dv)
            Dl_grad = np.zeros(self._N_dv)
        for ii in np.arange(M_int):
            # Get empirical CDF by sorting samples at each value of intervals
            sortinds = np.argsort(q_samples[ii, :])
            q_htail[ii, :] = q_samples[ii, sortinds]
            M = q_samples.shape[1]
            # Midpoint plotting positions: h_j = (j + 0.5)/M
            h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]
            if grad_samples is not None:
                # Reorder gradients consistently with the sorted qoi values
                for ix in np.arange(self._N_dv):
                    g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]
        for jj in np.arange(M_prob):
            # Envelope across interval samples: q_u takes the smallest q at
            # each h, q_l the largest
            q_u[jj] = min(q_htail[:, jj])
            q_l[jj] = max(q_htail[:, jj])
            if grad_samples is not None:
                # With gradients, overwrite with smooth (differentiable)
                # min/max so the envelope can be differentiated
                q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
                q_l[jj] = _extalg(q_htail[:, jj], alpha)
                for ix in np.arange(self._N_dv):
                    gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                    g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                    gtemp = _extgrad(q_htail[:, jj], alpha)
                    g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
        h_u, h_l = h_htail[0], h_htail[0] # h is same for all ECDFs
        t_u = [self._ftarg_u(hi) for hi in h_u]
        t_l = [self._ftarg_l(hi) for hi in h_u]
        # Stash curves so getHorsetail() can retrieve them later
        self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
        self._qh, self._hh = q_htail, h_htail
        self._tl, self._tu = t_l, t_u
        self._qis = None
        # Mean squared difference between each envelope curve and its target
        Du = (1./M_prob)*sum((q_u - t_u)**2)
        Dl = (1./M_prob)*sum((q_l - t_l)**2)
        dhat = np.sqrt(Du + Dl)
        if self.verbose:
            print('Metric: ' + str(dhat))
        if grad_samples is not None:
            for ix in np.arange(self._N_dv):
                Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
                Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])
            # Chain rule through the square root
            dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
            if self.verbose:
                print('Gradient: ' + str([g for g in dhat_grad]))
            return dhat, dhat_grad
        else:
            return dhat
    def _getKernelParameters(self, q_samples):
        '''Determine the kernel bandwidth (Scott's rule if not specified)
        and the integration points over which to evaluate the smoothed
        CDFs, trimmed to the range actually covered by the samples.
        :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
        :return: (qis, bw) - integration points and bandwidth
        '''
        # If kernel bandwidth not specified, find it using Scott's rule
        if self.kernel_bandwidth is None:
            if len(self.prob_uncertainties) > 0:
                # Guard against a degenerate (near-constant) sample set
                if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                    bw = 1e-6
                else:
                    bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                              *np.std(q_samples[0,:]))
            else:
                bw = 1e-3
            self.kernel_bandwidth = bw
        else:
            bw = self.kernel_bandwidth
        ## Initialize arrays and prepare calculation
        q_min = np.amin(q_samples)
        q_max = np.amax(q_samples)
        if self.integration_points is None:
            # Default: spread points over three times the sample range
            q_range = q_max - q_min
            qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
            self.integration_points = qis_full
        else:
            qis_full = np.array(self.integration_points)
        ii_low, ii_high = 0, len(qis_full)
        # Trim the integration points to the sample range (plus a margin of
        # 20 bandwidths) to avoid wasted kernel evaluations
        try:
            ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                    qi > q_max + 20*bw)
        except StopIteration:
            warnings.warn('Sample found higher than range of integration points')
        try:
            # Scan from the top end downwards for the lower cutoff
            iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                    qi < q_min - 20*bw)
            ii_low = len(qis_full) - (iiN_low+1)
        except StopIteration:
            warnings.warn('Sample found lower than range of integration points')
        qis = qis_full[ii_low:ii_high+1] # Only evaluate over range of samples
        self._qis = qis
        return qis, bw
    def _evalMetricKernel(self, q_samples, grad_samples=None):
        '''Evaluate the metric using kernel-smoothed CDFs so the result
        (and its gradient) is differentiable. Implements the three
        algorithms referenced in the comments below.
        :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
        :param np.ndarray grad_samples: gradient samples of shape
            (M_int, M_prob, N_dv), or None
        :return: dhat, or (dhat, dhat_grad) when gradients are available
        '''
        qis, bw = self._getKernelParameters(q_samples)
        N_quad = len(qis)
        M_prob = self.samples_prob
        M_int = self.samples_int
        # alpha controls the sharpness of the smooth min/max envelope
        if M_int > 1:
            alpha = self.alpha
        else:
            alpha = 1
        fhtail = np.zeros([N_quad, M_int])
        qhtail = np.zeros([N_quad, M_int])
        if grad_samples is not None:
            fht_grad = np.zeros([N_quad, M_int, self._N_dv])
            hu_grad = np.zeros([N_quad, self._N_dv])
            hl_grad = np.zeros([N_quad, self._N_dv])
            Du_grad = np.zeros(self._N_dv)
            Dl_grad = np.zeros(self._N_dv)
        # ALGORITHM 1 from publication
        # Evaluate all individual CDFs and their gradients
        for mm in np.arange(M_int):
            qjs = q_samples[mm, :]
            # Pairwise distances between integration points and samples
            rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
            if grad_samples is not None:
                Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                        ktype=self.kernel_type, bGrad=True)
                for ix in np.arange(self._N_dv):
                    grad_js = grad_samples[mm, :, ix]
                    fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
            else:
                Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                        bGrad=False)
            # Sum kernel contributions to get the smoothed CDF values
            fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
            qhtail[:, mm] = qis
        # ALGORITHM 2 from publication
        # Find horsetail curves - envelope of the CDFs and their gradients
        # In Matrix form
        if grad_samples is None:
            hu = np.max(fhtail, axis=1).flatten()
            hl = np.min(fhtail, axis=1).flatten()
        else:
            # Smooth max/min keep the envelope differentiable
            hu = _extalg(fhtail, alpha, axis=1).flatten()
            hl = _extalg(fhtail, -1*alpha, axis=1).flatten()
            Su_prime = _extgrad(fhtail, alpha, axis=1)
            Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
            for kx in np.arange(self._N_dv):
                fis_grad = fht_grad[:, :, kx]
                for ii in np.arange(N_quad):
                    hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                    hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])
        # ALGORITHM 3 from publication
        # Evaluate overall metric and gradient using matrix multiplication
        tu = np.array([self._ftarg_u(hi) for hi in hu])
        tl = np.array([self._ftarg_l(hi) for hi in hl])
        Du = _matrix_integration(qis, hu, tu)
        Dl = _matrix_integration(qis, hl, tl)
        dhat = float(np.sqrt(Du + Dl))
        # Stash curves so getHorsetail() can retrieve them later
        self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
        self._qh, self._hh = qhtail, fhtail
        self._tl, self._tu = tl, tu
        if self.verbose:
            print('Metric: ' + str(dhat))
        if grad_samples is not None:
            # Finite-difference the target functions for the chain rule
            tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
            tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
            for kx in np.arange(self._N_dv):
                Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
                Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)
            dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
            if self.verbose:
                print('Gradient: ' + str([g for g in dhat_grad]))
            return dhat, dhat_grad
        else:
            return dhat
    def _makeSurrogates(self, x):
        '''Fit surrogate models to the qoi (and gradient if required) at
        this design point, returning callables of the uncertainty vector
        only, mirroring the shapes used by evalSamples.
        :param iterable x: values of the design variables
        :return: (fqoi, fgrad, surr_jac) - surrogate qoi function,
            surrogate gradient function (or False), and the jac-style
            flag/function
        '''
        # Get quadrature points
        if self.surrogate_points is None:
            # Default: 5-point tensor grid on [-1, 1] in each dimension
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
                    copy=False)
            u_sparse = np.vstack([m.flatten() for m in mesh]).T
        else:
            u_sparse = self.surrogate_points
        N_sparse = u_sparse.shape[0]
        q_sparse = np.zeros(N_sparse)
        # Get surrogates in correct form
        if not self.jac:
            # No gradients: fit a single surrogate to the qoi values
            for iu, u in enumerate(u_sparse):
                q_sparse[iu] = self.fqoi(x, u)
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            def fqoi(u):
                return surr_qoi(u)
            fgrad = False
            surr_jac = False
        else:
            g_sparse = np.zeros([N_sparse, self._N_dv])
            for iu, u in enumerate(u_sparse):
                if isinstance(self.jac, bool) and self.jac:
                    # jac=True: fqoi returns (qoi, gradient) together
                    q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
                else:
                    q_sparse[iu] = self.fqoi(x, u)
                    g_sparse[iu, :] = self.jac(x, u)
            if not self.surrogate_jac:
                # Fit the same surrogate type separately to the qoi and to
                # each component of the gradient
                fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                for k in np.arange(self._N_dv):
                    fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])
                def surr_grad(u):
                    return [f(u) for f in fpartial]
            else:
                if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                    # surrogate() fits qoi and gradient jointly
                    surr_qoi, surr_grad = self.surrogate(
                            u_sparse, q_sparse, g_sparse)
                else:
                    # A dedicated gradient-surrogate function was supplied
                    surr_qoi = self.surrogate(u_sparse, q_sparse)
                    surr_grad = self.surrogate_jac(u_sparse, g_sparse)
            def fqoi(u):
                return(surr_qoi(u))
            def fgrad(u):
                return(surr_grad(u))
            surr_jac = fgrad
        return fqoi, fgrad, surr_jac
    def _getParameterSamples(self):
        '''Draw (or reuse) samples of the uncertain parameters, returned as
        an array of shape (samples_int, samples_prob, N_u) with interval
        uncertainty values first along the last axis.
        :return: np.ndarray of parameter samples
        '''
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        get_new = True
        # Reuse stored samples only if allowed and dimensions still match
        if self.reuse_samples and self.u_samples is not None:
            if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
                if self.verbose:
                    print('''Stored samples do not match current dimensions,
                            getting new samples''')
            else:
                get_new = False
        if get_new:
            if self.verbose:
                print('Getting uncertain parameter samples')
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            N_prob = len(self.prob_uncertainties)
            N_int = len(self.int_uncertainties)
            # u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])
            u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
                len(self.prob_uncertainties)])
            u_samples_int = np.zeros([self.samples_int, self.samples_prob,
                len(self.int_uncertainties)])
            u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
            for kk, uk in enumerate(self.int_uncertainties):
                if callable(uk):
                    # Custom sampling function supplied by the user
                    samps = np.array(uk()).flatten()
                    if len(samps) != self.samples_prob:
                        raise Exception('Number of samples returned not equal ' +
                                'to specified number of samples: please set number of ' +
                                'samples with samples_prob attribute')
                    else:
                        u_ints[:, kk] = samps
                elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds
                    lb, ub = uk[0], uk[1]
                    u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
                    # Force the interval endpoints to appear in the samples
                    u_ints[0, kk] = lb
                    u_ints[-1, kk] = ub
                elif hasattr(uk, 'getSample'):
                    for ii in np.arange(self.samples_int):
                        u_ints[ii, kk] = uk.getSample()
                else:
                    raise TypeError('Unsupported interval uncertainty type')
            # Repeat interval samples across the probabilistic axis
            u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))
            u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
            for kk, uk in enumerate(self.prob_uncertainties):
                if callable(uk):
                    samps = np.array(uk()).flatten()
                    if len(samps) != self.samples_prob:
                        raise Exception('Number of samples returned not equal ' +
                                'to specified number of samples: please set number of ' +
                                'samples with samples_prob attribute')
                    else:
                        u_probs[:, kk] = samps
                elif hasattr(uk, 'getSample'):
                    for jj in np.arange(self.samples_prob):
                        u_probs[jj, kk] = uk.getSample()
                else:
                    raise TypeError('Unsupported probabilistic uncertainty type')
            # Repeat probabilistic samples across the interval axis
            u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))
            # Interval uncertainties come first along the last axis
            u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
            self.u_samples = u_samples
            return u_samples
        else:
            if self.verbose:
                print('Re-using stored samples')
            return self.u_samples
    def _evalSamples(self, u_samples, fqoi, fgrad, jac):
        '''Evaluate the qoi (and gradient) functions at every sample of
        the uncertain parameters.
        :param np.ndarray u_samples: samples, shape (M_int, M_prob, N_u)
        :param function fqoi: qoi function of u only
        :param function fgrad: gradient function of u only
        :param bool/function jac: gradient mode (False: no gradients,
            True: fqoi returns (q, grad), function: call fgrad separately)
        :return: (q_samples, grad_samples); grad_samples is None when jac
            is falsy
        '''
        # Array of shape (M_int, M_prob)
        grad_samples = None
        q_samples = np.zeros([self.samples_int, self.samples_prob])
        if not jac:
            for ii in np.arange(q_samples.shape[0]):
                for jj in np.arange(q_samples.shape[1]):
                    q_samples[ii, jj] = fqoi(u_samples[ii, jj])
        else:
            grad_samples = np.zeros([self.samples_int, self.samples_prob,
                self._N_dv])
            for ii in np.arange(q_samples.shape[0]):
                for jj in np.arange(q_samples.shape[1]):
                    if isinstance(jac, bool) and jac:
                        # jac=True: fqoi returns qoi and gradient together
                        (q, grad) = fqoi(u_samples[ii, jj])
                        q_samples[ii, jj] = float(q)
                        grad_samples[ii, jj, :] = [_ for _ in grad]
                    else:
                        q_samples[ii, jj] = fqoi(u_samples[ii, jj])
                        grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])
        # Stash raw samples for later inspection
        self.grad_samples = grad_samples
        self.q_samples = q_samples
        return q_samples, grad_samples
##############################################################################
## Private functions
##############################################################################
def _extalg(xarr, alpha=100, axis=None):
'''Given an array xarr of values, smoothly return the max/min'''
return (np.sum(xarr * np.exp(alpha*xarr), axis=axis, keepdims=True)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
def _extgrad(xarr, alpha=100, axis=None):
'''Given an array xarr of values, return the gradient of the smooth min/max
swith respect to each entry in the array'''
term1 = (np.exp(alpha*xarr)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
term2 = 1 + alpha*(xarr - _extalg(xarr, alpha, axis=axis))
return term1*term2
def _ramp(x, width):
    '''Piecewise-linear CDF of a uniform kernel: rises from 0 to 1 over an
    interval of length width, built from the smooth min/max helpers.'''
    scaled = (x - width/2)*(1/width)
    return _minsmooth(1, _maxsmooth(0, scaled))
def _trint(x, width):
    '''CDF of a triangular kernel with total base width, with the input
    clamped so the quadratic pieces only apply inside [-width/2, width/2].'''
    half = width/2.
    xc = _maxsmooth(-half, _minsmooth(x, half))
    quad = xc**2/(2*half**2)
    lower_piece = 0.5 + xc/half + quad
    upper_piece = xc/half - quad
    return _minsmooth(lower_piece, 0.5) + _maxsmooth(upper_piece, 0.0)
def _minsmooth(a, b, eps=0.0000):
return 0.5*(a + b - np.sqrt((a-b)**2 + eps**2))
def _maxsmooth(a, b, eps=0.0000):
return 0.5*(a + b + np.sqrt((a-b)**2 + eps**2))
def _step(x):
return 1 * (x > 0)
def _erf(r):
## Numerical implementation of the error function for matrix comptibility
# save the sign of x
sign = np.sign(r)
x = np.absolute(r)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def _kernel(points, M, bw, ktype='gauss', bGrad=False):
    '''Evaluates the kernel CDF (and optionally its derivative, the kernel
    PDF) at a matrix of distances, normalized by the number of samples M.

    :param np.ndarray points: matrix of (integration point - sample) distances
    :param int M: number of probabilistic samples (normalization factor)
    :param float bw: kernel bandwidth
    :param str ktype: 'gauss'/'gaussian', 'uni'/'uniform' or 'tri'/'triangle'
    :param bool bGrad: if True, also return the kernel gradient matrix
    :return: KernelMat, or (KernelMat, KernelGradMat) if bGrad
    :raises ValueError: for an unrecognized kernel type (previously an
        unknown ktype fell through and raised an unhelpful NameError on
        the unbound KernelMat variable)
    '''
    if ktype == 'gauss' or ktype == 'gaussian':
        # Gaussian kernel CDF via the error function
        KernelMat = (1./M)*((1 + _erf((points/bw)/np.sqrt(2.)))/2.)
    elif ktype == 'uniform' or ktype == 'uni':
        # Uniform kernel scaled so its standard deviation equals bw
        KernelMat = (1./M)*_ramp(points, width=bw*np.sqrt(12))
    elif ktype == 'triangle' or ktype == 'tri':
        # Triangular kernel scaled so its standard deviation equals bw
        KernelMat = (1./M)*_trint(points, width=bw*2.*np.sqrt(6))
    else:
        raise ValueError('Unsupported kernel type: ' + str(ktype))
    if bGrad:
        if ktype == 'gauss' or ktype == 'gaussian':
            # Gaussian PDF
            const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
            KernelGradMat = const_term * np.exp(-(1./2.) * (points/bw)**2)
        elif ktype == 'uniform' or ktype == 'uni':
            # Boxcar PDF built from two step functions
            width = bw*np.sqrt(12)
            const = (1./M)*(1./width)
            KernelGradMat = const*(_step(points+width/2) -
                                   _step(points-width/2))
        elif ktype == 'triangle' or ktype == 'tri':
            # Triangular PDF built from two ramps
            width = bw*2.*np.sqrt(6)
            const = (1./M)*(2./width)
            KernelGradMat = const*(_ramp(points+width/4, width/2) -
                                   _ramp(points-width/4, width/2))
        return KernelMat, KernelGradMat
    else:
        return KernelMat
def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad
def _appendPlotArrays(q, h, integration_points):
q = np.insert(q, 0, q[0])
h = np.insert(h, 0, 0)
q = np.insert(q, 0, min(integration_points))
h = np.insert(h, 0, 0)
q = np.append(q, q[-1])
h = np.append(h, 1)
q = np.append(q, max(integration_points))
h = np.append(h, 1)
return q, h
def _finDiff(fobj, dv, f0=None, eps=10**-6):
if f0 is None:
f0 = fobj(dv)
fbase = copy.copy(f0)
fnew = fobj(dv + eps)
return float((fnew - fbase)/eps)
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
def _intervalSample(returned_samples, bounds):
if len(returned_samples) < 1:
return bounds[0]
elif len(returned_samples) < 2:
return bounds[1]
else:
return np.random.uniform(bounds[0], bounds[1])
|
lwcook/horsetail-matching | horsetailmatching/hm.py | _matrix_grad | python | def _matrix_grad(q, h, h_dx, t, t_prime):
''' Returns the gradient with respect to a single variable'''
N = len(q)
W = np.zeros([N, N])
Wprime = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
Wprime[i, i] = \
0.5*(h_dx[min(i+1, N-1)] - h_dx[max(i-1, 0)])
tgrad = np.array([t_prime[i]*h_dx[i] for i in np.arange(N)])
grad = 2.0*(q - t).T.dot(W).dot(-1.0*tgrad) \
+ (q - t).T.dot(Wprime).dot(q - t)
return grad | Returns the gradient with respect to a single variable | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L938-L954 | null | import pdb
import time
import math
import copy
import warnings
import numpy as np
class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
        Alternatively each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
    :param str method: method with which to evaluate the horsetail matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
        of size (num_quadrature_points). It should return a function that
        predicts the qoi at an arbitrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
    def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
            ftarget=None, jac=False, method=None,
            samples_prob=100, samples_int=50, integration_points=None,
            kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
            surrogate=None, surrogate_points=None, surrogate_jac=False,
            reuse_samples=True, verbose=False):
        # See the class docstring for the meaning of each argument; the
        # property setters invoked below perform validation/normalization.
        # NOTE(review): the mutable default [] for int_uncertainties is
        # safe only because the setter copies it via _makeIter.
        self.fqoi = fqoi
        # self.uncertain_parameters = uncertain_parameters
        self.prob_uncertainties = prob_uncertainties
        self.int_uncertainties = int_uncertainties
        self.ftarget = ftarget
        self.jac = jac
        self.method = method # Must be done after setting jac
        self.samples_prob = samples_prob
        self.samples_int = samples_int
        self.integration_points = integration_points
        self.kernel_bandwidth = kernel_bandwidth
        self.kernel_type = kernel_type
        self.alpha = alpha
        self.reuse_samples = reuse_samples
        self.u_samples = None
        self.surrogate = surrogate
        self.surrogate_points = surrogate_points
        self.surrogate_jac = surrogate_jac
        self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
    @property
    def prob_uncertainties(self):
        # Probabilistic uncertain parameters (objects or sampling callables)
        return self._prob_uncertainties
    @prob_uncertainties.setter
    def prob_uncertainties(self, params):
        # A single parameter is wrapped into a list for uniform handling
        self._prob_uncertainties = _makeIter(params)
    @property
    def int_uncertainties(self):
        # Interval uncertain parameters (objects, bounds tuples/lists, or
        # sampling callables)
        return self._int_uncertainties
    @int_uncertainties.setter
    def int_uncertainties(self, params):
        # A single parameter is wrapped into a list for uniform handling
        self._int_uncertainties = _makeIter(params)
    @property
    def samples_prob(self):
        # Number of samples drawn from the probabilistic uncertainties
        return self._samples_prob
    @samples_prob.setter
    def samples_prob(self, value):
        if len(self.prob_uncertainties) > 0:
            self._samples_prob = value
        else:
            # With no probabilistic uncertainties a single sample suffices
            self._samples_prob = 1
    @property
    def samples_int(self):
        # Number of samples drawn from the interval uncertainties
        return self._samples_int
    @samples_int.setter
    def samples_int(self, value):
        if len(self.int_uncertainties) > 0:
            self._samples_int = value
        else:
            # With no interval uncertainties a single sample suffices
            self._samples_int = 1
    @property
    def method(self):
        # Metric evaluation method: 'empirical' or 'kernel'
        return self._method
    @method.setter
    def method(self, value):
        if value is None:
            # Default depends on jac: gradient propagation uses the
            # differentiable kernel formulation, otherwise the empirical
            # CDF method is used
            if self.jac is False:
                self._method = 'empirical'
            else:
                self._method = 'kernel'
        else:
            self._method = value
    @property
    def ftarget(self):
        # Target inverse-CDF function t(h); may be an (upper, lower) pair
        return self._ftarget
    @ftarget.setter
    def ftarget(self, value):
        def standardTarget(h):
            # Default target: t(h) = 0 for all h
            return 0
        try:
            # An iterable value is an (upper, lower) pair of targets
            iter(value)
            self._ftarg_u = value[0]
            self._ftarg_l = value[1]
            self._ftarget = value
        except:
            # Non-iterable (bare except also catches None's TypeError):
            # a single target is used for both horsetail curves
            if value is None:
                self._ftarget = standardTarget
            else:
                self._ftarget = value
            self._ftarg_u = self._ftarget
            self._ftarg_l = self._ftarget
@property
def u_samples(self):
return self._u_samples
@u_samples.setter
def u_samples(self, samples):
if samples is not None:
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
if (not isinstance(samples, np.ndarray) or
samples.shape != (self.samples_int, self.samples_prob, N_u)):
raise TypeError('u_samples should be a np.array of size'
'(samples_int, samples_prob, num_uncertanities)')
self._u_samples = samples
@property
def kernel_type(self):
return self._kernel_type
@kernel_type.setter
def kernel_type(self, value):
allowed_types = ['gaussian', 'uniform', 'triangle']
if value not in allowed_types:
raise ValueError('Kernel type must be one of'+
', '.join([str(t) for t in allowed_types]))
else:
self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
    def evalSamples(self, x):
        '''Evaluates the samples of quantity of interest and its gradient
        (if supplied) at the given values of the design variables
        :param iterable x: values of the design variables, this is passed as
            the first argument to the function fqoi
        :return: (values of the quantity of interest, values of the gradient)
        :rtype: Tuple
        '''
        # Make sure dimensions are correct
        # u_sample_dimensions = self._processDimensions()
        self._N_dv = len(_makeIter(x))
        if self.verbose:
            print('Evaluating surrogate')
        # Without a surrogate, wrap fqoi/jac so they become functions of the
        # uncertainty vector u only (x is fixed for this evaluation).
        if self.surrogate is None:
            def fqoi(u):
                return self.fqoi(x, u)
            def fgrad(u):
                return self.jac(x, u)
            jac = self.jac
        else:
            # With a surrogate, fit it at this design point and sample the
            # (cheaper) surrogate instead of fqoi directly.
            fqoi, fgrad, surr_jac = self._makeSurrogates(x)
            jac = surr_jac
        u_samples = self._getParameterSamples()
        if self.verbose:
            print('Evaluating quantity of interest at samples')
        q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
        return q_samples, grad_samples
    def evalMetric(self, x, method=None):
        '''Evaluates the horsetail matching metric at given values of the
        design variables.
        :param iterable x: values of the design variables, this is passed as
            the first argument to the function fqoi
        :param str method: method to use to evaluate the metric ('empirical' or
            'kernel')
        :return: metric_value - value of the metric evaluated at the design
            point given by x
        :rtype: float
        *Example Usage*::
            >>> def myFunc(x, u): return x[0]*x[1] + u
            >>> u1 = UniformParameter()
            >>> theHM = HorsetailMatching(myFunc, u)
            >>> x0 = [1, 2]
            >>> theHM.evalMetric(x0)
        '''
        # Make sure dimensions are correct
        # u_sample_dimensions = self._processDimensions()
        if self.verbose:
            print('----------')
            print('At design: ' + str(x))
        # Sample the qoi (and gradient) at this design, then delegate the
        # actual metric computation to evalMetricFromSamples.
        q_samples, grad_samples = self.evalSamples(x)
        if self.verbose:
            print('Evaluating metric')
        return self.evalMetricFromSamples(q_samples, grad_samples, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradien,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
raise ValueError('Unsupported metric evalation method')
def getHorsetail(self):
    '''Function that gets vectors of the horsetail plot at the last design
    evaluated.

    :return: upper_curve, lower_curve, CDFs - returns three parameters,
        the first two are tuples containing pairs of x/y vectors of the
        upper and lower bounds on the CDFs (the horsetail plot). The
        third parameter is a list of x/y tuples for individual CDFs
        propagated at each sampled value of the interval uncertainties

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
    '''
    # The _ql/_qu/... attributes are only set once the metric has been
    # evaluated at least once.
    if hasattr(self, '_ql'):
        ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
        qh, hh = self._qh, self._hh

        # _qis is non-None only for the kernel method; pad the curves so
        # they plot flat over the full range of integration points.
        if self._qis is not None:
            ql, hl = _appendPlotArrays(ql, hl, self._qis)
            qu, hu = _appendPlotArrays(qu, hu, self._qis)

        # NOTE(review): for the kernel method _qh/_hh are stored with
        # shape (N_quad, M_int); zipping over rows here may not give one
        # CDF per interval sample - verify against plotting callers.
        CDFs = []
        for qi, hi in zip(qh, hh):
            CDFs.append((qi, hi))

        # Evaluate the target inverse-CDFs along each bound for plotting.
        upper_target = [self._ftarg_u(h) for h in hu]
        upper_curve = (qu, hu, upper_target)
        lower_target = [self._ftarg_l(h) for h in hl]
        lower_curve = (ql, hl, lower_target)
        return upper_curve, lower_curve, CDFs
    else:
        raise ValueError('''The metric has not been evaluated at any
            design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
def _evalMetricEmpirical(self, q_samples, grad_samples=None):
    '''Evaluate the horsetail matching metric from empirical CDFs of the
    given samples; returns dhat, or (dhat, dhat_grad) when grad_samples
    is given. Stores the horsetail curves on self for getHorsetail().'''
    M_prob = self.samples_prob
    M_int = self.samples_int

    # The smooth max/min sharpness is only needed when there is more than
    # one interval sample to take an envelope over.
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    h_htail = np.zeros([M_int, M_prob])
    q_htail = np.zeros([M_int, M_prob])
    q_l = np.zeros(M_prob)
    q_u = np.zeros(M_prob)

    if grad_samples is not None:
        g_htail = np.zeros([M_int, M_prob, self._N_dv])
        g_l = np.zeros([M_prob, self._N_dv])
        g_u = np.zeros([M_prob, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    for ii in np.arange(M_int):
        # Get empirical CDF by sorting samples at each value of intervals
        sortinds = np.argsort(q_samples[ii, :])
        q_htail[ii, :] = q_samples[ii, sortinds]
        M = q_samples.shape[1]
        # Midpoint plotting positions h_j = (j + 0.5)/M
        h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]

        if grad_samples is not None:
            # Reorder the gradient samples consistently with the sort
            for ix in np.arange(self._N_dv):
                g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]

    for jj in np.arange(M_prob):
        # Envelope over interval samples at each CDF height; exact
        # min/max without gradients, smooth (differentiable) version
        # via _extalg/_extgrad when gradients are propagated.
        q_u[jj] = min(q_htail[:, jj])
        q_l[jj] = max(q_htail[:, jj])
        if grad_samples is not None:
            q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
            q_l[jj] = _extalg(q_htail[:, jj], alpha)
            for ix in np.arange(self._N_dv):
                gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                gtemp = _extgrad(q_htail[:, jj], alpha)
                g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])

    h_u, h_l = h_htail[0], h_htail[0]  # h is same for all ECDFs

    # Targets evaluated at the same CDF heights
    t_u = [self._ftarg_u(hi) for hi in h_u]
    t_l = [self._ftarg_l(hi) for hi in h_u]

    # Cache curves for getHorsetail(); _qis None marks empirical mode.
    self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
    self._qh, self._hh = q_htail, h_htail
    self._tl, self._tu = t_l, t_u
    self._qis = None

    # Mean squared distance of each bound from its target
    Du = (1./M_prob)*sum((q_u - t_u)**2)
    Dl = (1./M_prob)*sum((q_l - t_l)**2)

    dhat = np.sqrt(Du + Dl)
    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Chain rule through the squared distances and the square root
        for ix in np.arange(self._N_dv):
            Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
            Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])

        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad
    else:
        return dhat
def _getKernelParameters(self, q_samples):
    '''Determine the kernel bandwidth and the integration points used by
    the kernel-based metric; returns (qis, bw) and caches both on self.'''
    # If kernel bandwidth not specified, find it using Scott's rule
    if self.kernel_bandwidth is None:
        if len(self.prob_uncertainties) > 0:
            # Guard against a degenerate (near-constant) sample set
            if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                bw = 1e-6
            else:
                bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                           *np.std(q_samples[0, :]))
        else:
            bw = 1e-3
        self.kernel_bandwidth = bw
    else:
        bw = self.kernel_bandwidth

    ## Initalize arrays and prepare calculation
    q_min = np.amin(q_samples)
    q_max = np.amax(q_samples)

    if self.integration_points is None:
        # Default: 10000 points over three times the sample range
        q_range = q_max - q_min
        qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
        self.integration_points = qis_full
    else:
        qis_full = np.array(self.integration_points)

    ii_low, ii_high = 0, len(qis_full)

    # Find the first integration point safely above the samples
    # (20 bandwidths beyond the extreme sample).
    try:
        ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                                qi > q_max + 20*bw)
    except StopIteration:
        warnings.warn('Sample found higher than range of integration points')

    # Same on the low side, scanning from the top of the reversed array
    try:
        iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                               qi < q_min - 20*bw)
        ii_low = len(qis_full) - (iiN_low+1)
    except StopIteration:
        warnings.warn('Sample found lower than range of integration points')

    qis = qis_full[ii_low:ii_high+1]  # Only evaluate over range of samples

    self._qis = qis
    return qis, bw
def _evalMetricKernel(self, q_samples, grad_samples=None):
    '''Evaluate the horsetail matching metric using kernel-smoothed CDFs;
    returns dhat, or (dhat, dhat_grad) when grad_samples is given.'''
    qis, bw = self._getKernelParameters(q_samples)

    N_quad = len(qis)
    M_prob = self.samples_prob
    M_int = self.samples_int

    # Smooth-envelope sharpness only matters with several interval samples
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    fhtail = np.zeros([N_quad, M_int])
    qhtail = np.zeros([N_quad, M_int])
    if grad_samples is not None:
        fht_grad = np.zeros([N_quad, M_int, self._N_dv])
        hu_grad = np.zeros([N_quad, self._N_dv])
        hl_grad = np.zeros([N_quad, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    # ALGORITHM 1 from publication
    # Evaluate all individual CDFs and their gradients
    for mm in np.arange(M_int):
        qjs = q_samples[mm, :]
        # Matrix of distances from each integration point to each sample
        rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])

        if grad_samples is not None:
            Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                                   ktype=self.kernel_type, bGrad=True)
            for ix in np.arange(self._N_dv):
                grad_js = grad_samples[mm, :, ix]
                # d(CDF)/dx = -K'(r) * dq/dx summed over samples
                fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
        else:
            Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                           bGrad=False)

        # Sum the kernel contributions to get the CDF at each qi
        fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
        qhtail[:, mm] = qis

    # ALGORITHM 2 from publication
    # Find horsetail curves - envelope of the CDFs and their gradients
    # In Matrix form
    if grad_samples is None:
        hu = np.max(fhtail, axis=1).flatten()
        hl = np.min(fhtail, axis=1).flatten()
    else:
        # Smooth (differentiable) envelope via _extalg/_extgrad
        hu = _extalg(fhtail, alpha, axis=1).flatten()
        hl = _extalg(fhtail, -1*alpha, axis=1).flatten()
        Su_prime = _extgrad(fhtail, alpha, axis=1)
        Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
        for kx in np.arange(self._N_dv):
            fis_grad = fht_grad[:, :, kx]
            for ii in np.arange(N_quad):
                hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])

    # ALGORITHM 3 from publication
    # Evaluate overall metric and gradient using matrix multipliation
    tu = np.array([self._ftarg_u(hi) for hi in hu])
    tl = np.array([self._ftarg_l(hi) for hi in hl])
    Du = _matrix_integration(qis, hu, tu)
    Dl = _matrix_integration(qis, hl, tl)
    dhat = float(np.sqrt(Du + Dl))

    # Cache curves for getHorsetail()
    self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
    self._qh, self._hh = qhtail, fhtail
    self._tl, self._tu = tl, tu

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Finite-difference derivatives of the target functions
        tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
        tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
        for kx in np.arange(self._N_dv):
            # _matrix_grad is defined elsewhere in this module
            Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
            Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)

        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad
    else:
        return dhat
def _makeSurrogates(self, x):
    '''Fit surrogate models of the qoi (and optionally its gradient) at
    design x; returns (fqoi, fgrad, surr_jac) where fqoi/fgrad take only
    the uncertainty vector u, and surr_jac mirrors the jac convention.'''
    # Get quadrature points
    if self.surrogate_points is None:
        # Default: full tensor grid of 5 points per uncertain dimension
        # on [-1, 1]
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
                           copy=False)
        u_sparse = np.vstack([m.flatten() for m in mesh]).T
    else:
        u_sparse = self.surrogate_points

    N_sparse = u_sparse.shape[0]
    q_sparse = np.zeros(N_sparse)

    # Get surrogates in correct form
    if not self.jac:
        # No gradients: fit a single surrogate to the qoi values
        for iu, u in enumerate(u_sparse):
            q_sparse[iu] = self.fqoi(x, u)

        surr_qoi = self.surrogate(u_sparse, q_sparse)

        def fqoi(u):
            return surr_qoi(u)
        fgrad = False
        surr_jac = False

    else:
        # Evaluate qoi and gradient at each quadrature point; jac=True
        # means fqoi returns both, otherwise jac is a separate callable.
        g_sparse = np.zeros([N_sparse, self._N_dv])
        for iu, u in enumerate(u_sparse):
            if isinstance(self.jac, bool) and self.jac:
                q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
            else:
                q_sparse[iu] = self.fqoi(x, u)
                g_sparse[iu, :] = self.jac(x, u)

        if not self.surrogate_jac:
            # Fit the same kind of surrogate to each partial derivative
            fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            for k in np.arange(self._N_dv):
                fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])
            def surr_grad(u):
                return [f(u) for f in fpartial]
        else:
            if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                # Surrogate function fits qoi and gradient together
                surr_qoi, surr_grad = self.surrogate(
                    u_sparse, q_sparse, g_sparse)
            else:
                # Separate user-supplied surrogate for the gradient
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                surr_grad = self.surrogate_jac(u_sparse, g_sparse)

        def fqoi(u):
            return(surr_qoi(u))
        def fgrad(u):
            return(surr_grad(u))
        surr_jac = fgrad

    return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
    '''Sample the uncertain parameters, returning an array of shape
    (samples_int, samples_prob, N_u) with the interval uncertainties
    first along the last axis. Reuses cached samples when configured.'''
    N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)

    get_new = True
    if self.reuse_samples and self.u_samples is not None:
        # Only reuse when the cached array matches the current sizes
        if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
            if self.verbose:
                print('''Stored samples do not match current dimensions,
                    getting new samples''')
        else:
            get_new = False

    if get_new:
        if self.verbose:
            print('Getting uncertain parameter samples')

        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        N_prob = len(self.prob_uncertainties)
        N_int = len(self.int_uncertainties)
        # u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])

        u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
                                   len(self.prob_uncertainties)])
        u_samples_int = np.zeros([self.samples_int, self.samples_prob,
                                  len(self.int_uncertainties)])

        # Interval uncertainties: one value per interval sample. Each
        # entry may be a sampling callable, a (lb, ub) bounds pair, or
        # an object exposing getSample().
        u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
        for kk, uk in enumerate(self.int_uncertainties):
            if callable(uk):
                samps = np.array(uk()).flatten()
                # NOTE(review): this checks against samples_prob although
                # u_ints has samples_int rows - confirm intended.
                if len(samps) != self.samples_prob:
                    raise Exception('Number of samples returned not equal ' +
                        'to specified number of samples: please set number of ' +
                        'samples with samples_prob attribute')
                else:
                    u_ints[:, kk] = samps
            elif isinstance(uk, (tuple, list)):  ## See if given as tuple/list of bounds
                lb, ub = uk[0], uk[1]
                u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
                # Pin the first and last samples to the bounds themselves
                u_ints[0, kk] = lb
                u_ints[-1, kk] = ub
            elif hasattr(uk, 'getSample'):
                for ii in np.arange(self.samples_int):
                    u_ints[ii, kk] = uk.getSample()
            else:
                raise TypeError('Unsupported interval uncertainty type')

        # Repeat each interval sample across the probabilistic axis
        u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))

        # Probabilistic uncertainties: one value per probabilistic sample
        u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
        for kk, uk in enumerate(self.prob_uncertainties):
            if callable(uk):
                samps = np.array(uk()).flatten()
                if len(samps) != self.samples_prob:
                    raise Exception('Number of samples returned not equal ' +
                        'to specified number of samples: please set number of ' +
                        'samples with samples_prob attribute')
                else:
                    u_probs[:, kk] = samps
            elif hasattr(uk, 'getSample'):
                for jj in np.arange(self.samples_prob):
                    u_probs[jj, kk] = uk.getSample()
            else:
                raise TypeError('Unsupported probabilistic uncertainty type')

        # Repeat the probabilistic samples across the interval axis
        u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))

        # Interval uncertainties come first along the last axis
        u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)

        self.u_samples = u_samples
        return u_samples
    else:
        if self.verbose:
            print('Re-using stored samples')
        return self.u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
    '''Evaluate fqoi (and, if jac is set, the gradient) at every
    uncertainty sample; returns (q_samples, grad_samples) and caches
    both on self. grad_samples is None when jac is falsy.'''
    # Array of shape (M_int, M_prob)
    grad_samples = None
    q_samples = np.zeros([self.samples_int, self.samples_prob])
    if not jac:
        for ii in np.arange(q_samples.shape[0]):
            for jj in np.arange(q_samples.shape[1]):
                q_samples[ii, jj] = fqoi(u_samples[ii, jj])
    else:
        grad_samples = np.zeros([self.samples_int, self.samples_prob,
                                 self._N_dv])
        for ii in np.arange(q_samples.shape[0]):
            for jj in np.arange(q_samples.shape[1]):
                if isinstance(jac, bool) and jac:
                    # jac=True convention: fqoi returns (q, grad)
                    (q, grad) = fqoi(u_samples[ii, jj])
                    q_samples[ii, jj] = float(q)
                    grad_samples[ii, jj, :] = [_ for _ in grad]
                else:
                    # jac is a separate callable: evaluate both
                    q_samples[ii, jj] = fqoi(u_samples[ii, jj])
                    grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])

    self.grad_samples = grad_samples
    self.q_samples = q_samples

    return q_samples, grad_samples
##############################################################################
## Private functions
##############################################################################
def _extalg(xarr, alpha=100, axis=None):
'''Given an array xarr of values, smoothly return the max/min'''
return (np.sum(xarr * np.exp(alpha*xarr), axis=axis, keepdims=True)/
np.sum(np.exp(alpha*xarr), axis=axis, keepdims=True))
def _extgrad(xarr, alpha=100, axis=None):
    '''Gradient of the smooth max/min _extalg with respect to each entry
    of xarr.'''
    weights = np.exp(alpha*xarr)
    softmax = weights/np.sum(weights, axis=axis, keepdims=True)
    correction = 1 + alpha*(xarr - _extalg(xarr, alpha, axis=axis))
    return softmax*correction
def _ramp(x, width):
    '''Ramp rising from 0 to 1 over [0, width] (the CDF of a uniform
    kernel), built from the smoothed min/max helpers.'''
    scaled = (x - width/2)*(1/width)
    return _minsmooth(1, _maxsmooth(0, scaled))
def _trint(x, width):
    '''CDF of a triangle kernel of total width `width`, built from the
    smoothed min/max helpers.'''
    half = width/2.
    # Clamp x into [-half, half]
    xc = _maxsmooth(-half, _minsmooth(x, half))
    rising = 0.5 + xc/half + xc**2/(2*half**2)
    falling = xc/half - xc**2/(2*half**2)
    return _minsmooth(rising, 0.5) + _maxsmooth(falling, 0.0)
def _minsmooth(a, b, eps=0.0000):
return 0.5*(a + b - np.sqrt((a-b)**2 + eps**2))
def _maxsmooth(a, b, eps=0.0000):
return 0.5*(a + b + np.sqrt((a-b)**2 + eps**2))
def _step(x):
return 1 * (x > 0)
def _erf(r):
## Numerical implementation of the error function for matrix comptibility
# save the sign of x
sign = np.sign(r)
x = np.absolute(r)
# constants
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# A&S formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*np.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def _kernel(points, M, bw, ktype='gauss', bGrad=False):
    '''Evaluate the kernel CDF matrix (and optionally the kernel PDF
    matrix needed for gradients) at a matrix of distances.

    :param np.ndarray points: matrix of (integration point - sample)
        distances
    :param int M: number of samples; each kernel carries weight 1/M
    :param float bw: kernel bandwidth
    :param str ktype: 'gauss'/'gaussian', 'uniform'/'uni' or
        'triangle'/'tri'
    :param bool bGrad: if True, also return the kernel gradient matrix
    :return: KernelMat, or (KernelMat, KernelGradMat) when bGrad
    :raises ValueError: if ktype is not a supported kernel
    '''
    if ktype == 'gauss' or ktype == 'gaussian':
        # Gaussian CDF via the vectorised error function
        KernelMat = (1./M)*((1 + _erf((points/bw)/np.sqrt(2.)))/2.)
    elif ktype == 'uniform' or ktype == 'uni':
        # Uniform kernel of the same standard deviation as bw
        KernelMat = (1./M)*_ramp(points, width=bw*np.sqrt(12))
    elif ktype == 'triangle' or ktype == 'tri':
        # Triangle kernel of the same standard deviation as bw
        KernelMat = (1./M)*_trint(points, width=bw*2.*np.sqrt(6))
    else:
        # Fail fast: previously an unknown ktype fell through and raised
        # a confusing UnboundLocalError below.
        raise ValueError('Unsupported kernel type: ' + str(ktype))

    if bGrad:
        if ktype == 'gauss' or ktype == 'gaussian':
            # Gaussian PDF
            const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
            KernelGradMat = const_term * np.exp(-(1./2.) * (points/bw)**2)
        elif ktype == 'uniform' or ktype == 'uni':
            width = bw*np.sqrt(12)
            const = (1./M)*(1./width)
            KernelGradMat = const*(_step(points+width/2) -
                                   _step(points-width/2))
        elif ktype == 'triangle' or ktype == 'tri':
            width = bw*2.*np.sqrt(6)
            const = (1./M)*(2./width)
            KernelGradMat = const*(_ramp(points+width/4, width/2) -
                                   _ramp(points-width/4, width/2))

        return KernelMat, KernelGradMat
    else:
        return KernelMat
def _matrix_integration(q, h, t):
''' Returns the dp metric for a single horsetail
curve at a given value of the epistemic uncertainties'''
N = len(q)
# correction if CDF has gone out of trapezium range
if h[-1] < 0.9: h[-1] = 1.0
W = np.zeros([N, N])
for i in range(N):
W[i, i] = 0.5*(h[min(i+1, N-1)] - h[max(i-1, 0)])
dp = (q - t).T.dot(W).dot(q - t)
return dp
def _appendPlotArrays(q, h, integration_points):
q = np.insert(q, 0, q[0])
h = np.insert(h, 0, 0)
q = np.insert(q, 0, min(integration_points))
h = np.insert(h, 0, 0)
q = np.append(q, q[-1])
h = np.append(h, 1)
q = np.append(q, max(integration_points))
h = np.append(h, 1)
return q, h
def _finDiff(fobj, dv, f0=None, eps=10**-6):
if f0 is None:
f0 = fobj(dv)
fbase = copy.copy(f0)
fnew = fobj(dv + eps)
return float((fnew - fbase)/eps)
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
def _intervalSample(returned_samples, bounds):
if len(returned_samples) < 1:
return bounds[0]
elif len(returned_samples) < 2:
return bounds[1]
else:
return np.random.uniform(bounds[0], bounds[1])
|
lwcook/horsetail-matching | horsetailmatching/hm.py | HorsetailMatching.evalSamples | python | def evalSamples(self, x):
'''Evalautes the samples of quantity of interest and its gradient
(if supplied) at the given values of the design variables
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: (values of the quantity of interest, values of the gradient)
:rtype: Tuple
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose:
print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
return q_samples, grad_samples | Evalautes the samples of quantity of interest and its gradient
(if supplied) at the given values of the design variables
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: (values of the quantity of interest, values of the gradient)
:rtype: Tuple | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L292-L326 | [
"def _makeIter(x):\n try:\n iter(x)\n return [xi for xi in x]\n except:\n return [x]\n",
"def _makeSurrogates(self, x):\n\n # Get quadrature points\n if self.surrogate_points is None:\n N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)\n mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],\n copy=False)\n u_sparse = np.vstack([m.flatten() for m in mesh]).T\n else:\n u_sparse = self.surrogate_points\n\n N_sparse = u_sparse.shape[0]\n q_sparse = np.zeros(N_sparse)\n\n # Get surrogates in correct form\n if not self.jac:\n for iu, u in enumerate(u_sparse):\n q_sparse[iu] = self.fqoi(x, u)\n\n surr_qoi = self.surrogate(u_sparse, q_sparse)\n\n def fqoi(u):\n return surr_qoi(u)\n fgrad = False\n surr_jac = False\n\n else:\n g_sparse = np.zeros([N_sparse, self._N_dv])\n for iu, u in enumerate(u_sparse):\n if isinstance(self.jac, bool) and self.jac:\n q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)\n else:\n q_sparse[iu] = self.fqoi(x, u)\n g_sparse[iu, :] = self.jac(x, u)\n\n if not self.surrogate_jac:\n fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]\n surr_qoi = self.surrogate(u_sparse, q_sparse)\n for k in np.arange(self._N_dv):\n fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])\n def surr_grad(u):\n return [f(u) for f in fpartial]\n else:\n if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:\n surr_qoi, surr_grad = self.surrogate(\n u_sparse, q_sparse, g_sparse)\n else:\n surr_qoi = self.surrogate(u_sparse, q_sparse)\n surr_grad = self.surrogate_jac(u_sparse, g_sparse)\n\n def fqoi(u):\n return(surr_qoi(u))\n def fgrad(u):\n return(surr_grad(u))\n surr_jac = fgrad\n\n return fqoi, fgrad, surr_jac\n",
" def _getParameterSamples(self):\n\n N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)\n\n get_new = True\n if self.reuse_samples and self.u_samples is not None:\n if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):\n if self.verbose:\n print('''Stored samples do not match current dimensions,\n getting new samples''')\n else:\n get_new = False\n\n if get_new:\n if self.verbose:\n print('Getting uncertain parameter samples')\n\n N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)\n N_prob = len(self.prob_uncertainties)\n N_int = len(self.int_uncertainties)\n# u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])\n\n u_samples_prob = np.zeros([self.samples_int, self.samples_prob,\n len(self.prob_uncertainties)])\n u_samples_int = np.zeros([self.samples_int, self.samples_prob,\n len(self.int_uncertainties)])\n\n u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])\n for kk, uk in enumerate(self.int_uncertainties):\n if callable(uk):\n samps = np.array(uk()).flatten()\n if len(samps) != self.samples_prob:\n raise Exception('Number of samples returned not equal ' +\n 'to specified number of samples: please set number of ' +\n 'samples with samples_prob attribute')\n else:\n u_ints[:, kk] = samps\n elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds\n lb, ub = uk[0], uk[1]\n u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)\n u_ints[0, kk] = lb\n u_ints[-1, kk] = ub\n elif hasattr(uk, 'getSample'):\n for ii in np.arange(self.samples_int):\n u_ints[ii, kk] = uk.getSample()\n else:\n raise TypeError('Unsupported interval uncertainty type')\n\n u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))\n\n u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])\n for kk, uk in enumerate(self.prob_uncertainties):\n if callable(uk):\n samps = np.array(uk()).flatten()\n if len(samps) != self.samples_prob:\n raise Exception('Number of 
samples returned not equal ' +\n 'to specified number of samples: please set number of ' +\n 'samples with samples_prob attribute')\n else:\n u_probs[:, kk] = samps\n elif hasattr(uk, 'getSample'):\n for jj in np.arange(self.samples_prob):\n u_probs[jj, kk] = uk.getSample()\n else:\n raise TypeError('Unsupported probabilistic uncertainty type')\n\n u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))\n\n u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)\n\n self.u_samples = u_samples\n return u_samples\n else:\n if self.verbose:\n print('Re-using stored samples')\n return self.u_samples\n",
"def _evalSamples(self, u_samples, fqoi, fgrad, jac):\n\n # Array of shape (M_int, M_prob)\n grad_samples = None\n q_samples = np.zeros([self.samples_int, self.samples_prob])\n if not jac:\n for ii in np.arange(q_samples.shape[0]):\n for jj in np.arange(q_samples.shape[1]):\n q_samples[ii, jj] = fqoi(u_samples[ii, jj])\n else:\n grad_samples = np.zeros([self.samples_int, self.samples_prob,\n self._N_dv])\n for ii in np.arange(q_samples.shape[0]):\n for jj in np.arange(q_samples.shape[1]):\n if isinstance(jac, bool) and jac:\n (q, grad) = fqoi(u_samples[ii, jj])\n q_samples[ii, jj] = float(q)\n grad_samples[ii, jj, :] = [_ for _ in grad]\n else:\n q_samples[ii, jj] = fqoi(u_samples[ii, jj])\n grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])\n\n self.grad_samples = grad_samples\n\n self.q_samples = q_samples\n\n return q_samples, grad_samples\n"
] | class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetil matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=None,
             ftarget=None, jac=False, method=None,
             samples_prob=100, samples_int=50, integration_points=None,
             kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
             surrogate=None, surrogate_points=None, surrogate_jac=False,
             reuse_samples=True, verbose=False):
    '''Store the configuration on the instance. See the class docstring
    for the meaning of each argument; several attributes are properties
    with non-trivial setters (e.g. method's default depends on jac).

    int_uncertainties defaults to None, which is treated as an empty
    list; passing [] explicitly behaves identically.
    '''
    # Use a None sentinel instead of a mutable default argument ([]),
    # avoiding the shared-default pitfall. Behaviour is unchanged.
    if int_uncertainties is None:
        int_uncertainties = []

    self.fqoi = fqoi
    self.prob_uncertainties = prob_uncertainties
    self.int_uncertainties = int_uncertainties
    self.ftarget = ftarget
    self.jac = jac
    self.method = method  # Must be set after jac: its default depends on jac
    self.samples_prob = samples_prob
    self.samples_int = samples_int
    self.integration_points = integration_points
    self.kernel_bandwidth = kernel_bandwidth
    self.kernel_type = kernel_type
    self.alpha = alpha
    self.reuse_samples = reuse_samples
    self.u_samples = None
    self.surrogate = surrogate
    self.surrogate_points = surrogate_points
    self.surrogate_jac = surrogate_jac
    self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
@property
def prob_uncertainties(self):
    # List of probabilistic uncertainties (objects with getSample() or
    # sampling callables); always stored as a list.
    return self._prob_uncertainties

@prob_uncertainties.setter
def prob_uncertainties(self, params):
    # Accept either a single parameter or an iterable of them.
    self._prob_uncertainties = _makeIter(params)
@property
def int_uncertainties(self):
    # List of interval uncertainties (objects with getSample(), bounds
    # tuples, or sampling callables); always stored as a list.
    return self._int_uncertainties

@int_uncertainties.setter
def int_uncertainties(self, params):
    # Accept either a single parameter or an iterable of them.
    self._int_uncertainties = _makeIter(params)
@property
def samples_prob(self):
    # Number of samples taken from the probabilistic uncertainties.
    return self._samples_prob

@samples_prob.setter
def samples_prob(self, value):
    # With no probabilistic uncertainties one "sample" suffices, so the
    # requested value is ignored and 1 is stored instead.
    if len(self.prob_uncertainties) > 0:
        self._samples_prob = value
    else:
        self._samples_prob = 1
@property
def samples_int(self):
    # Number of samples taken from the interval uncertainties.
    return self._samples_int

@samples_int.setter
def samples_int(self, value):
    # With no interval uncertainties one "sample" suffices, so the
    # requested value is ignored and 1 is stored instead.
    if len(self.int_uncertainties) > 0:
        self._samples_int = value
    else:
        self._samples_int = 1
@property
def method(self):
    # Metric evaluation method: 'empirical' or 'kernel'.
    return self._method

@method.setter
def method(self, value):
    # The default depends on whether gradients are propagated: without
    # jac the empirical metric is used; with jac, kernels (which give a
    # differentiable metric) are the default.
    if value is None:
        if self.jac is False:
            self._method = 'empirical'
        else:
            self._method = 'kernel'
    else:
        self._method = value
@property
def ftarget(self):
    # Target inverse CDF t(h): a single callable, or an (upper, lower)
    # pair of callables for mixed uncertainties.
    return self._ftarget

@ftarget.setter
def ftarget(self, value):
    def standardTarget(h):
        # Default target: t(h) = 0 for all h
        return 0
    try:
        # A pair of target functions: (upper, lower)
        iter(value)
        self._ftarg_u = value[0]
        self._ftarg_l = value[1]
        self._ftarget = value
    except (TypeError, IndexError, KeyError):
        # Narrowed from a bare except (which also swallowed e.g.
        # KeyboardInterrupt): a non-iterable or non-indexable value is
        # a single target used for both bounds; None means the default.
        if value is None:
            self._ftarget = standardTarget
        else:
            self._ftarget = value
        self._ftarg_u = self._ftarget
        self._ftarg_l = self._ftarget
@property
def u_samples(self):
    # Cached samples of the uncertain parameters, shape
    # (samples_int, samples_prob, num_uncertainties), or None.
    return self._u_samples

@u_samples.setter
def u_samples(self, samples):
    # Validate shape against the configured sample counts; None clears
    # the cache so fresh samples are drawn on the next evaluation.
    if samples is not None:
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        if (not isinstance(samples, np.ndarray) or
                samples.shape != (self.samples_int, self.samples_prob, N_u)):
            raise TypeError('u_samples should be a np.array of size'
                            '(samples_int, samples_prob, num_uncertanities)')
    self._u_samples = samples
@property
def kernel_type(self):
    # Kernel used by the kernel-based metric evaluation.
    return self._kernel_type

@kernel_type.setter
def kernel_type(self, value):
    # Reject unsupported kernels up front, before they reach _kernel().
    allowed_types = ['gaussian', 'uniform', 'triangle']
    if value not in allowed_types:
        raise ValueError('Kernel type must be one of'+
                ', '.join([str(t) for t in allowed_types]))
    else:
        self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
def evalMetric(self, x, method=None):
'''Evaluates the horsetail matching metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param str method: method to use to evaluate the metric ('empirical' or
'kernel')
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theHM.evalMetric(x0)
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
if self.verbose:
print('----------')
print('At design: ' + str(x))
q_samples, grad_samples = self.evalSamples(x)
if self.verbose:
print('Evaluating metric')
return self.evalMetricFromSamples(q_samples, grad_samples, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradien,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
raise ValueError('Unsupported metric evalation method')
    def getHorsetail(self):
        '''Function that gets vectors of the horsetail plot at the last design
        evaluated.

        :return: upper_curve, lower_curve, CDFs - returns three parameters,
            the first two are tuples containing pairs of x/y vectors of the
            upper and lower bounds on the CDFs (the horsetail plot). The
            third parameter is a list of x/y tuples for individual CDFs
            propagated at each sampled value of the interval uncertainties

        :raises ValueError: if no metric evaluation has been performed yet

        *Example Usage*::

            >>> def myFunc(x, u): return x[0]*x[1] + u
            >>> u = UniformParameter()
            >>> theHM = HorsetailMatching(myFunc, u)
            >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
            >>> matplotlib.pyplot(x1, y1, 'b')
            >>> matplotlib.pyplot(x2, y2, 'b')
            >>> for (x, y) in CDFs:
            ...     matplotlib.pyplot(x, y, 'k:')
            >>> matplotlib.pyplot.show()
        '''
        # _ql/_qu/_hl/_hu/_qh/_hh are cached by _evalMetricEmpirical or
        # _evalMetricKernel; their presence means a metric was evaluated.
        if hasattr(self, '_ql'):
            ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
            qh, hh = self._qh, self._hh
            # _qis is the kernel integration grid (None for the empirical
            # method); extend the plotted bound curves over its full range.
            if self._qis is not None:
                ql, hl = _appendPlotArrays(ql, hl, self._qis)
                qu, hu = _appendPlotArrays(qu, hu, self._qis)
            # NOTE(review): for the kernel method _qh/_hh are stored as
            # (N_quad, M_int), so iterating rows here pairs integration
            # points rather than individual CDFs — confirm the intended
            # orientation against the empirical case, which stores
            # (M_int, M_prob).
            CDFs = []
            for qi, hi in zip(qh, hh):
                CDFs.append((qi, hi))
            # Each curve is returned with the target evaluated at its h grid.
            upper_target = [self._ftarg_u(h) for h in hu]
            upper_curve = (qu, hu, upper_target)
            lower_target = [self._ftarg_l(h) for h in hl]
            lower_curve = (ql, hl, lower_target)
            return upper_curve, lower_curve, CDFs
        else:
            raise ValueError('''The metric has not been evaluated at any
                design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
    def _evalMetricEmpirical(self, q_samples, grad_samples=None):
        """Evaluate the metric directly from empirical CDFs of q_samples.

        q_samples: array (M_int, M_prob); grad_samples: optional array
        (M_int, M_prob, n_dv).  Returns the metric value, plus its gradient
        when grad_samples is given.
        """
        M_prob = self.samples_prob
        M_int = self.samples_int
        # alpha sets the sharpness of the smooth min/max used for the
        # differentiable envelope; with one interval sample no envelope is
        # needed.
        if M_int > 1:
            alpha = self.alpha
        else:
            alpha = 1
        h_htail = np.zeros([M_int, M_prob])
        q_htail = np.zeros([M_int, M_prob])
        q_l = np.zeros(M_prob)
        q_u = np.zeros(M_prob)
        if grad_samples is not None:
            g_htail = np.zeros([M_int, M_prob, self._N_dv])
            g_l = np.zeros([M_prob, self._N_dv])
            g_u = np.zeros([M_prob, self._N_dv])
            Du_grad = np.zeros(self._N_dv)
            Dl_grad = np.zeros(self._N_dv)
        for ii in np.arange(M_int):
            # Get empirical CDF by sorting samples at each value of intervals
            sortinds = np.argsort(q_samples[ii, :])
            q_htail[ii, :] = q_samples[ii, sortinds]
            M = q_samples.shape[1]
            # Plotting positions h_j = (j + 0.5)/M
            h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]
            if grad_samples is not None:
                # Reorder gradient entries consistently with the sorted q
                for ix in np.arange(self._N_dv):
                    g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]
        for jj in np.arange(M_prob):
            # Envelope over the interval samples at each h level: smallest q
            # gives the upper CDF bound, largest q the lower bound.
            q_u[jj] = min(q_htail[:, jj])
            q_l[jj] = max(q_htail[:, jj])
            if grad_samples is not None:
                # With gradients, replace hard min/max by the smooth
                # (differentiable) versions so the envelope has a gradient.
                q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
                q_l[jj] = _extalg(q_htail[:, jj], alpha)
                for ix in np.arange(self._N_dv):
                    gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                    g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                    gtemp = _extgrad(q_htail[:, jj], alpha)
                    g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
        h_u, h_l = h_htail[0], h_htail[0] # h is same for all ECDFs
        t_u = [self._ftarg_u(hi) for hi in h_u]
        t_l = [self._ftarg_l(hi) for hi in h_u]
        # Cache the curves for getHorsetail()
        self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
        self._qh, self._hh = q_htail, h_htail
        self._tl, self._tu = t_l, t_u
        self._qis = None
        # Mean squared mismatch between each bound and its target
        Du = (1./M_prob)*sum((q_u - t_u)**2)
        Dl = (1./M_prob)*sum((q_l - t_l)**2)
        dhat = np.sqrt(Du + Dl)
        if self.verbose:
            print('Metric: ' + str(dhat))
        if grad_samples is not None:
            for ix in np.arange(self._N_dv):
                Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
                Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])
            # Chain rule through the square root
            dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
            if self.verbose:
                print('Gradient: ' + str([g for g in dhat_grad]))
            return dhat, dhat_grad
        else:
            return dhat
    def _getKernelParameters(self, q_samples):
        """Return (integration points, bandwidth) for the kernel metric.

        The bandwidth defaults to a scaled Scott's-rule estimate on first
        use; the integration grid defaults to a wide linspace and is then
        truncated to the neighbourhood of the current samples.  Both are
        cached on the instance.
        """
        # If kernel bandwidth not specified, find it using Scott's rule
        if self.kernel_bandwidth is None:
            if len(self.prob_uncertainties) > 0:
                # Guard against a degenerate (near-constant) sample set,
                # where Scott's rule would give a zero bandwidth.
                if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                    bw = 1e-6
                else:
                    bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                              *np.std(q_samples[0,:]))
            else:
                bw = 1e-3
            self.kernel_bandwidth = bw
        else:
            bw = self.kernel_bandwidth
        ## Initialize arrays and prepare calculation
        q_min = np.amin(q_samples)
        q_max = np.amax(q_samples)
        if self.integration_points is None:
            # Default grid: 3x the sample range, cached for later calls
            q_range = q_max - q_min
            qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
            self.integration_points = qis_full
        else:
            qis_full = np.array(self.integration_points)
        ii_low, ii_high = 0, len(qis_full)
        # First grid point comfortably above the samples (20 bandwidths out)
        try:
            ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                    qi > q_max + 20*bw)
        except StopIteration:
            warnings.warn('Sample found higher than range of integration points')
        # Last grid point comfortably below them (scan the reversed grid)
        try:
            iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                    qi < q_min - 20*bw)
            ii_low = len(qis_full) - (iiN_low+1)
        except StopIteration:
            warnings.warn('Sample found lower than range of integration points')
        qis = qis_full[ii_low:ii_high+1] # Only evaluate over range of samples
        self._qis = qis
        return qis, bw
    def _evalMetricKernel(self, q_samples, grad_samples=None):
        """Evaluate the metric via kernel-smoothed CDFs on integration
        points, a differentiable approximation of the empirical horsetail.

        Returns the metric value, plus its gradient when grad_samples is
        given.
        """
        qis, bw = self._getKernelParameters(q_samples)
        N_quad = len(qis)
        M_prob = self.samples_prob
        M_int = self.samples_int
        # Smooth min/max sharpness; irrelevant with a single interval sample.
        if M_int > 1:
            alpha = self.alpha
        else:
            alpha = 1
        # One column per interval sample: each holds a smoothed CDF on qis.
        fhtail = np.zeros([N_quad, M_int])
        qhtail = np.zeros([N_quad, M_int])
        if grad_samples is not None:
            fht_grad = np.zeros([N_quad, M_int, self._N_dv])
            hu_grad = np.zeros([N_quad, self._N_dv])
            hl_grad = np.zeros([N_quad, self._N_dv])
            Du_grad = np.zeros(self._N_dv)
            Dl_grad = np.zeros(self._N_dv)
        # ALGORITHM 1 from publication
        # Evaluate all individual CDFs and their gradients
        for mm in np.arange(M_int):
            qjs = q_samples[mm, :]
            # Pairwise differences integration point minus sample
            rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
            if grad_samples is not None:
                Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                        ktype=self.kernel_type, bGrad=True)
                for ix in np.arange(self._N_dv):
                    grad_js = grad_samples[mm, :, ix]
                    fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
            else:
                Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                        bGrad=False)
            # Row sums of the kernel matrix give the smoothed CDF values
            fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
            qhtail[:, mm] = qis
        # ALGORITHM 2 from publication
        # Find horsetail curves - envelope of the CDFs and their gradients
        # In Matrix form
        if grad_samples is None:
            hu = np.max(fhtail, axis=1).flatten()
            hl = np.min(fhtail, axis=1).flatten()
        else:
            # Smooth max/min keep the envelope differentiable
            hu = _extalg(fhtail, alpha, axis=1).flatten()
            hl = _extalg(fhtail, -1*alpha, axis=1).flatten()
            Su_prime = _extgrad(fhtail, alpha, axis=1)
            Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
            for kx in np.arange(self._N_dv):
                fis_grad = fht_grad[:, :, kx]
                for ii in np.arange(N_quad):
                    hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                    hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])
        # ALGORITHM 3 from publication
        # Evaluate overall metric and gradient using matrix multiplication
        tu = np.array([self._ftarg_u(hi) for hi in hu])
        tl = np.array([self._ftarg_l(hi) for hi in hl])
        Du = _matrix_integration(qis, hu, tu)
        Dl = _matrix_integration(qis, hl, tl)
        dhat = float(np.sqrt(Du + Dl))
        # Cache the curves for getHorsetail()
        self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
        self._qh, self._hh = qhtail, fhtail
        self._tl, self._tu = tl, tu
        if self.verbose:
            print('Metric: ' + str(dhat))
        if grad_samples is not None:
            # Finite-difference derivatives of the targets wrt h
            tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
            tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
            for kx in np.arange(self._N_dv):
                Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
                Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)
            # Chain rule through the square root
            dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
            if self.verbose:
                print('Gradient: ' + str([g for g in dhat_grad]))
            return dhat, dhat_grad
        else:
            return dhat
    def _makeSurrogates(self, x):
        """Fit surrogate model(s) to fqoi (and its gradient) at design x.

        Returns (fqoi, fgrad, surr_jac): callables of the uncertainties
        only.  fgrad and surr_jac are False when no gradients are
        propagated.
        """
        # Get quadrature points
        if self.surrogate_points is None:
            # Default: full tensor grid, 5 points per uncertain dimension
            # on [-1, 1].
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
                    copy=False)
            u_sparse = np.vstack([m.flatten() for m in mesh]).T
        else:
            u_sparse = self.surrogate_points
        N_sparse = u_sparse.shape[0]
        q_sparse = np.zeros(N_sparse)
        # Get surrogates in correct form
        if not self.jac:
            # No gradients: fit the qoi surrogate only.
            for iu, u in enumerate(u_sparse):
                q_sparse[iu] = self.fqoi(x, u)
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            def fqoi(u):
                return surr_qoi(u)
            fgrad = False
            surr_jac = False
        else:
            g_sparse = np.zeros([N_sparse, self._N_dv])
            for iu, u in enumerate(u_sparse):
                if isinstance(self.jac, bool) and self.jac:
                    # jac=True: fqoi returns (q, grad) together
                    q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
                else:
                    # jac is a separate gradient function
                    q_sparse[iu] = self.fqoi(x, u)
                    g_sparse[iu, :] = self.jac(x, u)
            if not self.surrogate_jac:
                # Fit one surrogate per partial derivative using the same
                # constructor as the qoi surrogate.
                fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                for k in np.arange(self._N_dv):
                    fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])
                def surr_grad(u):
                    return [f(u) for f in fpartial]
            else:
                if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                    # surrogate_jac=True: one constructor returns both models
                    surr_qoi, surr_grad = self.surrogate(
                            u_sparse, q_sparse, g_sparse)
                else:
                    # surrogate_jac is a dedicated gradient-model constructor
                    surr_qoi = self.surrogate(u_sparse, q_sparse)
                    surr_grad = self.surrogate_jac(u_sparse, g_sparse)
            def fqoi(u):
                return(surr_qoi(u))
            def fgrad(u):
                return(surr_grad(u))
            surr_jac = fgrad
        return fqoi, fgrad, surr_jac
    def _getParameterSamples(self):
        """Draw (or re-use cached) samples of all uncertain parameters.

        Returns an array of shape (samples_int, samples_prob, N_u); along
        the last axis the interval uncertainties come first, followed by
        the probabilistic ones.
        """
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        get_new = True
        # Re-use cached samples only if they match the current dimensions.
        if self.reuse_samples and self.u_samples is not None:
            if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
                if self.verbose:
                    print('''Stored samples do not match current dimensions,
                            getting new samples''')
            else:
                get_new = False
        if get_new:
            if self.verbose:
                print('Getting uncertain parameter samples')
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            N_prob = len(self.prob_uncertainties)
            N_int = len(self.int_uncertainties)
#            u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])
            # NOTE(review): the two zero arrays below are overwritten by the
            # np.tile calls further down, so these initialisations are dead.
            u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
                len(self.prob_uncertainties)])
            u_samples_int = np.zeros([self.samples_int, self.samples_prob,
                len(self.int_uncertainties)])
            u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
            for kk, uk in enumerate(self.int_uncertainties):
                if callable(uk):
                    # User-supplied sampler callable.
                    # NOTE(review): this checks the sampler output against
                    # samples_prob but assigns into a length-samples_int
                    # column — looks like it should be samples_int; confirm.
                    samps = np.array(uk()).flatten()
                    if len(samps) != self.samples_prob:
                        raise Exception('Number of samples returned not equal ' +
                                'to specified number of samples: please set number of ' +
                                'samples with samples_prob attribute')
                    else:
                        u_ints[:, kk] = samps
                elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds
                    lb, ub = uk[0], uk[1]
                    u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
                    # Force the endpoints so the interval bounds themselves
                    # are always included in the samples.
                    u_ints[0, kk] = lb
                    u_ints[-1, kk] = ub
                elif hasattr(uk, 'getSample'):
                    for ii in np.arange(self.samples_int):
                        u_ints[ii, kk] = uk.getSample()
                else:
                    raise TypeError('Unsupported interval uncertainty type')
            # Repeat each interval sample across the probabilistic axis
            u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))
            u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
            for kk, uk in enumerate(self.prob_uncertainties):
                if callable(uk):
                    # User-supplied sampler callable (must return
                    # samples_prob values).
                    samps = np.array(uk()).flatten()
                    if len(samps) != self.samples_prob:
                        raise Exception('Number of samples returned not equal ' +
                                'to specified number of samples: please set number of ' +
                                'samples with samples_prob attribute')
                    else:
                        u_probs[:, kk] = samps
                elif hasattr(uk, 'getSample'):
                    for jj in np.arange(self.samples_prob):
                        u_probs[jj, kk] = uk.getSample()
                else:
                    raise TypeError('Unsupported probabilistic uncertainty type')
            # Repeat the probabilistic samples across the interval axis
            u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))
            # Interval uncertainties first along the last axis, then
            # probabilistic — callers rely on this ordering.
            u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
            self.u_samples = u_samples
            return u_samples
        else:
            if self.verbose:
                print('Re-using stored samples')
            return self.u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
# Array of shape (M_int, M_prob)
grad_samples = None
q_samples = np.zeros([self.samples_int, self.samples_prob])
if not jac:
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
else:
grad_samples = np.zeros([self.samples_int, self.samples_prob,
self._N_dv])
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
if isinstance(jac, bool) and jac:
(q, grad) = fqoi(u_samples[ii, jj])
q_samples[ii, jj] = float(q)
grad_samples[ii, jj, :] = [_ for _ in grad]
else:
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])
self.grad_samples = grad_samples
self.q_samples = q_samples
return q_samples, grad_samples
|
lwcook/horsetail-matching | horsetailmatching/hm.py | HorsetailMatching.evalMetric | python | def evalMetric(self, x, method=None):
'''Evaluates the horsetail matching metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param str method: method to use to evaluate the metric ('empirical' or
'kernel')
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theHM.evalMetric(x0)
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
if self.verbose:
print('----------')
print('At design: ' + str(x))
q_samples, grad_samples = self.evalSamples(x)
if self.verbose:
print('Evaluating metric')
return self.evalMetricFromSamples(q_samples, grad_samples, method) | Evaluates the horsetail matching metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param str method: method to use to evaluate the metric ('empirical' or
'kernel')
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theHM.evalMetric(x0) | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L328-L363 | [
"def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):\n '''Evaluates the density matching metric from given samples of the quantity\n of interest and gradient instead of evaluating them at a design.\n\n :param np.ndarray q_samples: samples of the quantity of interest,\n size (M_int, M_prob)\n :param np.ndarray grad_samples: samples of the gradien,\n size (M_int, M_prob, n_x)\n\n :return: metric_value - value of the metric\n\n :rtype: float\n\n '''\n return self._evalDensityMetric(q_samples, grad_samples)\n",
" def evalSamples(self, x):\n '''Evalautes the samples of quantity of interest and its gradient\n (if supplied) at the given values of the design variables\n\n :param iterable x: values of the design variables, this is passed as\n the first argument to the function fqoi\n\n :return: (values of the quantity of interest, values of the gradient)\n :rtype: Tuple\n '''\n\n # Make sure dimensions are correct\n# u_sample_dimensions = self._processDimensions()\n\n self._N_dv = len(_makeIter(x))\n\n if self.verbose:\n print('Evaluating surrogate')\n if self.surrogate is None:\n def fqoi(u):\n return self.fqoi(x, u)\n def fgrad(u):\n return self.jac(x, u)\n jac = self.jac\n else:\n fqoi, fgrad, surr_jac = self._makeSurrogates(x)\n jac = surr_jac\n\n u_samples = self._getParameterSamples()\n\n if self.verbose:\n print('Evaluating quantity of interest at samples')\n q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)\n\n return q_samples, grad_samples\n",
" def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):\n '''Evaluates the horsetail matching metric from given samples of the quantity\n of interest and gradient instead of evaluating them at a design.\n\n :param np.ndarray q_samples: samples of the quantity of interest,\n size (M_int, M_prob)\n :param np.ndarray grad_samples: samples of the gradien,\n size (M_int, M_prob, n_x)\n\n :return: metric_value - value of the metric\n\n :rtype: float\n\n '''\n # Make sure dimensions are correct\n# u_sample_dimensions = self._processDimensions()\n\n q_samples = np.array(q_samples)\n if not (q_samples.shape[0] == self.samples_int and\n q_samples.shape[1] == self.samples_prob):\n raise ValueError('Shape of q_samples should be [M_int, M_prob]')\n\n if grad_samples is not None:\n grad_samples = np.array(grad_samples)\n if not (grad_samples.shape[0] == self.samples_int and\n grad_samples.shape[1] == self.samples_prob):\n raise ValueError('''Shape of grad_samples\n should be [M_int, M_prob, n_dv]''')\n\n if method is None:\n method = self.method\n\n if method.lower() == 'empirical':\n return self._evalMetricEmpirical(q_samples, grad_samples)\n elif method.lower() == 'kernel':\n return self._evalMetricKernel(q_samples, grad_samples)\n else:\n raise ValueError('Unsupported metric evalation method')\n"
] | class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
        Alternatively each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
        Alternatively each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetil matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
        of size (num_quadrature_points). It should return a function that
        predicts the qoi at an arbitrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
        design variables, if False will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
ftarget=None, jac=False, method=None,
samples_prob=100, samples_int=50, integration_points=None,
kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False):
self.fqoi = fqoi
# self.uncertain_parameters = uncertain_parameters
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = int_uncertainties
self.ftarget = ftarget
self.jac = jac
self.method = method # Must be done after setting jac
self.samples_prob = samples_prob
self.samples_int = samples_int
self.integration_points = integration_points
self.kernel_bandwidth = kernel_bandwidth
self.kernel_type = kernel_type
self.alpha = alpha
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
    # Properties with non-trivial setting behaviour; each setter normalises
    # or validates its input before storing the backing attribute.
    @property
    def prob_uncertainties(self):
        # Probabilistic uncertain parameters (samplers or objects with
        # getSample()).
        return self._prob_uncertainties
    @prob_uncertainties.setter
    def prob_uncertainties(self, params):
        # Normalise a single parameter to a list via _makeIter.
        self._prob_uncertainties = _makeIter(params)
    @property
    def int_uncertainties(self):
        # Interval uncertain parameters (samplers, (lb, ub) pairs, or
        # objects with getSample()).
        return self._int_uncertainties
    @int_uncertainties.setter
    def int_uncertainties(self, params):
        self._int_uncertainties = _makeIter(params)
    @property
    def samples_prob(self):
        # Number of probabilistic samples per interval sample.
        return self._samples_prob
    @samples_prob.setter
    def samples_prob(self, value):
        # With no probabilistic uncertainties one dummy sample suffices.
        if len(self.prob_uncertainties) > 0:
            self._samples_prob = value
        else:
            self._samples_prob = 1
    @property
    def samples_int(self):
        # Number of interval-uncertainty samples (outer loop).
        return self._samples_int
    @samples_int.setter
    def samples_int(self, value):
        # With no interval uncertainties one dummy sample suffices.
        if len(self.int_uncertainties) > 0:
            self._samples_int = value
        else:
            self._samples_int = 1
    @property
    def method(self):
        # 'empirical' or 'kernel'.
        return self._method
    @method.setter
    def method(self, value):
        # Default depends on gradient propagation: the empirical metric is
        # not differentiable, so with jac set the kernel method is used.
        if value is None:
            if self.jac is False:
                self._method = 'empirical'
            else:
                self._method = 'kernel'
        else:
            self._method = value
    @property
    def ftarget(self):
        # Target inverse CDF: one callable, or an (upper, lower) pair.
        return self._ftarget
    @ftarget.setter
    def ftarget(self, value):
        def standardTarget(h):
            # Default target: t(h) = 0.
            return 0
        try:
            iter(value)
            self._ftarg_u = value[0]
            self._ftarg_l = value[1]
            self._ftarget = value
        # NOTE(review): bare except also catches KeyboardInterrupt and
        # SystemExit; narrowing to (TypeError, IndexError) would be safer.
        except:
            if value is None:
                self._ftarget = standardTarget
            else:
                self._ftarget = value
            self._ftarg_u = self._ftarget
            self._ftarg_l = self._ftarget
    @property
    def u_samples(self):
        # Cached samples, shape (samples_int, samples_prob, N_u), or None.
        return self._u_samples
    @u_samples.setter
    def u_samples(self, samples):
        if samples is not None:
            N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
            if (not isinstance(samples, np.ndarray) or
                    samples.shape != (self.samples_int, self.samples_prob, N_u)):
                # NOTE(review): message renders without a space ("size(...")
                # and misspells "uncertainties".
                raise TypeError('u_samples should be a np.array of size'
                        '(samples_int, samples_prob, num_uncertanities)')
        self._u_samples = samples
    @property
    def kernel_type(self):
        # 'gaussian', 'uniform', or 'triangle'.
        return self._kernel_type
    @kernel_type.setter
    def kernel_type(self, value):
        allowed_types = ['gaussian', 'uniform', 'triangle']
        if value not in allowed_types:
            # NOTE(review): message renders as "one ofgaussian, ..." —
            # missing space after 'of'.
            raise ValueError('Kernel type must be one of'+
                    ', '.join([str(t) for t in allowed_types]))
        else:
            self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
    def evalSamples(self, x):
        '''Evaluates the samples of quantity of interest and its gradient
        (if supplied) at the given values of the design variables

        :param iterable x: values of the design variables, this is passed as
            the first argument to the function fqoi

        :return: (values of the quantity of interest, values of the gradient)
        :rtype: Tuple
        '''
        # Number of design variables, used to size gradient arrays later.
        self._N_dv = len(_makeIter(x))

        # Note: this message is printed whether or not a surrogate is used.
        if self.verbose:
            print('Evaluating surrogate')
        if self.surrogate is None:
            # No surrogate: close over x so downstream code only sees
            # functions of the uncertainties.
            def fqoi(u):
                return self.fqoi(x, u)
            def fgrad(u):
                return self.jac(x, u)
            jac = self.jac
        else:
            # Surrogate path: fit models at quadrature points and sample
            # those instead of the (presumably expensive) fqoi.
            fqoi, fgrad, surr_jac = self._makeSurrogates(x)
            jac = surr_jac

        u_samples = self._getParameterSamples()

        if self.verbose:
            print('Evaluating quantity of interest at samples')
        q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)

        return q_samples, grad_samples
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradien,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
raise ValueError('Unsupported metric evalation method')
    def getHorsetail(self):
        '''Function that gets vectors of the horsetail plot at the last design
        evaluated.

        :return: upper_curve, lower_curve, CDFs - returns three parameters,
            the first two are tuples containing pairs of x/y vectors of the
            upper and lower bounds on the CDFs (the horsetail plot). The
            third parameter is a list of x/y tuples for individual CDFs
            propagated at each sampled value of the interval uncertainties

        :raises ValueError: if no metric evaluation has been performed yet

        *Example Usage*::

            >>> def myFunc(x, u): return x[0]*x[1] + u
            >>> u = UniformParameter()
            >>> theHM = HorsetailMatching(myFunc, u)
            >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
            >>> matplotlib.pyplot(x1, y1, 'b')
            >>> matplotlib.pyplot(x2, y2, 'b')
            >>> for (x, y) in CDFs:
            ...     matplotlib.pyplot(x, y, 'k:')
            >>> matplotlib.pyplot.show()
        '''
        # _ql/_qu/_hl/_hu/_qh/_hh are cached by the metric evaluation; their
        # presence means a metric has been evaluated at least once.
        if hasattr(self, '_ql'):
            ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
            qh, hh = self._qh, self._hh
            # _qis is the kernel integration grid (None for the empirical
            # method); extend the bound curves over its full range.
            if self._qis is not None:
                ql, hl = _appendPlotArrays(ql, hl, self._qis)
                qu, hu = _appendPlotArrays(qu, hu, self._qis)
            CDFs = []
            for qi, hi in zip(qh, hh):
                CDFs.append((qi, hi))
            # Each curve is returned with the target evaluated at its h grid.
            upper_target = [self._ftarg_u(h) for h in hu]
            upper_curve = (qu, hu, upper_target)
            lower_target = [self._ftarg_l(h) for h in hl]
            lower_curve = (ql, hl, lower_target)
            return upper_curve, lower_curve, CDFs
        else:
            raise ValueError('''The metric has not been evaluated at any
                design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
def _evalMetricEmpirical(self, q_samples, grad_samples=None):
    """Evaluate the horsetail matching metric from samples using
    empirical CDFs (one ECDF per sampled interval-uncertainty value).

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :param np.ndarray grad_samples: gradient samples, shape
        (M_int, M_prob, n_dv), or None for a gradient-free evaluation
    :return: the metric value, or (metric value, gradient) when
        grad_samples is given
    """
    M_prob = self.samples_prob
    M_int = self.samples_int

    # With a single interval sample there is no envelope to take over
    # ECDFs, so the smoothing parameter is irrelevant; alpha=1 keeps the
    # smooth extremum well conditioned in that case.
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    h_htail = np.zeros([M_int, M_prob])
    q_htail = np.zeros([M_int, M_prob])
    q_l = np.zeros(M_prob)
    q_u = np.zeros(M_prob)
    if grad_samples is not None:
        g_htail = np.zeros([M_int, M_prob, self._N_dv])
        g_l = np.zeros([M_prob, self._N_dv])
        g_u = np.zeros([M_prob, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    for ii in np.arange(M_int):
        # Get empirical CDF by sorting samples at each value of intervals
        sortinds = np.argsort(q_samples[ii, :])
        q_htail[ii, :] = q_samples[ii, sortinds]
        M = q_samples.shape[1]
        # Midpoint plotting positions: h_j = (j + 0.5)/M for j = 0..M-1
        h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]

        if grad_samples is not None:
            # Reorder the gradient samples consistently with the sort
            for ix in np.arange(self._N_dv):
                g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]

    for jj in np.arange(M_prob):
        # Envelope of the ECDFs at each CDF height: q_u is the min of
        # the q values across interval samples, q_l the max
        q_u[jj] = min(q_htail[:, jj])
        q_l[jj] = max(q_htail[:, jj])

        if grad_samples is not None:
            # When gradients are propagated, replace the hard min/max
            # with a smooth (differentiable) extremum approximation
            q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
            q_l[jj] = _extalg(q_htail[:, jj], alpha)
            for ix in np.arange(self._N_dv):
                gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                gtemp = _extgrad(q_htail[:, jj], alpha)
                g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])

    h_u, h_l = h_htail[0], h_htail[0]  # h is same for all ECDFs
    t_u = [self._ftarg_u(hi) for hi in h_u]
    t_l = [self._ftarg_l(hi) for hi in h_u]

    # Stash the curves so getHorsetail() can plot this evaluation
    self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
    self._qh, self._hh = q_htail, h_htail
    self._tl, self._tu = t_l, t_u
    self._qis = None

    # Mean squared mismatch between each envelope and its target
    Du = (1./M_prob)*sum((q_u - t_u)**2)
    Dl = (1./M_prob)*sum((q_l - t_l)**2)
    dhat = np.sqrt(Du + Dl)

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Chain rule through the squared mismatch terms
        for ix in np.arange(self._N_dv):
            Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
            Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])

        # d/dx sqrt(Du + Dl)
        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad

    else:
        return dhat
def _getKernelParameters(self, q_samples):
    """Determine the kernel bandwidth and integration points to use.

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :return: (qis, bw) - integration point values restricted to the
        region around the samples, and the kernel bandwidth
    """
    # If kernel bandwidth not specified, find it using Scott's rule
    if self.kernel_bandwidth is None:
        if len(self.prob_uncertainties) > 0:
            # Guard against a degenerate (near-constant) sample set
            if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                bw = 1e-6
            else:
                bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                           *np.std(q_samples[0,:]))
        else:
            bw = 1e-3
        # Cache so subsequent evaluations use the same bandwidth
        self.kernel_bandwidth = bw
    else:
        bw = self.kernel_bandwidth

    ## Initialize arrays and prepare calculation
    q_min = np.amin(q_samples)
    q_max = np.amax(q_samples)
    if self.integration_points is None:
        # Default grid: spans three times the sample range
        q_range = q_max - q_min
        qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
        self.integration_points = qis_full
    else:
        qis_full = np.array(self.integration_points)

    # Restrict the grid to where the kernel CDFs actually vary
    # (within 20 bandwidths of the samples)
    ii_low, ii_high = 0, len(qis_full)
    try:
        ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                                qi > q_max + 20*bw)
    except StopIteration:
        warnings.warn('Sample found higher than range of integration points')
    try:
        # Scan from the top down to find the last point below the samples
        iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                               qi < q_min - 20*bw)
        ii_low = len(qis_full) - (iiN_low+1)
    except StopIteration:
        warnings.warn('Sample found lower than range of integration points')

    qis = qis_full[ii_low:ii_high+1]  # Only evaluate over range of samples
    self._qis = qis
    return qis, bw
def _evalMetricKernel(self, q_samples, grad_samples=None):
    """Evaluate the horsetail matching metric using kernel-smoothed
    CDFs, which makes the metric differentiable.

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :param np.ndarray grad_samples: gradient samples, shape
        (M_int, M_prob, n_dv), or None for a gradient-free evaluation
    :return: the metric value, or (metric value, gradient) when
        grad_samples is given
    """
    qis, bw = self._getKernelParameters(q_samples)

    N_quad = len(qis)
    M_prob = self.samples_prob
    M_int = self.samples_int

    # With a single interval sample there is no envelope to smooth
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    fhtail = np.zeros([N_quad, M_int])
    qhtail = np.zeros([N_quad, M_int])
    if grad_samples is not None:
        fht_grad = np.zeros([N_quad, M_int, self._N_dv])
        hu_grad = np.zeros([N_quad, self._N_dv])
        hl_grad = np.zeros([N_quad, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    # ALGORITHM 1 from publication
    # Evaluate all individual CDFs and their gradients
    for mm in np.arange(M_int):
        qjs = q_samples[mm, :]
        # rmat[i, j] = qis[i] - qjs[j]: kernel argument for every
        # (integration point, sample) pair
        rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
        if grad_samples is not None:
            Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                ktype=self.kernel_type, bGrad=True)
            for ix in np.arange(self._N_dv):
                grad_js = grad_samples[mm, :, ix]
                fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
        else:
            Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                bGrad=False)
        # Sum kernel contributions over the samples to get each CDF
        fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
        qhtail[:, mm] = qis

    # ALGORITHM 2 from publication
    # Find horsetail curves - envelope of the CDFs and their gradients
    # In Matrix form
    if grad_samples is None:
        hu = np.max(fhtail, axis=1).flatten()
        hl = np.min(fhtail, axis=1).flatten()
    else:
        # Smooth extremum so the envelope is differentiable
        hu = _extalg(fhtail, alpha, axis=1).flatten()
        hl = _extalg(fhtail, -1*alpha, axis=1).flatten()
        Su_prime = _extgrad(fhtail, alpha, axis=1)
        Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
        for kx in np.arange(self._N_dv):
            fis_grad = fht_grad[:, :, kx]
            for ii in np.arange(N_quad):
                hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])

    # ALGORITHM 3 from publication
    # Evaluate overall metric and gradient using matrix multiplication
    tu = np.array([self._ftarg_u(hi) for hi in hu])
    tl = np.array([self._ftarg_l(hi) for hi in hl])
    Du = _matrix_integration(qis, hu, tu)
    Dl = _matrix_integration(qis, hl, tl)
    dhat = float(np.sqrt(Du + Dl))

    # Stash the curves so getHorsetail() can plot this evaluation
    self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
    self._qh, self._hh = qhtail, fhtail
    self._tl, self._tu = tl, tu

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Finite-difference the target functions for the chain rule
        tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
        tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
        for kx in np.arange(self._N_dv):
            Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
            Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)

        # d/dx sqrt(Du + Dl)
        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad
    else:
        return dhat
def _makeSurrogates(self, x):
    """Fit surrogate models of the qoi (and optionally its gradient) at
    the design point x.

    :param iterable x: design variable values
    :return: (fqoi, fgrad, surr_jac) - fqoi(u) predicts the qoi;
        fgrad(u) predicts the gradient (or is False when no gradients
        are propagated); surr_jac mirrors the jac convention expected
        by _evalSamples (False, True-like combined return, or a
        separate gradient function).
    """
    # Get quadrature points
    if self.surrogate_points is None:
        # Default: 5-point tensor grid on [-1, 1] in each uncertain
        # dimension
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
            copy=False)
        u_sparse = np.vstack([m.flatten() for m in mesh]).T
    else:
        u_sparse = self.surrogate_points

    N_sparse = u_sparse.shape[0]
    q_sparse = np.zeros(N_sparse)

    # Get surrogates in correct form
    if not self.jac:
        # No gradients: fit a single surrogate to the qoi values
        for iu, u in enumerate(u_sparse):
            q_sparse[iu] = self.fqoi(x, u)

        surr_qoi = self.surrogate(u_sparse, q_sparse)

        def fqoi(u):
            return surr_qoi(u)

        fgrad = False
        surr_jac = False

    else:
        g_sparse = np.zeros([N_sparse, self._N_dv])
        for iu, u in enumerate(u_sparse):
            if isinstance(self.jac, bool) and self.jac:
                # jac=True means fqoi returns (q, grad) together
                q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
            else:
                # jac is a separate gradient function
                q_sparse[iu] = self.fqoi(x, u)
                g_sparse[iu, :] = self.jac(x, u)

        if not self.surrogate_jac:
            # Fit an independent surrogate to each gradient component
            # using the same surrogate constructor as the qoi
            fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            for k in np.arange(self._N_dv):
                fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])

            def surr_grad(u):
                return [f(u) for f in fpartial]
        else:
            if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                # surrogate_jac=True: the surrogate constructor fits
                # qoi and gradient jointly from a third argument
                surr_qoi, surr_grad = self.surrogate(
                    u_sparse, q_sparse, g_sparse)
            else:
                # surrogate_jac is a dedicated gradient-surrogate builder
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                surr_grad = self.surrogate_jac(u_sparse, g_sparse)

        def fqoi(u):
            return(surr_qoi(u))

        def fgrad(u):
            return(surr_grad(u))
        surr_jac = fgrad

    return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
get_new = True
if self.reuse_samples and self.u_samples is not None:
if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
if self.verbose:
print('''Stored samples do not match current dimensions,
getting new samples''')
else:
get_new = False
if get_new:
if self.verbose:
print('Getting uncertain parameter samples')
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
N_prob = len(self.prob_uncertainties)
N_int = len(self.int_uncertainties)
# u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])
u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
len(self.prob_uncertainties)])
u_samples_int = np.zeros([self.samples_int, self.samples_prob,
len(self.int_uncertainties)])
u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
for kk, uk in enumerate(self.int_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_ints[:, kk] = samps
elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds
lb, ub = uk[0], uk[1]
u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
u_ints[0, kk] = lb
u_ints[-1, kk] = ub
elif hasattr(uk, 'getSample'):
for ii in np.arange(self.samples_int):
u_ints[ii, kk] = uk.getSample()
else:
raise TypeError('Unsupported interval uncertainty type')
u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))
u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
for kk, uk in enumerate(self.prob_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_probs[:, kk] = samps
elif hasattr(uk, 'getSample'):
for jj in np.arange(self.samples_prob):
u_probs[jj, kk] = uk.getSample()
else:
raise TypeError('Unsupported probabilistic uncertainty type')
u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))
u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
self.u_samples = u_samples
return u_samples
else:
if self.verbose:
print('Re-using stored samples')
return self.u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
# Array of shape (M_int, M_prob)
grad_samples = None
q_samples = np.zeros([self.samples_int, self.samples_prob])
if not jac:
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
else:
grad_samples = np.zeros([self.samples_int, self.samples_prob,
self._N_dv])
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
if isinstance(jac, bool) and jac:
(q, grad) = fqoi(u_samples[ii, jj])
q_samples[ii, jj] = float(q)
grad_samples[ii, jj, :] = [_ for _ in grad]
else:
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])
self.grad_samples = grad_samples
self.q_samples = q_samples
return q_samples, grad_samples
|
lwcook/horsetail-matching | horsetailmatching/hm.py | HorsetailMatching.evalMetricFromSamples | python | def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradient,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
raise ValueError('Unsupported metric evalation method') | Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradient,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L365-L402 | [
"def _evalMetricEmpirical(self, q_samples, grad_samples=None):\n\n M_prob = self.samples_prob\n M_int = self.samples_int\n\n if M_int > 1:\n alpha = self.alpha\n else:\n alpha = 1\n\n h_htail = np.zeros([M_int, M_prob])\n q_htail = np.zeros([M_int, M_prob])\n q_l = np.zeros(M_prob)\n q_u = np.zeros(M_prob)\n if grad_samples is not None:\n g_htail = np.zeros([M_int, M_prob, self._N_dv])\n g_l = np.zeros([M_prob, self._N_dv])\n g_u = np.zeros([M_prob, self._N_dv])\n Du_grad = np.zeros(self._N_dv)\n Dl_grad = np.zeros(self._N_dv)\n\n for ii in np.arange(M_int):\n # Get empirical CDF by sorting samples at each value of intervals\n sortinds = np.argsort(q_samples[ii, :])\n q_htail[ii, :] = q_samples[ii, sortinds]\n M = q_samples.shape[1]\n h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]\n\n if grad_samples is not None:\n for ix in np.arange(self._N_dv):\n g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]\n\n for jj in np.arange(M_prob):\n q_u[jj] = min(q_htail[:, jj])\n q_l[jj] = max(q_htail[:, jj])\n\n if grad_samples is not None:\n q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)\n q_l[jj] = _extalg(q_htail[:, jj], alpha)\n for ix in np.arange(self._N_dv):\n gtemp = _extgrad(q_htail[:, jj], -1*alpha)\n g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])\n gtemp = _extgrad(q_htail[:, jj], alpha)\n g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])\n\n h_u, h_l = h_htail[0], h_htail[0] # h is same for all ECDFs\n t_u = [self._ftarg_u(hi) for hi in h_u]\n t_l = [self._ftarg_l(hi) for hi in h_u]\n\n self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u\n self._qh, self._hh = q_htail, h_htail\n self._tl, self._tu = t_l, t_u\n self._qis = None\n\n Du = (1./M_prob)*sum((q_u - t_u)**2)\n Dl = (1./M_prob)*sum((q_l - t_l)**2)\n dhat = np.sqrt(Du + Dl)\n\n if self.verbose:\n print('Metric: ' + str(dhat))\n\n if grad_samples is not None:\n for ix in np.arange(self._N_dv):\n Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])\n Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, 
ix])\n\n dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))\n if self.verbose:\n print('Gradient: ' + str([g for g in dhat_grad]))\n\n return dhat, dhat_grad\n\n else:\n return dhat\n"
] | class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatively each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
Alternatiely each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetail matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a function that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
        ftarget=None, jac=False, method=None,
        samples_prob=100, samples_int=50, integration_points=None,
        kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
        surrogate=None, surrogate_points=None, surrogate_jac=False,
        reuse_samples=True, verbose=False):
    # See the class docstring for the meaning of every argument; most
    # assignments below go through property setters that validate or
    # normalise the value.
    # NOTE(review): int_uncertainties=[] is a mutable default argument;
    # it appears to only be passed through the setter (_makeIter) and
    # never mutated, but confirm before relying on that.

    self.fqoi = fqoi
    # self.uncertain_parameters = uncertain_parameters
    self.prob_uncertainties = prob_uncertainties
    self.int_uncertainties = int_uncertainties
    self.ftarget = ftarget
    self.jac = jac
    # The method setter inspects self.jac to pick a default, so this
    # assignment must come after jac is set.
    self.method = method # Must be done after setting jac
    self.samples_prob = samples_prob
    self.samples_int = samples_int
    self.integration_points = integration_points
    self.kernel_bandwidth = kernel_bandwidth
    self.kernel_type = kernel_type
    self.alpha = alpha
    self.reuse_samples = reuse_samples
    self.u_samples = None
    self.surrogate = surrogate
    self.surrogate_points = surrogate_points
    self.surrogate_jac = surrogate_jac
    self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
@property
def prob_uncertainties(self):
    # List of probabilistic uncertainties (parameter objects or
    # sampling callables); normalised to a list by the setter.
    return self._prob_uncertainties

@prob_uncertainties.setter
def prob_uncertainties(self, params):
    self._prob_uncertainties = _makeIter(params)

@property
def int_uncertainties(self):
    # List of interval uncertainties (parameter objects, (lb, ub)
    # tuples/lists, or sampling callables).
    return self._int_uncertainties

@int_uncertainties.setter
def int_uncertainties(self, params):
    self._int_uncertainties = _makeIter(params)

@property
def samples_prob(self):
    return self._samples_prob

@samples_prob.setter
def samples_prob(self, value):
    # Force a single probabilistic sample when there are no
    # probabilistic uncertainties to sample.
    if len(self.prob_uncertainties) > 0:
        self._samples_prob = value
    else:
        self._samples_prob = 1

@property
def samples_int(self):
    return self._samples_int

@samples_int.setter
def samples_int(self, value):
    # Force a single interval sample when there are no interval
    # uncertainties to sample.
    if len(self.int_uncertainties) > 0:
        self._samples_int = value
    else:
        self._samples_int = 1

@property
def method(self):
    return self._method

@method.setter
def method(self, value):
    # Default choice: empirical metric without gradients, kernel-based
    # (differentiable) metric when gradients are propagated.
    if value is None:
        if self.jac is False:
            self._method = 'empirical'
        else:
            self._method = 'kernel'
    else:
        self._method = value

@property
def ftarget(self):
    return self._ftarget

@ftarget.setter
def ftarget(self, value):
    # Accepts a single target function, an (upper, lower) pair of
    # target functions, or None (installs a zero target for both).
    def standardTarget(h):
        return 0
    try:
        # A pair of targets is detected by being iterable/indexable
        iter(value)
        self._ftarg_u = value[0]
        self._ftarg_l = value[1]
        self._ftarget = value
    except:
        if value is None:
            self._ftarget = standardTarget
        else:
            self._ftarget = value
        # Single target: use it for both the upper and lower bounds
        self._ftarg_u = self._ftarget
        self._ftarg_l = self._ftarget

@property
def u_samples(self):
    return self._u_samples

@u_samples.setter
def u_samples(self, samples):
    # Validate user-supplied samples against the expected 3D shape;
    # None is allowed (meaning "no cached samples").
    if samples is not None:
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        if (not isinstance(samples, np.ndarray) or
                samples.shape != (self.samples_int, self.samples_prob, N_u)):
            raise TypeError('u_samples should be a np.array of size'
                '(samples_int, samples_prob, num_uncertanities)')
    self._u_samples = samples

@property
def kernel_type(self):
    return self._kernel_type

@kernel_type.setter
def kernel_type(self, value):
    # Restrict to the kernel types accepted by the metric evaluation
    allowed_types = ['gaussian', 'uniform', 'triangle']
    if value not in allowed_types:
        raise ValueError('Kernel type must be one of'+
            ', '.join([str(t) for t in allowed_types]))
    else:
        self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
def evalSamples(self, x):
    '''Evaluates the samples of quantity of interest and its gradient
    (if supplied) at the given values of the design variables

    :param iterable x: values of the design variables, this is passed as
        the first argument to the function fqoi

    :return: (values of the quantity of interest, values of the gradient)
    :rtype: Tuple
    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()

    # Number of design variables, needed to size gradient arrays later
    self._N_dv = len(_makeIter(x))

    if self.verbose:
        print('Evaluating surrogate')

    if self.surrogate is None:
        # No surrogate: bind the design point into the true qoi and
        # jacobian functions and sample those directly.
        def fqoi(u):
            return self.fqoi(x, u)

        def fgrad(u):
            return self.jac(x, u)
        jac = self.jac
    else:
        # Fit surrogates at this design point and sample those instead
        # of the (potentially expensive) true functions.
        fqoi, fgrad, surr_jac = self._makeSurrogates(x)
        jac = surr_jac

    u_samples = self._getParameterSamples()

    if self.verbose:
        print('Evaluating quantity of interest at samples')
    q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)

    return q_samples, grad_samples
def evalMetric(self, x, method=None):
    """Evaluate the horsetail matching metric at a design point.

    :param iterable x: values of the design variables, passed as the
        first argument to the function fqoi
    :param str method: method used to evaluate the metric ('empirical'
        or 'kernel'); defaults to the object's configured method
    :return: value of the metric evaluated at the design point x
    :rtype: float

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u1 = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> x0 = [1, 2]
        >>> theHM.evalMetric(x0)
    """
    verbose = self.verbose
    if verbose:
        print('----------')
        print('At design: ' + str(x))

    # Sample the qoi (and gradient, when configured) at this design
    qoi_samples, gradient_samples = self.evalSamples(x)

    if verbose:
        print('Evaluating metric')
    return self.evalMetricFromSamples(qoi_samples, gradient_samples, method)
def getHorsetail(self):
    '''Function that gets vectors of the horsetail plot at the last design
    evaluated.

    :return: upper_curve, lower_curve, CDFs - returns three parameters,
        the first two are tuples containing pairs of x/y vectors of the
        upper and lower bounds on the CDFs (the horsetail plot). The
        third parameter is a list of x/y tuples for individual CDFs
        propagated at each sampled value of the interval uncertainties

    :raises ValueError: if the metric has never been evaluated, so no
        horsetail curves have been stored

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
        >>> matplotlib.pyplot(x1, y1, 'b')
        >>> matplotlib.pyplot(x2, y2, 'b')
        >>> for (x, y) in CDFs:
        ...     matplotlib.pyplot(x, y, 'k:')
        >>> matplotlib.pyplot.show()
    '''
    # _ql etc. are stored by the metric evaluation methods; their
    # absence means no design point has been evaluated yet.
    if hasattr(self, '_ql'):
        ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
        qh, hh = self._qh, self._hh

        # Kernel-based evaluations store integration points in _qis;
        # extend the envelope curves to plot over that full range.
        if self._qis is not None:
            ql, hl = _appendPlotArrays(ql, hl, self._qis)
            qu, hu = _appendPlotArrays(qu, hu, self._qis)

        # One (q, h) curve per sampled interval-uncertainty value
        CDFs = []
        for qi, hi in zip(qh, hh):
            CDFs.append((qi, hi))

        upper_target = [self._ftarg_u(h) for h in hu]
        upper_curve = (qu, hu, upper_target)
        lower_target = [self._ftarg_l(h) for h in hl]
        lower_curve = (ql, hl, lower_target)

        return upper_curve, lower_curve, CDFs
    else:
        raise ValueError('''The metric has not been evaluated at any
            design point so the horsetail does not exist''')
##############################################################################
## Private methods ##
##############################################################################
def _evalMetricEmpirical(self, q_samples, grad_samples=None):
    """Evaluate the horsetail matching metric from samples using
    empirical CDFs (one ECDF per sampled interval-uncertainty value).

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :param np.ndarray grad_samples: gradient samples, shape
        (M_int, M_prob, n_dv), or None for a gradient-free evaluation
    :return: the metric value, or (metric value, gradient) when
        grad_samples is given
    """
    M_prob = self.samples_prob
    M_int = self.samples_int

    # With a single interval sample there is no envelope over ECDFs,
    # so the extremum-smoothing parameter is irrelevant.
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    h_htail = np.zeros([M_int, M_prob])
    q_htail = np.zeros([M_int, M_prob])
    q_l = np.zeros(M_prob)
    q_u = np.zeros(M_prob)
    if grad_samples is not None:
        g_htail = np.zeros([M_int, M_prob, self._N_dv])
        g_l = np.zeros([M_prob, self._N_dv])
        g_u = np.zeros([M_prob, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    for ii in np.arange(M_int):
        # Get empirical CDF by sorting samples at each value of intervals
        sortinds = np.argsort(q_samples[ii, :])
        q_htail[ii, :] = q_samples[ii, sortinds]
        M = q_samples.shape[1]
        # Midpoint plotting positions: h_j = (j + 0.5)/M
        h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]

        if grad_samples is not None:
            # Reorder gradient samples consistently with the sort
            for ix in np.arange(self._N_dv):
                g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]

    for jj in np.arange(M_prob):
        # Envelope at each CDF height: q_u is the min of the q values
        # across interval samples, q_l the max
        q_u[jj] = min(q_htail[:, jj])
        q_l[jj] = max(q_htail[:, jj])

        if grad_samples is not None:
            # Smooth (differentiable) extremum replaces the hard min/max
            q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
            q_l[jj] = _extalg(q_htail[:, jj], alpha)
            for ix in np.arange(self._N_dv):
                gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                gtemp = _extgrad(q_htail[:, jj], alpha)
                g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])

    h_u, h_l = h_htail[0], h_htail[0]  # h is same for all ECDFs
    t_u = [self._ftarg_u(hi) for hi in h_u]
    t_l = [self._ftarg_l(hi) for hi in h_u]

    # Stash curves for getHorsetail()
    self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
    self._qh, self._hh = q_htail, h_htail
    self._tl, self._tu = t_l, t_u
    self._qis = None

    # Mean squared mismatch between each envelope and its target
    Du = (1./M_prob)*sum((q_u - t_u)**2)
    Dl = (1./M_prob)*sum((q_l - t_l)**2)
    dhat = np.sqrt(Du + Dl)

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Chain rule through the squared mismatch terms
        for ix in np.arange(self._N_dv):
            Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
            Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])

        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))

        return dhat, dhat_grad

    else:
        return dhat
def _getKernelParameters(self, q_samples):
    """Determine the kernel bandwidth and integration points to use.

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :return: (qis, bw) - integration point values restricted to the
        region around the samples, and the kernel bandwidth
    """
    # If kernel bandwidth not specified, find it using Scott's rule
    if self.kernel_bandwidth is None:
        if len(self.prob_uncertainties) > 0:
            # Guard against a degenerate (near-constant) sample set
            if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                bw = 1e-6
            else:
                bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
                           *np.std(q_samples[0,:]))
        else:
            bw = 1e-3
        # Cache so later evaluations reuse the same bandwidth
        self.kernel_bandwidth = bw
    else:
        bw = self.kernel_bandwidth

    ## Initialize arrays and prepare calculation
    q_min = np.amin(q_samples)
    q_max = np.amax(q_samples)
    if self.integration_points is None:
        # Default grid spans three times the sample range
        q_range = q_max - q_min
        qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
        self.integration_points = qis_full
    else:
        qis_full = np.array(self.integration_points)

    # Restrict the grid to within 20 bandwidths of the samples, where
    # the kernel CDFs actually vary
    ii_low, ii_high = 0, len(qis_full)
    try:
        ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
                                qi > q_max + 20*bw)
    except StopIteration:
        warnings.warn('Sample found higher than range of integration points')
    try:
        # Scan from the top down to find the last point below the samples
        iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
                               qi < q_min - 20*bw)
        ii_low = len(qis_full) - (iiN_low+1)
    except StopIteration:
        warnings.warn('Sample found lower than range of integration points')

    qis = qis_full[ii_low:ii_high+1]  # Only evaluate over range of samples
    self._qis = qis
    return qis, bw
def _evalMetricKernel(self, q_samples, grad_samples=None):
    """Evaluate the horsetail matching metric (and, when grad_samples is
    given, its gradient) using kernel-smoothed CDFs.

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :param np.ndarray grad_samples: gradient samples,
        shape (M_int, M_prob, n_dv), or None
    :return: dhat, or (dhat, dhat_grad) when grad_samples is given
    """
    qis, bw = self._getKernelParameters(q_samples)

    N_quad = len(qis)
    M_prob = self.samples_prob
    M_int = self.samples_int

    # alpha is passed to _extalg/_extgrad below; with a single interval
    # sample the envelope is trivial so alpha is irrelevant
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    fhtail = np.zeros([N_quad, M_int])  # one smoothed CDF per column
    qhtail = np.zeros([N_quad, M_int])  # matching qoi values per column
    if grad_samples is not None:
        fht_grad = np.zeros([N_quad, M_int, self._N_dv])
        hu_grad = np.zeros([N_quad, self._N_dv])
        hl_grad = np.zeros([N_quad, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    # ALGORITHM 1 from publication
    # Evaluate all individual CDFs and their gradients
    for mm in np.arange(M_int):
        qjs = q_samples[mm, :]
        # Pairwise differences between integration points and samples
        rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
        if grad_samples is not None:
            Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                ktype=self.kernel_type, bGrad=True)
            for ix in np.arange(self._N_dv):
                grad_js = grad_samples[mm, :, ix]
                fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
        else:
            Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                bGrad=False)
        fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
        qhtail[:, mm] = qis

    # ALGORITHM 2 from publication
    # Find horsetail curves - envelope of the CDFs and their gradients
    # In Matrix form
    if grad_samples is None:
        # Exact pointwise envelope when no gradient is required
        hu = np.max(fhtail, axis=1).flatten()
        hl = np.min(fhtail, axis=1).flatten()
    else:
        # Smooth (differentiable) approximation of the envelope
        hu = _extalg(fhtail, alpha, axis=1).flatten()
        hl = _extalg(fhtail, -1*alpha, axis=1).flatten()

        Su_prime = _extgrad(fhtail, alpha, axis=1)
        Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
        for kx in np.arange(self._N_dv):
            fis_grad = fht_grad[:, :, kx]
            for ii in np.arange(N_quad):
                hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])

    # ALGORITHM 3 from publication
    # Evaluate overall metric and gradient using matrix multiplication
    tu = np.array([self._ftarg_u(hi) for hi in hu])
    tl = np.array([self._ftarg_l(hi) for hi in hl])
    Du = _matrix_integration(qis, hu, tu)
    Dl = _matrix_integration(qis, hl, tl)
    dhat = float(np.sqrt(Du + Dl))

    # Store curves for later retrieval via getHorsetail()
    self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
    self._qh, self._hh = qhtail, fhtail
    self._tl, self._tu = tl, tu

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Target-function derivatives (presumably finite differences via
        # the _finDiff helper - definition not in view, confirm)
        tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
        tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
        for kx in np.arange(self._N_dv):
            Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
            Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)
        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))
        return dhat, dhat_grad
    else:
        return dhat
def _makeSurrogates(self, x):
    """Fit surrogate(s) of the qoi (and of its gradient, when self.jac is
    set) at design point x, returning callables (fqoi, fgrad, surr_jac)
    with the signatures expected by _evalSamples.
    """
    # Get quadrature points
    if self.surrogate_points is None:
        # Default: 5-point tensor grid on [-1, 1] in each uncertain dimension
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
            copy=False)
        u_sparse = np.vstack([m.flatten() for m in mesh]).T
    else:
        u_sparse = self.surrogate_points

    N_sparse = u_sparse.shape[0]
    q_sparse = np.zeros(N_sparse)

    # Get surrogates in correct form
    if not self.jac:
        # No gradients: fit a single surrogate to the qoi values only
        for iu, u in enumerate(u_sparse):
            q_sparse[iu] = self.fqoi(x, u)
        surr_qoi = self.surrogate(u_sparse, q_sparse)

        def fqoi(u):
            return surr_qoi(u)

        fgrad = False
        surr_jac = False
    else:
        g_sparse = np.zeros([N_sparse, self._N_dv])
        for iu, u in enumerate(u_sparse):
            if isinstance(self.jac, bool) and self.jac:
                # fqoi returns (qoi, gradient) together
                q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
            else:
                q_sparse[iu] = self.fqoi(x, u)
                g_sparse[iu, :] = self.jac(x, u)

        if not self.surrogate_jac:
            # Fit an independent surrogate to each gradient component
            fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            for k in np.arange(self._N_dv):
                fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])

            def surr_grad(u):
                return [f(u) for f in fpartial]
        else:
            if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                # The surrogate function fits qoi and gradient jointly
                surr_qoi, surr_grad = self.surrogate(
                    u_sparse, q_sparse, g_sparse)
            else:
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                surr_grad = self.surrogate_jac(u_sparse, g_sparse)

        def fqoi(u):
            return(surr_qoi(u))

        def fgrad(u):
            return(surr_grad(u))

        surr_jac = fgrad

    return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
    """Return samples of the uncertain parameters, shape
    (samples_int, samples_prob, N_u); interval components come first
    along the last axis, then probabilistic components. Stored samples
    are re-used when reuse_samples is True and the shape still matches.
    """
    N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)

    get_new = True
    if self.reuse_samples and self.u_samples is not None:
        if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
            if self.verbose:
                print('''Stored samples do not match current dimensions,
                        getting new samples''')
        else:
            get_new = False

    if get_new:
        if self.verbose:
            print('Getting uncertain parameter samples')
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        N_prob = len(self.prob_uncertainties)
        N_int = len(self.int_uncertainties)

        # u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])

        u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
            len(self.prob_uncertainties)])
        u_samples_int = np.zeros([self.samples_int, self.samples_prob,
            len(self.int_uncertainties)])

        # One interval sample per outer-loop iteration: (samples_int, N_int)
        u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
        for kk, uk in enumerate(self.int_uncertainties):
            if callable(uk):
                samps = np.array(uk()).flatten()
                # NOTE(review): this fills an array of length samples_int
                # but validates against samples_prob - looks like it
                # should compare with samples_int; confirm before relying
                # on callable interval uncertainties
                if len(samps) != self.samples_prob:
                    raise Exception('Number of samples returned not equal ' +
                        'to specified number of samples: please set number of ' +
                        'samples with samples_prob attribute')
                else:
                    u_ints[:, kk] = samps
            elif isinstance(uk, (tuple, list)):  ## See if given as tuple/list of bounds
                lb, ub = uk[0], uk[1]
                u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
                # Force the first/last samples onto the interval bounds
                u_ints[0, kk] = lb
                u_ints[-1, kk] = ub
            elif hasattr(uk, 'getSample'):
                for ii in np.arange(self.samples_int):
                    u_ints[ii, kk] = uk.getSample()
            else:
                raise TypeError('Unsupported interval uncertainty type')
        # Repeat each interval sample across the probabilistic axis
        u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))

        # Probabilistic samples shared across interval samples: (samples_prob, N_prob)
        u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
        for kk, uk in enumerate(self.prob_uncertainties):
            if callable(uk):
                samps = np.array(uk()).flatten()
                if len(samps) != self.samples_prob:
                    raise Exception('Number of samples returned not equal ' +
                        'to specified number of samples: please set number of ' +
                        'samples with samples_prob attribute')
                else:
                    u_probs[:, kk] = samps
            elif hasattr(uk, 'getSample'):
                for jj in np.arange(self.samples_prob):
                    u_probs[jj, kk] = uk.getSample()
            else:
                raise TypeError('Unsupported probabilistic uncertainty type')
        u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))

        # Interval components first, then probabilistic components
        u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
        self.u_samples = u_samples
        return u_samples
    else:
        if self.verbose:
            print('Re-using stored samples')
        return self.u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
    """Evaluate fqoi (and the gradient) at every uncertainty sample.

    :param np.ndarray u_samples: samples, shape (M_int, M_prob, N_u)
    :param fqoi: qoi function of u only (design point already bound in)
    :param fgrad: gradient function of u only, or False
    :param jac: True when fqoi returns (qoi, grad) together, a callable
        flag when gradients come from fgrad, or False for no gradients
    :return: (q_samples, grad_samples); grad_samples is None without jac
    """
    # Array of shape (M_int, M_prob)
    grad_samples = None
    q_samples = np.zeros([self.samples_int, self.samples_prob])
    if not jac:
        for ii in np.arange(q_samples.shape[0]):
            for jj in np.arange(q_samples.shape[1]):
                q_samples[ii, jj] = fqoi(u_samples[ii, jj])
    else:
        grad_samples = np.zeros([self.samples_int, self.samples_prob,
            self._N_dv])
        for ii in np.arange(q_samples.shape[0]):
            for jj in np.arange(q_samples.shape[1]):
                if isinstance(jac, bool) and jac:
                    # fqoi returns the qoi and its gradient together
                    (q, grad) = fqoi(u_samples[ii, jj])
                    q_samples[ii, jj] = float(q)
                    grad_samples[ii, jj, :] = [_ for _ in grad]
                else:
                    q_samples[ii, jj] = fqoi(u_samples[ii, jj])
                    grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])

    # Cache results for inspection after the call
    self.grad_samples = grad_samples
    self.q_samples = q_samples
    return q_samples, grad_samples
|
lwcook/horsetail-matching | horsetailmatching/hm.py | HorsetailMatching.getHorsetail | python | def getHorsetail(self):
'''Function that gets vectors of the horsetail plot at the last design
evaluated.
:return: upper_curve, lower_curve, CDFs - returns three parameters,
the first two are tuples containing pairs of x/y vectors of the
upper and lower bounds on the CDFs (the horsetail plot). The
third parameter is a list of x/y tuples for individual CDFs
propagated at each sampled value of the interval uncertainties
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
>>> matplotlib.pyplot(x1, y1, 'b')
>>> matplotlib.pyplot(x2, y2, 'b')
>>> for (x, y) in CDFs:
... matplotlib.pyplot(x, y, 'k:')
>>> matplotlib.pyplot.show()
'''
if hasattr(self, '_ql'):
ql, qu, hl, hu = self._ql, self._qu, self._hl, self._hu
qh, hh = self._qh, self._hh
if self._qis is not None:
ql, hl = _appendPlotArrays(ql, hl, self._qis)
qu, hu = _appendPlotArrays(qu, hu, self._qis)
CDFs = []
for qi, hi in zip(qh, hh):
CDFs.append((qi, hi))
upper_target = [self._ftarg_u(h) for h in hu]
upper_curve = (qu, hu, upper_target)
lower_target = [self._ftarg_l(h) for h in hl]
lower_curve = (ql, hl, lower_target)
return upper_curve, lower_curve, CDFs
else:
raise ValueError('''The metric has not been evaluated at any
design point so the horsetail does not exist''') | Function that gets vectors of the horsetail plot at the last design
evaluated.
:return: upper_curve, lower_curve, CDFs - returns three parameters,
the first two are tuples containing pairs of x/y vectors of the
upper and lower bounds on the CDFs (the horsetail plot). The
third parameter is a list of x/y tuples for individual CDFs
propagated at each sampled value of the interval uncertainties
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u = UniformParameter()
>>> theHM = HorsetailMatching(myFunc, u)
>>> (x1, y1, t1), (x2, y2, t2), CDFs = theHM.getHorsetail()
>>> matplotlib.pyplot(x1, y1, 'b')
>>> matplotlib.pyplot(x2, y2, 'b')
>>> for (x, y) in CDFs:
... matplotlib.pyplot(x, y, 'k:')
>>> matplotlib.pyplot.show() | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/hm.py#L405-L450 | null | class HorsetailMatching(object):
'''Class for using horsetail matching within an optimization. The main
functionality is to evaluate the horsetail matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a HorsetailMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Each can be an instance of the UncertainParameter class,
in which case they will be sampled using the getSample() method.
Alternatively each can be a function which returns sample(s) using
whatever method is desired.
:param list int_uncertainties: list of interval uncertainties [default []].
Each can be an instance of the IntervalParameter class,
in which case they will be sampled using the getSample() method.
Alternatively each can be specified as a tuple/list of the bounds.
:param function ftarget: function that returns the value of the target
inverse CDF given a value in [0,1]. Can be a tuple that gives two
target fuctions, one for the upper bound and one for the lower bound on
the CDF under mixed uncertainties [default t(h) = 0]
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param str method: method with which to evaluate the horsetil matching
metric, can be 'empirical' or 'kernel' [default 'empirical' if
jac is False else default 'kernel'].
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param int samples_int: number of samples to take from the
interval uncertainties. Note that under mixed uncertainties, a nested
loop is used to evaluate the metric so the total number of
samples will be samples_prob*samples_int (at each interval uncertainty
sample samples_prob samples are taken from the probabilistic
uncertainties). [default 50]
:param list integration_points: Only for method='kernel'.
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: Only for method='kernel'. The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param str kernel_type: Only for method='kernel'. The type of kernel to
use, can be 'gaussian', 'uniform', or 'triangle' [default 'gaussian'].
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a function that
predicts the qoi at an arbitrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
*Example Declarations*::
>>> from horsetailmatching import HorsetailMatching,
UncertainParameter, PolySurrogate
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> def myGrad(x, u): return [x[1], x[0]]
>>> def myTarg1(h): return 1-h**3
>>> def myTarg2(h): return 2-h**3
>>> u1 = UniformParameter()
>>> u2 = IntervalParameter()
>>> U = [u1, u2]
>>> poly = PolySurrogate(dimensions=2)
>>> poly_points = poly.getQuadraturePoints()
>>> theHM = HorsetailMatching(myFunc, U)
>>> theHM = HorsetailMatching(myFunc, U, jac=myGrad, method='kernel')
>>> theHM = HorsetailMatching(myFunc, U, ftarget=myTarg1)
>>> theHM = HorsetailMatching(myFunc, U, ftarget=(myTarg1, myTarg2))
>>> theHM = HorsetailMatching(myFunc, U, samples_prob=500,
samples_int = 50)
>>> theHM = HorsetailMatching(myFunc, U, method='kernel',
integration_points=numpy.linspace(0, 10, 100),
kernel_bandwidth=0.01)
>>> theHM = HorsetailMatching(myFunc, U,
surrogate=poly.surrogate, surrogate_jac=False,
surrogate_points=poly_points)
>>> theHM = HorsetailMatching(myFunc, U, verbose=True,
reuse_samples=True)
'''
def __init__(self, fqoi, prob_uncertainties, int_uncertainties=[],
        ftarget=None, jac=False, method=None,
        samples_prob=100, samples_int=50, integration_points=None,
        kernel_bandwidth=None, kernel_type='gaussian', alpha=400,
        surrogate=None, surrogate_points=None, surrogate_jac=False,
        reuse_samples=True, verbose=False):
    # See the class docstring for the meaning of each argument. Several
    # assignments below go through property setters with validation, so
    # ordering matters where noted.
    self.fqoi = fqoi
    # self.uncertain_parameters = uncertain_parameters
    self.prob_uncertainties = prob_uncertainties
    self.int_uncertainties = int_uncertainties
    self.ftarget = ftarget
    self.jac = jac
    self.method = method # Must be done after setting jac
    # samples_* setters depend on the uncertainty lists set above
    self.samples_prob = samples_prob
    self.samples_int = samples_int
    self.integration_points = integration_points
    self.kernel_bandwidth = kernel_bandwidth
    self.kernel_type = kernel_type
    self.alpha = alpha  # passed to the _extalg/_extgrad envelope approximations
    self.reuse_samples = reuse_samples
    self.u_samples = None
    self.surrogate = surrogate
    self.surrogate_points = surrogate_points
    self.surrogate_jac = surrogate_jac
    self.verbose = verbose
###############################################################################
## Properties with non-trivial setting behaviour
###############################################################################
# @property
# def uncertain_parameters(self):
# return self._u_params
#
# @uncertain_parameters.setter
# def uncertain_parameters(self, params):
# self._u_params = _makeIter(params)
# if len(self._u_params) == 0:
# raise ValueError('No uncertain parameters provided')
#
# self._u_int, self._u_prob = [], []
# for ii, u in enumerate(self._u_params):
# if u.is_interval_uncertainty:
# self._u_int.append((ii, u))
# else:
# self._u_prob.append((ii, u))
@property
def prob_uncertainties(self):
    # Probabilistic uncertainties; always stored as a list
    return self._prob_uncertainties

@prob_uncertainties.setter
def prob_uncertainties(self, params):
    self._prob_uncertainties = _makeIter(params)

@property
def int_uncertainties(self):
    # Interval uncertainties; always stored as a list
    return self._int_uncertainties

@int_uncertainties.setter
def int_uncertainties(self, params):
    self._int_uncertainties = _makeIter(params)

@property
def samples_prob(self):
    return self._samples_prob

@samples_prob.setter
def samples_prob(self, value):
    # Forced to 1 when there are no probabilistic uncertainties
    if len(self.prob_uncertainties) > 0:
        self._samples_prob = value
    else:
        self._samples_prob = 1

@property
def samples_int(self):
    return self._samples_int

@samples_int.setter
def samples_int(self, value):
    # Forced to 1 when there are no interval uncertainties
    if len(self.int_uncertainties) > 0:
        self._samples_int = value
    else:
        self._samples_int = 1

@property
def method(self):
    return self._method

@method.setter
def method(self, value):
    # Default: 'empirical' without gradients, 'kernel' with them
    if value is None:
        if self.jac is False:
            self._method = 'empirical'
        else:
            self._method = 'kernel'
    else:
        self._method = value
@property
def ftarget(self):
    """Target inverse CDF(s): a single callable used for both horsetail
    bounds, or a (upper, lower) pair of callables."""
    return self._ftarget

@ftarget.setter
def ftarget(self, value):
    # Default target: t(h) = 0 for all h
    def standardTarget(h):
        return 0
    try:
        # An indexable pair gives separate upper/lower bound targets
        iter(value)
        self._ftarg_u = value[0]
        self._ftarg_l = value[1]
        self._ftarget = value
    # Narrowed from a bare `except:` (which also swallowed
    # KeyboardInterrupt/SystemExit): these are the exceptions that
    # iter()/indexing raise for non-pair values.
    except (TypeError, IndexError, KeyError):
        # A single callable (or None for the default) is used for both
        # the upper and lower bound targets.
        if value is None:
            self._ftarget = standardTarget
        else:
            self._ftarget = value
        self._ftarg_u = self._ftarget
        self._ftarg_l = self._ftarget
@property
def u_samples(self):
    # Stored uncertainty samples (None until first evaluation)
    return self._u_samples

@u_samples.setter
def u_samples(self, samples):
    if samples is not None:
        # Validate the shape against the current sampling configuration
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        if (not isinstance(samples, np.ndarray) or
                samples.shape != (self.samples_int, self.samples_prob, N_u)):
            raise TypeError('u_samples should be a np.array of size'
                '(samples_int, samples_prob, num_uncertanities)')
    self._u_samples = samples

@property
def kernel_type(self):
    return self._kernel_type

@kernel_type.setter
def kernel_type(self, value):
    # Restrict to the kernel names passed on to the _kernel helper
    allowed_types = ['gaussian', 'uniform', 'triangle']
    if value not in allowed_types:
        raise ValueError('Kernel type must be one of'+
            ', '.join([str(t) for t in allowed_types]))
    else:
        self._kernel_type = value
##############################################################################
## Public Methods
##############################################################################
def evalSamples(self, x):
    '''Evaluates the samples of quantity of interest and its gradient
    (if supplied) at the given values of the design variables

    :param iterable x: values of the design variables, this is passed as
        the first argument to the function fqoi

    :return: (values of the quantity of interest, values of the gradient)
    :rtype: Tuple
    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()

    self._N_dv = len(_makeIter(x))

    if self.verbose:
        print('Evaluating surrogate')
    if self.surrogate is None:
        # Evaluate the true qoi/gradient directly, with x bound in
        def fqoi(u):
            return self.fqoi(x, u)
        def fgrad(u):
            return self.jac(x, u)
        jac = self.jac
    else:
        # Sample a surrogate fitted at this design point instead
        fqoi, fgrad, surr_jac = self._makeSurrogates(x)
        jac = surr_jac

    u_samples = self._getParameterSamples()

    if self.verbose:
        print('Evaluating quantity of interest at samples')
    q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)

    return q_samples, grad_samples
def evalMetric(self, x, method=None):
    '''Evaluates the horsetail matching metric at given values of the
    design variables.

    :param iterable x: values of the design variables, this is passed as
        the first argument to the function fqoi
    :param str method: method to use to evaluate the metric ('empirical' or
        'kernel')

    :return: metric_value - value of the metric evaluated at the design
        point given by x

    :rtype: float

    *Example Usage*::

        >>> def myFunc(x, u): return x[0]*x[1] + u
        >>> u1 = UniformParameter()
        >>> theHM = HorsetailMatching(myFunc, u)
        >>> x0 = [1, 2]
        >>> theHM.evalMetric(x0)

    '''
    # Make sure dimensions are correct
    # u_sample_dimensions = self._processDimensions()

    if self.verbose:
        print('----------')
        print('At design: ' + str(x))

    # Propagate the uncertainties through the qoi (or its surrogate)
    q_samples, grad_samples = self.evalSamples(x)

    if self.verbose:
        print('Evaluating metric')
    return self.evalMetricFromSamples(q_samples, grad_samples, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the horsetail matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradien,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
# Make sure dimensions are correct
# u_sample_dimensions = self._processDimensions()
q_samples = np.array(q_samples)
if not (q_samples.shape[0] == self.samples_int and
q_samples.shape[1] == self.samples_prob):
raise ValueError('Shape of q_samples should be [M_int, M_prob]')
if grad_samples is not None:
grad_samples = np.array(grad_samples)
if not (grad_samples.shape[0] == self.samples_int and
grad_samples.shape[1] == self.samples_prob):
raise ValueError('''Shape of grad_samples
should be [M_int, M_prob, n_dv]''')
if method is None:
method = self.method
if method.lower() == 'empirical':
return self._evalMetricEmpirical(q_samples, grad_samples)
elif method.lower() == 'kernel':
return self._evalMetricKernel(q_samples, grad_samples)
else:
raise ValueError('Unsupported metric evalation method')
##############################################################################
## Private methods ##
##############################################################################
def _evalMetricEmpirical(self, q_samples, grad_samples=None):
    """Evaluate the horsetail matching metric (and, when grad_samples is
    given, its gradient) using empirical CDFs of the samples.

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :param np.ndarray grad_samples: gradient samples,
        shape (M_int, M_prob, n_dv), or None
    :return: dhat, or (dhat, dhat_grad) when grad_samples is given
    """
    M_prob = self.samples_prob
    M_int = self.samples_int

    # alpha is passed to _extalg/_extgrad below; with a single interval
    # sample the envelope is trivial so alpha is irrelevant
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    h_htail = np.zeros([M_int, M_prob])  # ECDF heights per interval sample
    q_htail = np.zeros([M_int, M_prob])  # sorted qoi values per interval sample
    q_l = np.zeros(M_prob)
    q_u = np.zeros(M_prob)
    if grad_samples is not None:
        g_htail = np.zeros([M_int, M_prob, self._N_dv])
        g_l = np.zeros([M_prob, self._N_dv])
        g_u = np.zeros([M_prob, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    for ii in np.arange(M_int):
        # Get empirical CDF by sorting samples at each value of intervals
        sortinds = np.argsort(q_samples[ii, :])
        q_htail[ii, :] = q_samples[ii, sortinds]
        M = q_samples.shape[1]
        h_htail[ii, :] = [(1./M)*(0.5 + j) for j in range(M)]  # midpoint heights

        if grad_samples is not None:
            # Reorder gradients consistently with the sorted samples
            for ix in np.arange(self._N_dv):
                g_htail[ii, :, ix] = grad_samples[ii, sortinds, ix]

    for jj in np.arange(M_prob):
        # Envelope over the interval samples at each ECDF height
        q_u[jj] = min(q_htail[:, jj])
        q_l[jj] = max(q_htail[:, jj])
        if grad_samples is not None:
            # Replace exact min/max with the smooth approximation so the
            # envelope (and hence the metric) is differentiable
            q_u[jj] = _extalg(q_htail[:, jj], -1*alpha)
            q_l[jj] = _extalg(q_htail[:, jj], alpha)
            for ix in np.arange(self._N_dv):
                gtemp = _extgrad(q_htail[:, jj], -1*alpha)
                g_u[jj, ix] = gtemp.dot(g_htail[:, jj, ix])
                gtemp = _extgrad(q_htail[:, jj], alpha)
                g_l[jj, ix] = gtemp.dot(g_htail[:, jj, ix])

    h_u, h_l = h_htail[0], h_htail[0] # h is same for all ECDFs
    t_u = [self._ftarg_u(hi) for hi in h_u]
    t_l = [self._ftarg_l(hi) for hi in h_u]

    # Store curves for later retrieval via getHorsetail()
    self._ql, self._qu, self._hl, self._hu = q_l, q_u, h_l, h_u
    self._qh, self._hh = q_htail, h_htail
    self._tl, self._tu = t_l, t_u
    self._qis = None

    Du = (1./M_prob)*sum((q_u - t_u)**2)
    Dl = (1./M_prob)*sum((q_l - t_l)**2)
    dhat = np.sqrt(Du + Dl)

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        for ix in np.arange(self._N_dv):
            Du_grad[ix] = (1./M_prob)*sum(2*(q_u - t_u)*g_u[:, ix])
            Dl_grad[ix] = (1./M_prob)*sum(2*(q_l - t_l)*g_l[:, ix])
        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))
        return dhat, dhat_grad
    else:
        return dhat
def _getKernelParameters(self, q_samples):
# If kernel bandwidth not specified, find it using Scott's rule
if self.kernel_bandwidth is None:
if len(self.prob_uncertainties) > 0:
if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
bw = 1e-6
else:
bw = 0.33*((4/(3.*q_samples.shape[1]))**(1/5.)
*np.std(q_samples[0,:]))
else:
bw = 1e-3
self.kernel_bandwidth = bw
else:
bw = self.kernel_bandwidth
## Initalize arrays and prepare calculation
q_min = np.amin(q_samples)
q_max = np.amax(q_samples)
if self.integration_points is None:
q_range = q_max - q_min
qis_full = np.linspace(q_min - q_range, q_max + q_range, 10000)
self.integration_points = qis_full
else:
qis_full = np.array(self.integration_points)
ii_low, ii_high = 0, len(qis_full)
try:
ii_high, qi_high = next((iq, qi) for iq, qi in enumerate(qis_full) if
qi > q_max + 20*bw)
except StopIteration:
warnings.warn('Sample found higher than range of integration points')
try:
iiN_low, qi_low = next((iq, qi) for iq, qi in enumerate(qis_full[::-1]) if
qi < q_min - 20*bw)
ii_low = len(qis_full) - (iiN_low+1)
except StopIteration:
warnings.warn('Sample found lower than range of integration points')
qis = qis_full[ii_low:ii_high+1] # Only evaluate over range of samples
self._qis = qis
return qis, bw
def _evalMetricKernel(self, q_samples, grad_samples=None):
    """Evaluate the horsetail matching metric (and, when grad_samples is
    given, its gradient) using kernel-smoothed CDFs.

    :param np.ndarray q_samples: qoi samples, shape (M_int, M_prob)
    :param np.ndarray grad_samples: gradient samples,
        shape (M_int, M_prob, n_dv), or None
    :return: dhat, or (dhat, dhat_grad) when grad_samples is given
    """
    qis, bw = self._getKernelParameters(q_samples)

    N_quad = len(qis)
    M_prob = self.samples_prob
    M_int = self.samples_int

    # alpha is passed to _extalg/_extgrad below; with a single interval
    # sample the envelope is trivial so alpha is irrelevant
    if M_int > 1:
        alpha = self.alpha
    else:
        alpha = 1

    fhtail = np.zeros([N_quad, M_int])  # one smoothed CDF per column
    qhtail = np.zeros([N_quad, M_int])  # matching qoi values per column
    if grad_samples is not None:
        fht_grad = np.zeros([N_quad, M_int, self._N_dv])
        hu_grad = np.zeros([N_quad, self._N_dv])
        hl_grad = np.zeros([N_quad, self._N_dv])
        Du_grad = np.zeros(self._N_dv)
        Dl_grad = np.zeros(self._N_dv)

    # ALGORITHM 1 from publication
    # Evaluate all individual CDFs and their gradients
    for mm in np.arange(M_int):
        qjs = q_samples[mm, :]
        # Pairwise differences between integration points and samples
        rmat = qis.reshape([N_quad, 1])-qjs.reshape([1, M_prob])
        if grad_samples is not None:
            Kcdf, Kprime = _kernel(rmat, M_prob, bw=bw,
                ktype=self.kernel_type, bGrad=True)
            for ix in np.arange(self._N_dv):
                grad_js = grad_samples[mm, :, ix]
                fht_grad[:, mm, ix] = Kprime.dot(-1*grad_js)
        else:
            Kcdf = _kernel(rmat, M_prob, bw=bw, ktype=self.kernel_type,
                bGrad=False)
        fhtail[:, mm] = Kcdf.dot(np.ones([M_prob, 1])).flatten()
        qhtail[:, mm] = qis

    # ALGORITHM 2 from publication
    # Find horsetail curves - envelope of the CDFs and their gradients
    # In Matrix form
    if grad_samples is None:
        # Exact pointwise envelope when no gradient is required
        hu = np.max(fhtail, axis=1).flatten()
        hl = np.min(fhtail, axis=1).flatten()
    else:
        # Smooth (differentiable) approximation of the envelope
        hu = _extalg(fhtail, alpha, axis=1).flatten()
        hl = _extalg(fhtail, -1*alpha, axis=1).flatten()

        Su_prime = _extgrad(fhtail, alpha, axis=1)
        Sl_prime = _extgrad(fhtail, -1*alpha, axis=1)
        for kx in np.arange(self._N_dv):
            fis_grad = fht_grad[:, :, kx]
            for ii in np.arange(N_quad):
                hu_grad[ii, kx] = Su_prime[ii, :].dot(fis_grad[ii, :])
                hl_grad[ii, kx] = Sl_prime[ii, :].dot(fis_grad[ii, :])

    # ALGORITHM 3 from publication
    # Evaluate overall metric and gradient using matrix multiplication
    tu = np.array([self._ftarg_u(hi) for hi in hu])
    tl = np.array([self._ftarg_l(hi) for hi in hl])
    Du = _matrix_integration(qis, hu, tu)
    Dl = _matrix_integration(qis, hl, tl)
    dhat = float(np.sqrt(Du + Dl))

    # Store curves for later retrieval via getHorsetail()
    self._ql, self._qu, self._hl, self._hu = qis, qis, hl, hu
    self._qh, self._hh = qhtail, fhtail
    self._tl, self._tu = tl, tu

    if self.verbose:
        print('Metric: ' + str(dhat))

    if grad_samples is not None:
        # Target-function derivatives (presumably finite differences via
        # the _finDiff helper - definition not in view, confirm)
        tu_pr = np.array([_finDiff(self._ftarg_u, hi) for hi in hu])
        tl_pr = np.array([_finDiff(self._ftarg_l, hi) for hi in hl])
        for kx in np.arange(self._N_dv):
            Du_grad[kx] = _matrix_grad(qis, hu, hu_grad[:, kx], tu, tu_pr)
            Dl_grad[kx] = _matrix_grad(qis, hl, hl_grad[:, kx], tl, tl_pr)
        dhat_grad = (0.5*(Du+Dl)**(-0.5)*(Du_grad + Dl_grad))
        if self.verbose:
            print('Gradient: ' + str([g for g in dhat_grad]))
        return dhat, dhat_grad
    else:
        return dhat
def _makeSurrogates(self, x):
    """Fit surrogate(s) of the qoi (and of its gradient, when self.jac is
    set) at design point x, returning callables (fqoi, fgrad, surr_jac)
    with the signatures expected by _evalSamples.
    """
    # Get quadrature points
    if self.surrogate_points is None:
        # Default: 5-point tensor grid on [-1, 1] in each uncertain dimension
        N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
        mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],
            copy=False)
        u_sparse = np.vstack([m.flatten() for m in mesh]).T
    else:
        u_sparse = self.surrogate_points

    N_sparse = u_sparse.shape[0]
    q_sparse = np.zeros(N_sparse)

    # Get surrogates in correct form
    if not self.jac:
        # No gradients: fit a single surrogate to the qoi values only
        for iu, u in enumerate(u_sparse):
            q_sparse[iu] = self.fqoi(x, u)
        surr_qoi = self.surrogate(u_sparse, q_sparse)

        def fqoi(u):
            return surr_qoi(u)

        fgrad = False
        surr_jac = False
    else:
        g_sparse = np.zeros([N_sparse, self._N_dv])
        for iu, u in enumerate(u_sparse):
            if isinstance(self.jac, bool) and self.jac:
                # fqoi returns (qoi, gradient) together
                q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)
            else:
                q_sparse[iu] = self.fqoi(x, u)
                g_sparse[iu, :] = self.jac(x, u)

        if not self.surrogate_jac:
            # Fit an independent surrogate to each gradient component
            fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]
            surr_qoi = self.surrogate(u_sparse, q_sparse)
            for k in np.arange(self._N_dv):
                fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])

            def surr_grad(u):
                return [f(u) for f in fpartial]
        else:
            if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:
                # The surrogate function fits qoi and gradient jointly
                surr_qoi, surr_grad = self.surrogate(
                    u_sparse, q_sparse, g_sparse)
            else:
                surr_qoi = self.surrogate(u_sparse, q_sparse)
                surr_grad = self.surrogate_jac(u_sparse, g_sparse)

        def fqoi(u):
            return(surr_qoi(u))

        def fgrad(u):
            return(surr_grad(u))

        surr_jac = fgrad

    return fqoi, fgrad, surr_jac
def _getParameterSamples(self):
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
get_new = True
if self.reuse_samples and self.u_samples is not None:
if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):
if self.verbose:
print('''Stored samples do not match current dimensions,
getting new samples''')
else:
get_new = False
if get_new:
if self.verbose:
print('Getting uncertain parameter samples')
N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)
N_prob = len(self.prob_uncertainties)
N_int = len(self.int_uncertainties)
# u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])
u_samples_prob = np.zeros([self.samples_int, self.samples_prob,
len(self.prob_uncertainties)])
u_samples_int = np.zeros([self.samples_int, self.samples_prob,
len(self.int_uncertainties)])
u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])
for kk, uk in enumerate(self.int_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_ints[:, kk] = samps
elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds
lb, ub = uk[0], uk[1]
u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)
u_ints[0, kk] = lb
u_ints[-1, kk] = ub
elif hasattr(uk, 'getSample'):
for ii in np.arange(self.samples_int):
u_ints[ii, kk] = uk.getSample()
else:
raise TypeError('Unsupported interval uncertainty type')
u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))
u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])
for kk, uk in enumerate(self.prob_uncertainties):
if callable(uk):
samps = np.array(uk()).flatten()
if len(samps) != self.samples_prob:
raise Exception('Number of samples returned not equal ' +
'to specified number of samples: please set number of ' +
'samples with samples_prob attribute')
else:
u_probs[:, kk] = samps
elif hasattr(uk, 'getSample'):
for jj in np.arange(self.samples_prob):
u_probs[jj, kk] = uk.getSample()
else:
raise TypeError('Unsupported probabilistic uncertainty type')
u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))
u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)
self.u_samples = u_samples
return u_samples
else:
if self.verbose:
print('Re-using stored samples')
return self.u_samples
def _evalSamples(self, u_samples, fqoi, fgrad, jac):
# Array of shape (M_int, M_prob)
grad_samples = None
q_samples = np.zeros([self.samples_int, self.samples_prob])
if not jac:
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
else:
grad_samples = np.zeros([self.samples_int, self.samples_prob,
self._N_dv])
for ii in np.arange(q_samples.shape[0]):
for jj in np.arange(q_samples.shape[1]):
if isinstance(jac, bool) and jac:
(q, grad) = fqoi(u_samples[ii, jj])
q_samples[ii, jj] = float(q)
grad_samples[ii, jj, :] = [_ for _ in grad]
else:
q_samples[ii, jj] = fqoi(u_samples[ii, jj])
grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])
self.grad_samples = grad_samples
self.q_samples = q_samples
return q_samples, grad_samples
|
lwcook/horsetail-matching | horsetailmatching/parameters.py | UncertainParameter.evalPDF | python | def evalPDF(self, u_values):
'''Returns the PDF of the uncertain parameter evaluated at the values
provided in u_values.
:param iterable u_values: values of the uncertain parameter at which to
evaluate the PDF
*Example Usage* ::
>>> u = UniformParameter()
>>> X = numpy.linspace(-1, 1, 100)
>>> Y = [u.evalPDF(x) for x in X]
'''
if isinstance(u_values, np.ndarray):
return self._evalPDF(u_values)
else:
try:
iter(u_values)
return [self._evalPDF(u) for u in u_values]
except:
return self._evalPDF(u_values) | Returns the PDF of the uncertain parameter evaluated at the values
provided in u_values.
:param iterable u_values: values of the uncertain parameter at which to
evaluate the PDF
*Example Usage* ::
>>> u = UniformParameter()
>>> X = numpy.linspace(-1, 1, 100)
>>> Y = [u.evalPDF(x) for x in X] | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/parameters.py#L91-L113 | null | class UncertainParameter(object):
'''Base Class for handling uncertain parameters in optimization under
uncertainty problems using horsetail matching. If this class is used, a
custom distribution must be provided. Otherwise one of the child classes
UniformParameter, IntervalParameter, or GaussianParameter should be used.
All child classes use the methods getSample and evalPDF.
:param function pdf: pdf function of distribution. Bounds on the
distribution should also be provided via the lower_bound and
upper_bound arguments.
:param double lower_bound: lower bound of the distribution [default -1]
:param double upper_bound: upper bound of the distribution [default 1]
*Example Declaration* ::
>>> def myPDF(q):
if q > 2.5 or q < 1.5:
return 0
else:
return 1/(2.5 - 1.5)
>>> u = UncertainParameter(pdf=myPDF, lower_bound=1.5, upper_bound=2.5)
'''
default_lb = -1
default_ub = 1
def __init__(self, pdf=None, lower_bound=default_lb, upper_bound=default_ub):
self.pdf = pdf
self.lower_bound = lower_bound
self.upper_bound = upper_bound
self.is_interval_uncertainty = False
self._max_pdf_val = None
###############################################################################
## Properties with non-trivail setting behaviour
###############################################################################
@property
def lower_bound(self):
return self._lb
@lower_bound.setter
def lower_bound(self, value):
if hasattr(self, '_ub') and value > self.upper_bound:
raise ValueError('Lower bound cannot be greater than upper bound')
self._lb = value
@property
def upper_bound(self):
return self._ub
@upper_bound.setter
def upper_bound(self, value):
if hasattr(self, '_lb') and value < self.lower_bound:
raise ValueError('Lower bound cannot be greater than upper bound')
self._ub = value
###############################################################################
## Public Methods
###############################################################################
def getSample(self):
'''Returns a random sample of the uncertain variable according to its
distribution.
*Example Usage* ::
>>> u = UniformParameter()
>>> u_sample = u.getSample()
'''
## _getSample is overwritten in child classes
return self._getSample()
###############################################################################
## Private Methods
###############################################################################
def _getSample(self):
if self._max_pdf_val is None:
self._max_pdf_val = self._getMaxPDFVal(self.evalPDF,
self.lower_bound, self.upper_bound)
while True:
zscale = self._max_pdf_val*1.1
uval = (self.lower_bound +
np.random.random()*(self.upper_bound-self.lower_bound))
zval = zscale*np.random.random()
if zval < self.evalPDF(uval):
return uval
def _getMaxPDFVal(self, evalPDF, lower_bound, upper_bound):
max_pdf_val = 0
for ui in np.linspace(lower_bound, upper_bound, 20):
pdfi = evalPDF(ui)
if pdfi > max_pdf_val:
max_pdf_val = pdfi
return max_pdf_val
def _evalPDF(self, u):
if u < self.lower_bound or u > self.upper_bound:
return 0
else:
return self.pdf(u)
|
lwcook/horsetail-matching | horsetailmatching/weightedsum.py | WeightedSum.evalMetric | python | def evalMetric(self, x, w1=None, w2=None):
'''Evaluates the weighted sum metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param float w1: value to weight the mean by
:param float w2: value to weight the std by
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
'''
if w1 is None:
w1 = self.w1
if w2 is None:
w2 = self.w2
if self.verbose:
print('----------')
print('At design: ' + str(x))
self._N_dv = len(_makeIter(x))
if self.verbose:
print('Evaluating surrogate')
if self.surrogate is None:
def fqoi(u):
return self.fqoi(x, u)
def fgrad(u):
return self.jac(x, u)
jac = self.jac
else:
fqoi, fgrad, surr_jac = self._makeSurrogates(x)
jac = surr_jac
u_samples = self._getParameterSamples()
if self.verbose: print('Evaluating quantity of interest at samples')
q_samples, grad_samples = self._evalSamples(u_samples, fqoi, fgrad, jac)
if self.verbose: print('Evaluating metric')
return self._evalWeightedSumMetric(q_samples, grad_samples) | Evaluates the weighted sum metric at given values of the
design variables.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:param float w1: value to weight the mean by
:param float w2: value to weight the std by
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/weightedsum.py#L96-L143 | [
"def _makeIter(x):\n try:\n iter(x)\n return [xi for xi in x]\n except:\n return [x]\n",
"def _makeSurrogates(self, x):\n\n # Get quadrature points\n if self.surrogate_points is None:\n N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)\n mesh = np.meshgrid(*[np.linspace(-1, 1, 5) for n in np.arange(N_u)],\n copy=False)\n u_sparse = np.vstack([m.flatten() for m in mesh]).T\n else:\n u_sparse = self.surrogate_points\n\n N_sparse = u_sparse.shape[0]\n q_sparse = np.zeros(N_sparse)\n\n # Get surrogates in correct form\n if not self.jac:\n for iu, u in enumerate(u_sparse):\n q_sparse[iu] = self.fqoi(x, u)\n\n surr_qoi = self.surrogate(u_sparse, q_sparse)\n\n def fqoi(u):\n return surr_qoi(u)\n fgrad = False\n surr_jac = False\n\n else:\n g_sparse = np.zeros([N_sparse, self._N_dv])\n for iu, u in enumerate(u_sparse):\n if isinstance(self.jac, bool) and self.jac:\n q_sparse[iu], g_sparse[iu, :] = self.fqoi(x, u)\n else:\n q_sparse[iu] = self.fqoi(x, u)\n g_sparse[iu, :] = self.jac(x, u)\n\n if not self.surrogate_jac:\n fpartial = [lambda u: 0 for _ in np.arange(self._N_dv)]\n surr_qoi = self.surrogate(u_sparse, q_sparse)\n for k in np.arange(self._N_dv):\n fpartial[k] = self.surrogate(u_sparse, g_sparse[:, k])\n def surr_grad(u):\n return [f(u) for f in fpartial]\n else:\n if isinstance(self.surrogate_jac, bool) and self.surrogate_jac:\n surr_qoi, surr_grad = self.surrogate(\n u_sparse, q_sparse, g_sparse)\n else:\n surr_qoi = self.surrogate(u_sparse, q_sparse)\n surr_grad = self.surrogate_jac(u_sparse, g_sparse)\n\n def fqoi(u):\n return(surr_qoi(u))\n def fgrad(u):\n return(surr_grad(u))\n surr_jac = fgrad\n\n return fqoi, fgrad, surr_jac\n",
" def _getParameterSamples(self):\n\n N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)\n\n get_new = True\n if self.reuse_samples and self.u_samples is not None:\n if self.u_samples.shape != (self.samples_int, self.samples_prob, N_u):\n if self.verbose:\n print('''Stored samples do not match current dimensions,\n getting new samples''')\n else:\n get_new = False\n\n if get_new:\n if self.verbose:\n print('Getting uncertain parameter samples')\n\n N_u = len(self.prob_uncertainties) + len(self.int_uncertainties)\n N_prob = len(self.prob_uncertainties)\n N_int = len(self.int_uncertainties)\n# u_samples = np.zeros([self.samples_int, self.samples_prob, N_u])\n\n u_samples_prob = np.zeros([self.samples_int, self.samples_prob,\n len(self.prob_uncertainties)])\n u_samples_int = np.zeros([self.samples_int, self.samples_prob,\n len(self.int_uncertainties)])\n\n u_ints = np.zeros([self.samples_int, len(self.int_uncertainties)])\n for kk, uk in enumerate(self.int_uncertainties):\n if callable(uk):\n samps = np.array(uk()).flatten()\n if len(samps) != self.samples_prob:\n raise Exception('Number of samples returned not equal ' +\n 'to specified number of samples: please set number of ' +\n 'samples with samples_prob attribute')\n else:\n u_ints[:, kk] = samps\n elif isinstance(uk, (tuple, list)): ## See if given as tuple/list of bounds\n lb, ub = uk[0], uk[1]\n u_ints[:, kk] = np.random.uniform(lb, ub, size=self.samples_int)\n u_ints[0, kk] = lb\n u_ints[-1, kk] = ub\n elif hasattr(uk, 'getSample'):\n for ii in np.arange(self.samples_int):\n u_ints[ii, kk] = uk.getSample()\n else:\n raise TypeError('Unsupported interval uncertainty type')\n\n u_samples_int = np.tile(u_ints[:, np.newaxis], (1, self.samples_prob, 1))\n\n u_probs = np.zeros([self.samples_prob, len(self.prob_uncertainties)])\n for kk, uk in enumerate(self.prob_uncertainties):\n if callable(uk):\n samps = np.array(uk()).flatten()\n if len(samps) != self.samples_prob:\n raise Exception('Number of 
samples returned not equal ' +\n 'to specified number of samples: please set number of ' +\n 'samples with samples_prob attribute')\n else:\n u_probs[:, kk] = samps\n elif hasattr(uk, 'getSample'):\n for jj in np.arange(self.samples_prob):\n u_probs[jj, kk] = uk.getSample()\n else:\n raise TypeError('Unsupported probabilistic uncertainty type')\n\n u_samples_prob = np.tile(u_probs[np.newaxis, :], (self.samples_int, 1, 1))\n\n u_samples = np.concatenate((u_samples_int, u_samples_prob), axis=2)\n\n self.u_samples = u_samples\n return u_samples\n else:\n if self.verbose:\n print('Re-using stored samples')\n return self.u_samples\n",
"def _evalSamples(self, u_samples, fqoi, fgrad, jac):\n\n # Array of shape (M_int, M_prob)\n grad_samples = None\n q_samples = np.zeros([self.samples_int, self.samples_prob])\n if not jac:\n for ii in np.arange(q_samples.shape[0]):\n for jj in np.arange(q_samples.shape[1]):\n q_samples[ii, jj] = fqoi(u_samples[ii, jj])\n else:\n grad_samples = np.zeros([self.samples_int, self.samples_prob,\n self._N_dv])\n for ii in np.arange(q_samples.shape[0]):\n for jj in np.arange(q_samples.shape[1]):\n if isinstance(jac, bool) and jac:\n (q, grad) = fqoi(u_samples[ii, jj])\n q_samples[ii, jj] = float(q)\n grad_samples[ii, jj, :] = [_ for _ in grad]\n else:\n q_samples[ii, jj] = fqoi(u_samples[ii, jj])\n grad_samples[ii, jj, :] = fgrad(u_samples[ii, jj])\n\n self.grad_samples = grad_samples\n\n self.q_samples = q_samples\n\n return q_samples, grad_samples\n",
"def _evalWeightedSumMetric(self, q_samples, grad_samples=None):\n\n fjs = np.array(q_samples).flatten()\n M = self.samples_prob\n\n mean = (1./M)*np.sum(fjs)\n var = (1./M)*np.sum([(fj - mean)**2 for fj in fjs])\n\n ws = self.w1*mean + self.w2*np.sqrt(var)\n\n if grad_samples is None:\n return ws\n else:\n ndv = grad_samples.shape[2]\n gradjs = grad_samples[0, :, :]\n\n gradient = np.zeros(ndv)\n for kdv in range(ndv):\n\n meang, varg = 0., 0.\n for j, fj in enumerate(fjs):\n meang += (1./M)*float(gradjs[j, kdv])\n varg += (1./M)*2*(fj - mean)*float(gradjs[j, kdv])\n\n gradient[kdv] = meang + 0.5*(var**-0.5)*varg\n\n return ws, gradient\n"
] | class WeightedSum(HorsetailMatching):
'''Class for using weighted sum of moments within an optimization.
The code is written such that all arguments that can be used at the
initialization of a WeightedSum object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Is a list of UncertainParameter objects, or a list of
functions that return samples of the each uncertainty.
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
'''
def __init__(self, fqoi, prob_uncertainties, jac=False, samples_prob=1000,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False,
w1=1, w2=1):
self.fqoi = fqoi
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = []
self.jac = jac
self.samples_prob = samples_prob
self.samples_int = 1
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
self.w1 = w1
self.w2 = w2
##############################################################################
## Public Methods
##############################################################################
##############################################################################
## Private methods ##
##############################################################################
def _evalWeightedSumMetric(self, q_samples, grad_samples=None):
fjs = np.array(q_samples).flatten()
M = self.samples_prob
mean = (1./M)*np.sum(fjs)
var = (1./M)*np.sum([(fj - mean)**2 for fj in fjs])
ws = self.w1*mean + self.w2*np.sqrt(var)
if grad_samples is None:
return ws
else:
ndv = grad_samples.shape[2]
gradjs = grad_samples[0, :, :]
gradient = np.zeros(ndv)
for kdv in range(ndv):
meang, varg = 0., 0.
for j, fj in enumerate(fjs):
meang += (1./M)*float(gradjs[j, kdv])
varg += (1./M)*2*(fj - mean)*float(gradjs[j, kdv])
gradient[kdv] = meang + 0.5*(var**-0.5)*varg
return ws, gradient
def getHorsetail(self):
return ([0], [0]), ([0], [0]), [([0], [0])]
|
lwcook/horsetail-matching | horsetailmatching/surrogates.py | eval_poly | python | def eval_poly(uvec, nvec, Jvec):
'''Evaluate multi-dimensional polynomials through tensor multiplication.
:param list uvec: vector value of the uncertain parameters at which to evaluate the
polynomial
:param list nvec: order in each dimension at which to evaluate the polynomial
:param list Jvec: Jacobi matrix of each dimension's 1D polynomial
:return: poly_value - value of the polynomial evaluated at uvec
:rtype: float
'''
us = _makeIter(uvec)
ns = _makeIter(nvec)
Js = _makeIter(Jvec)
return np.prod([_eval_poly_1D(u, n, J) for u, n, J in zip(us, ns, Js)]) | Evaluate multi-dimensional polynomials through tensor multiplication.
:param list uvec: vector value of the uncertain parameters at which to evaluate the
polynomial
:param list nvec: order in each dimension at which to evaluate the polynomial
:param list Jvec: Jacobi matrix of each dimension's 1D polynomial
:return: poly_value - value of the polynomial evaluated at uvec
:rtype: float | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L186-L204 | [
"def _makeIter(x):\n try:\n iter(x)\n return [xi for xi in x]\n except:\n return [x]\n"
] | import numpy as np
import math
import pdb
class PolySurrogate(object):
'''Class for creating surrogate models using non-intrusive polynomial
chaos.
:param int dimensions: number of dimensions of the polynomial expansion
:param int order: order of the polynomial expansion [default 3]
:param str/list poly_type: string of the type of polynomials to use in the
expansion, or list of strings where each entry in the list is the type
of polynomial to use in the corresponding dimension. Supported
polynomial types are legendre and gaussian. [default legendre]
*Example Declaration*::
>>> thePC = PolySurrogate(dimensions=3)
>>> thePC = PolySurrogate(dimensions=3, order=3)
>>> thePC = PolySurrogate(dimensions=3, order=3, poly_type='legendre')
'''
def __init__(self, dimensions, order=3, poly_type='legendre'):
self.dims = dimensions
self.P = int(order) + 1
if isinstance(poly_type, basestring):
self.poly_types = [poly_type for _ in np.arange(self.dims)]
else:
self.poly_types = _makeIter(poly_type)
self.J_list = [_define_poly_J(p, self.P) for p in self.poly_types]
imesh = np.meshgrid(*[np.arange(self.P) for d in np.arange(self.dims)])
self.index_polys = np.vstack([m.flatten() for m in imesh]).T
self.N_poly = len(self.index_polys)
self.coeffs = np.zeros([self.P for __ in np.arange(self.dims)])
def surrogate(self, u_sparse, q_sparse):
'''Combines the train and predict methods to create a surrogate
model function fitted to the input/output combinations given in
u_sparse and q_sparse.
:param numpy.ndarray u_sparse: input values at which the output
values are obtained. Must be the same as the qaudrature
points defined by the getQuadraturePoints method.
:param numpy.ndarray q_sparse: output values corresponding
to the input values given in u_sparse to which the
surrogate is fitted
:return: surrogate model fitted to u_sparse and q_sparse
:rtype: function
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> surrogateFunc = thePC.surrogate(U, Q)
'''
self.train(q_sparse)
def model(u):
return self.predict(u)
return model
def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y
def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None
def getQuadraturePointsAndWeights(self):
'''Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials in
each dimension.
:return: (u_points, w_points) - np.ndarray of shape
(num_polynomials, num_dimensions) and a np.ndarray of size
(num_polynomials)
:rtype: (np.ndarray, np.ndarray)
'''
qw_list, qp_list = [], []
for ii in np.arange(len(self.J_list)):
d, Q = np.linalg.eig(self.J_list[ii])
qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
qw = (Q[0, qpi]**2).reshape([d.size, 1])
qw_list.append(qw)
qp_list.append(qp)
umesh = np.meshgrid(*qp_list)
upoints = np.vstack([m.flatten() for m in umesh]).T
wmesh = np.meshgrid(*qw_list)
wpoints = np.vstack([m.flatten() for m in wmesh]).T
return upoints, wpoints
def getQuadraturePoints(self):
'''Gets the quadrature points at which the output values must be found
in order to train the polynomial expansion using gaussian quadrature.
:return: upoints - a np.ndarray of size (num_polynomials, num_dimensions)
:rtype: np.ndarray
'''
upoints, _ = self.getQuadraturePointsAndWeights()
return upoints
## --------------------------------------------------------------------------
## Private funtions for polynomials
## --------------------------------------------------------------------------
def _eval_poly_1D(s, k, Jmat):
if k == -1:
return 0.0
elif k == 0:
return 1.0
else:
ki = k-1
beta_k = float(Jmat[ki+1, ki])
alpha_km1 = float(Jmat[ki, ki])
if k == 1:
beta_km1 = 0.
else:
beta_km1 = float(Jmat[ki, ki-1])
return (1.0/float(beta_k))*(
(s - alpha_km1)*_eval_poly_1D(s, k-1, Jmat) -
beta_km1*_eval_poly_1D(s, k-2, Jmat))
def _define_poly_J(typestr, order, a=1, b=1):
n = order
# Define ab, the matrix of alpha and beta values
# These are recurrence coefficients
if typestr == 'legendre' or typestr == 'uniform':
l, r = -1, 1
o = l + (r-l)/2.0
ab = np.zeros([n, 2],float)
if n > 0:
ab[0, 0], ab[0, 1] = o,1
for k in np.arange(2, n+1, 1):
ik, ab[ik, 0] = k-1, o
if k == 2:
numer = float(((r-l)**2)*(k-1)*(k-1)*(k-1))
denom = float(((2*(k-1))**2)*(2*(k-1)+1))
else:
numer = float(((r-l)**2)*(k-1)*(k-1)*(k-1)*(k-1))
denom = float(((2*(k-1))**2)*(2*(k-1)+1)*(2*(k-1)-1))
ab[ik, 1] = numer / denom
elif typestr == 'hermite' or typestr == 'gaussian':
mu = 0
mu0 = math.gamma(mu+0.5)
if n==1:
ab = np.array([[0, mu0]])
else:
ab = np.zeros([n, 2])
nvechalf = np.array(range(1, n))*0.5
nvechalf[0::2] += mu
ab[0, 1], ab[1::, 1] = mu0, nvechalf
# Define J, the jacobi matrix from recurrence coefficients in ab
J = np.zeros([n, n], float)
if n == 1:
J = np.array([[ab[0, 0]]])
else:
J[0, 0] = ab[0, 0]
J[0, 1] = math.sqrt(ab[1, 1])
for i in np.arange(2, n, 1):
ii = i-1
J[ii, ii] = ab[ii,0]
J[ii, ii-1] = math.sqrt(ab[ii, 1])
J[ii, ii+1] = math.sqrt(ab[ii+1, 1])
J[n-1, n-1] = ab[n-1, 0]
J[n-1, n-2] = math.sqrt(ab[n-1, 1])
return J
def _makeIter(x):
try:
iter(x)
return [xi for xi in x]
except:
return [x]
|
lwcook/horsetail-matching | horsetailmatching/surrogates.py | PolySurrogate.surrogate | python | def surrogate(self, u_sparse, q_sparse):
'''Combines the train and predict methods to create a surrogate
model function fitted to the input/output combinations given in
u_sparse and q_sparse.
:param numpy.ndarray u_sparse: input values at which the output
values are obtained. Must be the same as the qaudrature
points defined by the getQuadraturePoints method.
:param numpy.ndarray q_sparse: output values corresponding
to the input values given in u_sparse to which the
surrogate is fitted
:return: surrogate model fitted to u_sparse and q_sparse
:rtype: function
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> surrogateFunc = thePC.surrogate(U, Q)
'''
self.train(q_sparse)
def model(u):
return self.predict(u)
return model | Combines the train and predict methods to create a surrogate
model function fitted to the input/output combinations given in
u_sparse and q_sparse.
:param numpy.ndarray u_sparse: input values at which the output
values are obtained. Must be the same as the qaudrature
points defined by the getQuadraturePoints method.
:param numpy.ndarray q_sparse: output values corresponding
to the input values given in u_sparse to which the
surrogate is fitted
:return: surrogate model fitted to u_sparse and q_sparse
:rtype: function
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> surrogateFunc = thePC.surrogate(U, Q) | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L43-L70 | [
"def train(self, ftrain):\n '''Trains the polynomial expansion.\n\n :param numpy.ndarray/function ftrain: output values corresponding to the\n quadrature points given by the getQuadraturePoints method to\n which the expansion should be trained. Or a function that should be evaluated\n at the quadrature points to give these output values.\n\n *Sample Usage*::\n\n >>> thePC = PolySurrogate(dimensions=2)\n >>> thePC.train(myFunc)\n >>> predicted_q = thePC.predict([0, 1])\n\n >>> thePC = PolySurrogate(dimensions=2)\n >>> U = thePC.getQuadraturePoints()\n >>> Q = [myFunc(u) for u in U]\n >>> thePC.train(Q)\n >>> predicted_q = thePC.predict([0, 1])\n\n '''\n self.coeffs = 0*self.coeffs\n\n upoints, wpoints = self.getQuadraturePointsAndWeights()\n\n try:\n fpoints = [ftrain(u) for u in upoints]\n except TypeError:\n fpoints = ftrain\n\n for ipoly in np.arange(self.N_poly):\n\n inds = tuple(self.index_polys[ipoly])\n coeff = 0.0\n for (u, q, w) in zip(upoints, fpoints, wpoints):\n coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)\n\n self.coeffs[inds] = coeff\n return None\n"
] | class PolySurrogate(object):
'''Class for creating surrogate models using non-intrusive polynomial
chaos.
:param int dimensions: number of dimensions of the polynomial expansion
:param int order: order of the polynomial expansion [default 3]
:param str/list poly_type: string of the type of polynomials to use in the
expansion, or list of strings where each entry in the list is the type
of polynomial to use in the corresponding dimension. Supported
polynomial types are legendre and gaussian. [default legendre]
*Example Declaration*::
>>> thePC = PolySurrogate(dimensions=3)
>>> thePC = PolySurrogate(dimensions=3, order=3)
>>> thePC = PolySurrogate(dimensions=3, order=3, poly_type='legendre')
'''
def __init__(self, dimensions, order=3, poly_type='legendre'):
self.dims = dimensions
self.P = int(order) + 1
if isinstance(poly_type, basestring):
self.poly_types = [poly_type for _ in np.arange(self.dims)]
else:
self.poly_types = _makeIter(poly_type)
self.J_list = [_define_poly_J(p, self.P) for p in self.poly_types]
imesh = np.meshgrid(*[np.arange(self.P) for d in np.arange(self.dims)])
self.index_polys = np.vstack([m.flatten() for m in imesh]).T
self.N_poly = len(self.index_polys)
self.coeffs = np.zeros([self.P for __ in np.arange(self.dims)])
def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y
def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None
def getQuadraturePointsAndWeights(self):
'''Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials in
each dimension.
:return: (u_points, w_points) - np.ndarray of shape
(num_polynomials, num_dimensions) and a np.ndarray of size
(num_polynomials)
:rtype: (np.ndarray, np.ndarray)
'''
qw_list, qp_list = [], []
for ii in np.arange(len(self.J_list)):
d, Q = np.linalg.eig(self.J_list[ii])
qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
qw = (Q[0, qpi]**2).reshape([d.size, 1])
qw_list.append(qw)
qp_list.append(qp)
umesh = np.meshgrid(*qp_list)
upoints = np.vstack([m.flatten() for m in umesh]).T
wmesh = np.meshgrid(*qw_list)
wpoints = np.vstack([m.flatten() for m in wmesh]).T
return upoints, wpoints
def getQuadraturePoints(self):
'''Gets the quadrature points at which the output values must be found
in order to train the polynomial expansion using gaussian quadrature.
:return: upoints - a np.ndarray of size (num_polynomials, num_dimensions)
:rtype: np.ndarray
'''
upoints, _ = self.getQuadraturePointsAndWeights()
return upoints
|
lwcook/horsetail-matching | horsetailmatching/surrogates.py | PolySurrogate.predict | python | def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y | Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1]) | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L72-L98 | [
"def eval_poly(uvec, nvec, Jvec):\n '''Evaluate multi-dimensional polynomials through tensor multiplication.\n\n :param list uvec: vector value of the uncertain parameters at which to evaluate the\n polynomial\n\n :param list nvec: order in each dimension at which to evaluate the polynomial\n\n :param list Jvec: Jacobi matrix of each dimension's 1D polynomial\n\n :return: poly_value - value of the polynomial evaluated at uvec\n\n :rtype: float\n\n '''\n us = _makeIter(uvec)\n ns = _makeIter(nvec)\n Js = _makeIter(Jvec)\n return np.prod([_eval_poly_1D(u, n, J) for u, n, J in zip(us, ns, Js)])\n"
] | class PolySurrogate(object):
'''Class for creating surrogate models using non-intrusive polynomial
chaos.
:param int dimensions: number of dimensions of the polynomial expansion
:param int order: order of the polynomial expansion [default 3]
:param str/list poly_type: string of the type of polynomials to use in the
expansion, or list of strings where each entry in the list is the type
of polynomial to use in the corresponding dimension. Supported
polynomial types are legendre and gaussian. [default legendre]
*Example Declaration*::
>>> thePC = PolySurrogate(dimensions=3)
>>> thePC = PolySurrogate(dimensions=3, order=3)
>>> thePC = PolySurrogate(dimensions=3, order=3, poly_type='legendre')
'''
def __init__(self, dimensions, order=3, poly_type='legendre'):
self.dims = dimensions
self.P = int(order) + 1
if isinstance(poly_type, basestring):
self.poly_types = [poly_type for _ in np.arange(self.dims)]
else:
self.poly_types = _makeIter(poly_type)
self.J_list = [_define_poly_J(p, self.P) for p in self.poly_types]
imesh = np.meshgrid(*[np.arange(self.P) for d in np.arange(self.dims)])
self.index_polys = np.vstack([m.flatten() for m in imesh]).T
self.N_poly = len(self.index_polys)
self.coeffs = np.zeros([self.P for __ in np.arange(self.dims)])
def surrogate(self, u_sparse, q_sparse):
'''Combines the train and predict methods to create a surrogate
model function fitted to the input/output combinations given in
u_sparse and q_sparse.
:param numpy.ndarray u_sparse: input values at which the output
values are obtained. Must be the same as the qaudrature
points defined by the getQuadraturePoints method.
:param numpy.ndarray q_sparse: output values corresponding
to the input values given in u_sparse to which the
surrogate is fitted
:return: surrogate model fitted to u_sparse and q_sparse
:rtype: function
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> surrogateFunc = thePC.surrogate(U, Q)
'''
self.train(q_sparse)
def model(u):
return self.predict(u)
return model
def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None
def getQuadraturePointsAndWeights(self):
'''Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials in
each dimension.
:return: (u_points, w_points) - np.ndarray of shape
(num_polynomials, num_dimensions) and a np.ndarray of size
(num_polynomials)
:rtype: (np.ndarray, np.ndarray)
'''
qw_list, qp_list = [], []
for ii in np.arange(len(self.J_list)):
d, Q = np.linalg.eig(self.J_list[ii])
qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
qw = (Q[0, qpi]**2).reshape([d.size, 1])
qw_list.append(qw)
qp_list.append(qp)
umesh = np.meshgrid(*qp_list)
upoints = np.vstack([m.flatten() for m in umesh]).T
wmesh = np.meshgrid(*qw_list)
wpoints = np.vstack([m.flatten() for m in wmesh]).T
return upoints, wpoints
def getQuadraturePoints(self):
'''Gets the quadrature points at which the output values must be found
in order to train the polynomial expansion using gaussian quadrature.
:return: upoints - a np.ndarray of size (num_polynomials, num_dimensions)
:rtype: np.ndarray
'''
upoints, _ = self.getQuadraturePointsAndWeights()
return upoints
|
lwcook/horsetail-matching | horsetailmatching/surrogates.py | PolySurrogate.train | python | def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None | Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1]) | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L100-L138 | [
"def eval_poly(uvec, nvec, Jvec):\n '''Evaluate multi-dimensional polynomials through tensor multiplication.\n\n :param list uvec: vector value of the uncertain parameters at which to evaluate the\n polynomial\n\n :param list nvec: order in each dimension at which to evaluate the polynomial\n\n :param list Jvec: Jacobi matrix of each dimension's 1D polynomial\n\n :return: poly_value - value of the polynomial evaluated at uvec\n\n :rtype: float\n\n '''\n us = _makeIter(uvec)\n ns = _makeIter(nvec)\n Js = _makeIter(Jvec)\n return np.prod([_eval_poly_1D(u, n, J) for u, n, J in zip(us, ns, Js)])\n",
"def getQuadraturePointsAndWeights(self):\n '''Gets the quadrature points and weights for gaussian quadrature\n integration of inner products from the definition of the polynomials in\n each dimension.\n\n\n :return: (u_points, w_points) - np.ndarray of shape\n (num_polynomials, num_dimensions) and a np.ndarray of size\n (num_polynomials)\n\n :rtype: (np.ndarray, np.ndarray)\n '''\n\n qw_list, qp_list = [], []\n for ii in np.arange(len(self.J_list)):\n\n d, Q = np.linalg.eig(self.J_list[ii])\n qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)\n qw = (Q[0, qpi]**2).reshape([d.size, 1])\n\n qw_list.append(qw)\n qp_list.append(qp)\n\n umesh = np.meshgrid(*qp_list)\n upoints = np.vstack([m.flatten() for m in umesh]).T\n\n wmesh = np.meshgrid(*qw_list)\n wpoints = np.vstack([m.flatten() for m in wmesh]).T\n\n return upoints, wpoints\n"
] | class PolySurrogate(object):
'''Class for creating surrogate models using non-intrusive polynomial
chaos.
:param int dimensions: number of dimensions of the polynomial expansion
:param int order: order of the polynomial expansion [default 3]
:param str/list poly_type: string of the type of polynomials to use in the
expansion, or list of strings where each entry in the list is the type
of polynomial to use in the corresponding dimension. Supported
polynomial types are legendre and gaussian. [default legendre]
*Example Declaration*::
>>> thePC = PolySurrogate(dimensions=3)
>>> thePC = PolySurrogate(dimensions=3, order=3)
>>> thePC = PolySurrogate(dimensions=3, order=3, poly_type='legendre')
'''
def __init__(self, dimensions, order=3, poly_type='legendre'):
self.dims = dimensions
self.P = int(order) + 1
if isinstance(poly_type, basestring):
self.poly_types = [poly_type for _ in np.arange(self.dims)]
else:
self.poly_types = _makeIter(poly_type)
self.J_list = [_define_poly_J(p, self.P) for p in self.poly_types]
imesh = np.meshgrid(*[np.arange(self.P) for d in np.arange(self.dims)])
self.index_polys = np.vstack([m.flatten() for m in imesh]).T
self.N_poly = len(self.index_polys)
self.coeffs = np.zeros([self.P for __ in np.arange(self.dims)])
def surrogate(self, u_sparse, q_sparse):
'''Combines the train and predict methods to create a surrogate
model function fitted to the input/output combinations given in
u_sparse and q_sparse.
:param numpy.ndarray u_sparse: input values at which the output
values are obtained. Must be the same as the qaudrature
points defined by the getQuadraturePoints method.
:param numpy.ndarray q_sparse: output values corresponding
to the input values given in u_sparse to which the
surrogate is fitted
:return: surrogate model fitted to u_sparse and q_sparse
:rtype: function
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> surrogateFunc = thePC.surrogate(U, Q)
'''
self.train(q_sparse)
def model(u):
return self.predict(u)
return model
def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y
def getQuadraturePointsAndWeights(self):
'''Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials in
each dimension.
:return: (u_points, w_points) - np.ndarray of shape
(num_polynomials, num_dimensions) and a np.ndarray of size
(num_polynomials)
:rtype: (np.ndarray, np.ndarray)
'''
qw_list, qp_list = [], []
for ii in np.arange(len(self.J_list)):
d, Q = np.linalg.eig(self.J_list[ii])
qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
qw = (Q[0, qpi]**2).reshape([d.size, 1])
qw_list.append(qw)
qp_list.append(qp)
umesh = np.meshgrid(*qp_list)
upoints = np.vstack([m.flatten() for m in umesh]).T
wmesh = np.meshgrid(*qw_list)
wpoints = np.vstack([m.flatten() for m in wmesh]).T
return upoints, wpoints
def getQuadraturePoints(self):
'''Gets the quadrature points at which the output values must be found
in order to train the polynomial expansion using gaussian quadrature.
:return: upoints - a np.ndarray of size (num_polynomials, num_dimensions)
:rtype: np.ndarray
'''
upoints, _ = self.getQuadraturePointsAndWeights()
return upoints
|
lwcook/horsetail-matching | horsetailmatching/surrogates.py | PolySurrogate.getQuadraturePointsAndWeights | python | def getQuadraturePointsAndWeights(self):
'''Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials in
each dimension.
:return: (u_points, w_points) - np.ndarray of shape
(num_polynomials, num_dimensions) and a np.ndarray of size
(num_polynomials)
:rtype: (np.ndarray, np.ndarray)
'''
qw_list, qp_list = [], []
for ii in np.arange(len(self.J_list)):
d, Q = np.linalg.eig(self.J_list[ii])
qp, qpi = d[np.argsort(d)].reshape([d.size, 1]), np.argsort(d)
qw = (Q[0, qpi]**2).reshape([d.size, 1])
qw_list.append(qw)
qp_list.append(qp)
umesh = np.meshgrid(*qp_list)
upoints = np.vstack([m.flatten() for m in umesh]).T
wmesh = np.meshgrid(*qw_list)
wpoints = np.vstack([m.flatten() for m in wmesh]).T
return upoints, wpoints | Gets the quadrature points and weights for gaussian quadrature
integration of inner products from the definition of the polynomials in
each dimension.
:return: (u_points, w_points) - np.ndarray of shape
(num_polynomials, num_dimensions) and a np.ndarray of size
(num_polynomials)
:rtype: (np.ndarray, np.ndarray) | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/surrogates.py#L140-L169 | null | class PolySurrogate(object):
'''Class for creating surrogate models using non-intrusive polynomial
chaos.
:param int dimensions: number of dimensions of the polynomial expansion
:param int order: order of the polynomial expansion [default 3]
:param str/list poly_type: string of the type of polynomials to use in the
expansion, or list of strings where each entry in the list is the type
of polynomial to use in the corresponding dimension. Supported
polynomial types are legendre and gaussian. [default legendre]
*Example Declaration*::
>>> thePC = PolySurrogate(dimensions=3)
>>> thePC = PolySurrogate(dimensions=3, order=3)
>>> thePC = PolySurrogate(dimensions=3, order=3, poly_type='legendre')
'''
def __init__(self, dimensions, order=3, poly_type='legendre'):
self.dims = dimensions
self.P = int(order) + 1
if isinstance(poly_type, basestring):
self.poly_types = [poly_type for _ in np.arange(self.dims)]
else:
self.poly_types = _makeIter(poly_type)
self.J_list = [_define_poly_J(p, self.P) for p in self.poly_types]
imesh = np.meshgrid(*[np.arange(self.P) for d in np.arange(self.dims)])
self.index_polys = np.vstack([m.flatten() for m in imesh]).T
self.N_poly = len(self.index_polys)
self.coeffs = np.zeros([self.P for __ in np.arange(self.dims)])
def surrogate(self, u_sparse, q_sparse):
'''Combines the train and predict methods to create a surrogate
model function fitted to the input/output combinations given in
u_sparse and q_sparse.
:param numpy.ndarray u_sparse: input values at which the output
values are obtained. Must be the same as the qaudrature
points defined by the getQuadraturePoints method.
:param numpy.ndarray q_sparse: output values corresponding
to the input values given in u_sparse to which the
surrogate is fitted
:return: surrogate model fitted to u_sparse and q_sparse
:rtype: function
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> surrogateFunc = thePC.surrogate(U, Q)
'''
self.train(q_sparse)
def model(u):
return self.predict(u)
return model
def predict(self, u):
'''Predicts the output value at u from the fitted polynomial expansion.
Therefore the method train() must be called first.
:param numpy.ndarray u: input value at which to predict the output.
:return: q_approx - the predicted value of the output at u
:rtype: float
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(U, Q)
>>> thePC.predict([0, 1])
'''
y, ysub = 0, np.zeros(self.N_poly)
for ip in range(self.N_poly):
inds = tuple(self.index_polys[ip])
ysub[ip] = self.coeffs[inds]*eval_poly(u, inds, self.J_list)
y += ysub[ip]
self.response_components = ysub
return y
def train(self, ftrain):
'''Trains the polynomial expansion.
:param numpy.ndarray/function ftrain: output values corresponding to the
quadrature points given by the getQuadraturePoints method to
which the expansion should be trained. Or a function that should be evaluated
at the quadrature points to give these output values.
*Sample Usage*::
>>> thePC = PolySurrogate(dimensions=2)
>>> thePC.train(myFunc)
>>> predicted_q = thePC.predict([0, 1])
>>> thePC = PolySurrogate(dimensions=2)
>>> U = thePC.getQuadraturePoints()
>>> Q = [myFunc(u) for u in U]
>>> thePC.train(Q)
>>> predicted_q = thePC.predict([0, 1])
'''
self.coeffs = 0*self.coeffs
upoints, wpoints = self.getQuadraturePointsAndWeights()
try:
fpoints = [ftrain(u) for u in upoints]
except TypeError:
fpoints = ftrain
for ipoly in np.arange(self.N_poly):
inds = tuple(self.index_polys[ipoly])
coeff = 0.0
for (u, q, w) in zip(upoints, fpoints, wpoints):
coeff += eval_poly(u, inds, self.J_list)*q*np.prod(w)
self.coeffs[inds] = coeff
return None
def getQuadraturePoints(self):
'''Gets the quadrature points at which the output values must be found
in order to train the polynomial expansion using gaussian quadrature.
:return: upoints - a np.ndarray of size (num_polynomials, num_dimensions)
:rtype: np.ndarray
'''
upoints, _ = self.getQuadraturePointsAndWeights()
return upoints
|
lwcook/horsetail-matching | horsetailmatching/demoproblems.py | TP0 | python | def TP0(dv, u):
'''Demo problem 0 for horsetail matching, takes two input vectors of any size
and returns a single output'''
return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u)) | Demo problem 0 for horsetail matching, takes two input vectors of any size
and returns a single output | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L3-L6 | null | import numpy as np
def TP1(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
factor = 0.1*(u[0]**2 + 2*u[0]*u[1] + u[1]**2)
q = 0 + factor*(x[0]**2 + 2*x[1]*x[0] + x[1]**2)
if not jac:
return q
else:
grad = [factor*(2*x[0] + 2*x[1]), factor*(2*x[0] + 2*x[1])]
return q, grad
def TP2(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10. )
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3
return q, [dqdx1, dqdx2]
def TP2b(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) +\
0.2*z*u[1]**3 + 7 + u[0]*(y + z)*0.02
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10.) + 0.01*u[0]
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3 + 0.01*u[0]
return q, [dqdx1, dqdx2]
def TP3(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input values of
size 1'''
q = 2 + 0.5*x + 1.5*(1-x)*u
if not jac:
return q
else:
grad = 0.5 -1.5*u
return q, grad
|
lwcook/horsetail-matching | horsetailmatching/demoproblems.py | TP1 | python | def TP1(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
factor = 0.1*(u[0]**2 + 2*u[0]*u[1] + u[1]**2)
q = 0 + factor*(x[0]**2 + 2*x[1]*x[0] + x[1]**2)
if not jac:
return q
else:
grad = [factor*(2*x[0] + 2*x[1]), factor*(2*x[0] + 2*x[1])]
return q, grad | Demo problem 1 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L8-L18 | null | import numpy as np
def TP0(dv, u):
'''Demo problem 0 for horsetail matching, takes two input vectors of any size
and returns a single output'''
return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u))
def TP2(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10. )
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3
return q, [dqdx1, dqdx2]
def TP2b(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) +\
0.2*z*u[1]**3 + 7 + u[0]*(y + z)*0.02
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10.) + 0.01*u[0]
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3 + 0.01*u[0]
return q, [dqdx1, dqdx2]
def TP3(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input values of
size 1'''
q = 2 + 0.5*x + 1.5*(1-x)*u
if not jac:
return q
else:
grad = 0.5 -1.5*u
return q, grad
|
lwcook/horsetail-matching | horsetailmatching/demoproblems.py | TP2 | python | def TP2(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10. )
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3
return q, [dqdx1, dqdx2] | Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L20-L34 | null | import numpy as np
def TP0(dv, u):
'''Demo problem 0 for horsetail matching, takes two input vectors of any size
and returns a single output'''
return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u))
def TP1(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
factor = 0.1*(u[0]**2 + 2*u[0]*u[1] + u[1]**2)
q = 0 + factor*(x[0]**2 + 2*x[1]*x[0] + x[1]**2)
if not jac:
return q
else:
grad = [factor*(2*x[0] + 2*x[1]), factor*(2*x[0] + 2*x[1])]
return q, grad
def TP2b(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) +\
0.2*z*u[1]**3 + 7 + u[0]*(y + z)*0.02
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10.) + 0.01*u[0]
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3 + 0.01*u[0]
return q, [dqdx1, dqdx2]
def TP3(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input values of
size 1'''
q = 2 + 0.5*x + 1.5*(1-x)*u
if not jac:
return q
else:
grad = 0.5 -1.5*u
return q, grad
|
lwcook/horsetail-matching | horsetailmatching/demoproblems.py | TP3 | python | def TP3(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input values of
size 1'''
q = 2 + 0.5*x + 1.5*(1-x)*u
if not jac:
return q
else:
grad = 0.5 -1.5*u
return q, grad | Demo problem 1 for horsetail matching, takes two input values of
size 1 | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/demoproblems.py#L53-L62 | null | import numpy as np
def TP0(dv, u):
'''Demo problem 0 for horsetail matching, takes two input vectors of any size
and returns a single output'''
return np.linalg.norm(np.array(dv)) + np.linalg.norm(np.array(u))
def TP1(x, u, jac=False):
'''Demo problem 1 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
factor = 0.1*(u[0]**2 + 2*u[0]*u[1] + u[1]**2)
q = 0 + factor*(x[0]**2 + 2*x[1]*x[0] + x[1]**2)
if not jac:
return q
else:
grad = [factor*(2*x[0] + 2*x[1]), factor*(2*x[0] + 2*x[1])]
return q, grad
def TP2(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) + 0.2*z*u[1]**3 + 7
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10. )
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3
return q, [dqdx1, dqdx2]
def TP2b(dv, u, jac=False):
'''Demo problem 2 for horsetail matching, takes two input vectors of size 2
and returns just the qoi if jac is False or the qoi and its gradient if jac
is True'''
y = dv[0]/2.
z = dv[1]/2. + 12
q = 0.25*((y**2 + z**2)/10 + 5*u[0]*u[1] - z*u[1]**2) +\
0.2*z*u[1]**3 + 7 + u[0]*(y + z)*0.02
if not jac:
return q
else:
dqdx1 = (1./8.)*( 2*y/10.) + 0.01*u[0]
dqdx2 = (1./8.)*( 2*z/10. - u[1]**2) + 0.1*u[1]**3 + 0.01*u[0]
return q, [dqdx1, dqdx2]
|
lwcook/horsetail-matching | horsetailmatching/densitymatching.py | DensityMatching.evalMetric | python | def evalMetric(self, x, method=None):
'''Evaluates the density matching metric at a given design point.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theDM = DensityMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theDM.evalMetric(x0)
'''
return super(DensityMatching, self).evalMetric(x, method) | Evaluates the density matching metric at a given design point.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theDM = DensityMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theDM.evalMetric(x0) | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/densitymatching.py#L114-L134 | [
" def evalMetric(self, x, method=None):\n '''Evaluates the horsetail matching metric at given values of the\n design variables.\n\n :param iterable x: values of the design variables, this is passed as\n the first argument to the function fqoi\n :param str method: method to use to evaluate the metric ('empirical' or\n 'kernel')\n\n :return: metric_value - value of the metric evaluated at the design\n point given by x\n\n :rtype: float\n\n *Example Usage*::\n\n >>> def myFunc(x, u): return x[0]*x[1] + u\n >>> u1 = UniformParameter()\n >>> theHM = HorsetailMatching(myFunc, u)\n >>> x0 = [1, 2]\n >>> theHM.evalMetric(x0)\n\n '''\n # Make sure dimensions are correct\n# u_sample_dimensions = self._processDimensions()\n\n if self.verbose:\n print('----------')\n print('At design: ' + str(x))\n\n q_samples, grad_samples = self.evalSamples(x)\n\n if self.verbose:\n print('Evaluating metric')\n\n return self.evalMetricFromSamples(q_samples, grad_samples, method)\n"
] | class DensityMatching(HorsetailMatching):
'''Class for using density matching within an optimization. The main
functionality is to evaluate the density matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a DensityMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Is a list of UncertainParameter objects, or a list of
functions that return samples of the each uncertainty.
:param function ftarget: function that returns the value of the target
PDF function.
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param list integration_points:
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
'''
    def __init__(self, fqoi, prob_uncertainties, ftarget=None, jac=False,
            samples_prob=1000, integration_points=None, kernel_bandwidth=None,
            surrogate=None, surrogate_points=None, surrogate_jac=False,
            reuse_samples=True, verbose=False):
        # Simply store the configuration; all attributes can be re-assigned
        # after construction with the same effect as passing them here.
        self.fqoi = fqoi
        self.prob_uncertainties = prob_uncertainties
        # Density matching uses only probabilistic uncertainties, so the
        # interval-uncertainty list stays empty and a single "interval
        # sample" is used (samples_int = 1).
        self.int_uncertainties = []
        self.ftarget = ftarget
        self.jac = jac
        self.samples_prob = samples_prob
        self.samples_int = 1
        self.integration_points = integration_points
        self.kernel_bandwidth = kernel_bandwidth
        self.reuse_samples = reuse_samples
        # u_samples is populated lazily the first time the metric is
        # evaluated (and reused when reuse_samples is True).
        self.u_samples = None
        self.surrogate = surrogate
        self.surrogate_points = surrogate_points
        self.surrogate_jac = surrogate_jac
        self.verbose = verbose

    # Note that this class makes heavy use of the HorsetailMatching parent
    # class's methods
##############################################################################
## Public Methods
##############################################################################
    def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
        '''Evaluates the density matching metric from given samples of the
        quantity of interest and gradient instead of evaluating them at a
        design.

        :param np.ndarray q_samples: samples of the quantity of interest,
            size (M_int, M_prob)
        :param np.ndarray grad_samples: samples of the gradient,
            size (M_int, M_prob, n_x)

        :return: metric_value - value of the metric
        :rtype: float
        '''
        # The method argument is accepted for interface compatibility with
        # the parent class but is not used by density matching.
        return self._evalDensityMetric(q_samples, grad_samples)
    def getPDF(self):
        '''Function that gets vectors of the pdf and target at the last design
        evaluated.

        :return: tuple of q values, pdf values, target values
        '''
        # _qplot/_hplot/_tplot are cached by _evalDensityMetric, so the
        # metric must have been evaluated at least once before calling this.
        if hasattr(self, '_qplot'):
            return self._qplot, self._hplot, self._tplot
        else:
            raise ValueError('''The metric has not been evaluated at any
                design point so the PDF cannot get obtained''')
##############################################################################
## Private methods ##
##############################################################################
    def _evalDensityMetric(self, q_samples, grad_samples=None):
        '''Evaluates the density matching metric - the L2 norm of the
        difference between a Gaussian kernel density estimate of the qoi PDF
        and the target PDF - and, if grad_samples is given, its analytic
        gradient with respect to the design variables.'''
        # Default integration points: 1000 points spanning three times the
        # sample range (one extra range either side of [q_min, q_max]),
        # cached so subsequent evaluations integrate over the same grid.
        if self.integration_points is None:
            q_min = np.amin(q_samples)
            q_max = np.amax(q_samples)
            q_range = q_max - q_min
            fis = np.linspace(q_min - q_range, q_max + q_range, 1000)
            self.integration_points = fis
        else:
            fis = self.integration_points

        # If kernel bandwidth not specified, find it using Scott's rule
        if self.kernel_bandwidth is None:
            # Guard against (near-)constant samples, where Scott's rule
            # would give a zero bandwidth.
            if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
                bw = 1e-6
            else:
                bw = ((4/(3.*q_samples.shape[1]))**(1/5.)
                      *np.std(q_samples[0,:]))
            self.kernel_bandwidth = bw
        else:
            bw = self.kernel_bandwidth

        fjs = np.array(q_samples)

        N = len(fis)            # number of integration points
        M = self.samples_prob   # number of probabilistic samples

        # Target PDF evaluated at every integration point, as a column vector.
        t = np.array([float(self.ftarget(fi)) for fi in fis]).reshape([N, 1])

        # column vector - row vector to give matrix
        delf = fis.reshape([N, 1]) - fjs.reshape([1, M])

        # Gaussian KDE: K[i, j] is the kernel centred on sample j evaluated
        # at integration point i; Ks sums over samples to give the estimated
        # PDF at each integration point.
        const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
        K = const_term * np.exp((-1./2.) * (delf/bw)**2)
        Ks = np.dot(K, np.ones([M, 1])).reshape([N, 1])

        W = np.zeros([N, N])  # Trapezium rule weighting matrix
        for i in range(N):
            W[i, i] = (fis[min(i+1, N-1)] - fis[max(i-1, 0)])*0.5

        # Discretized L2 norm of (target - KDE) over the integration grid.
        l2norm = float((t - Ks).T.dot(W.dot((t - Ks))))

        # Cache for getPDF()
        self._qplot = fis
        self._hplot = Ks
        self._tplot = t

        if grad_samples is None:
            return l2norm
        else:
            ndv = grad_samples.shape[2]
            gradjs = grad_samples[0, :, :]
            # Kprime[i, j] = dK[i, j]/dq_j (the trailing -1 carries the sign
            # from differentiating delf = fi - fj with respect to fj), so the
            # chain rule through the sample gradients gives the metric
            # gradient d(l2norm)/dx = 2 (t - Ks)^T W Kprime Fprime.
            Kprime = const_term * np.exp((-1./2.) * (delf/bw)**2) *\
                delf / bw**2 * -1.
            Fprime = np.zeros([M, ndv])
            for kdv in range(ndv):
                Fprime[:, kdv] = gradjs[:, kdv]
            gradient = 2*(t - Ks).T.dot(W.dot(Kprime.dot(Fprime))).reshape(ndv)
            return l2norm, gradient
|
lwcook/horsetail-matching | horsetailmatching/densitymatching.py | DensityMatching.getPDF | python | def getPDF(self):
'''Function that gets vectors of the pdf and target at the last design
evaluated.
:return: tuple of q values, pdf values, target values
'''
if hasattr(self, '_qplot'):
return self._qplot, self._hplot, self._tplot
else:
raise ValueError('''The metric has not been evaluated at any
design point so the PDF cannot get obtained''') | Function that gets vectors of the pdf and target at the last design
evaluated.
:return: tuple of q values, pdf values, target values | train | https://github.com/lwcook/horsetail-matching/blob/f3d5f8d01249debbca978f412ce4eae017458119/horsetailmatching/densitymatching.py#L152-L165 | null | class DensityMatching(HorsetailMatching):
'''Class for using density matching within an optimization. The main
functionality is to evaluate the density matching
metric (and optionally its gradient) that can be used with external
optimizers.
The code is written such that all arguments that can be used at the
initialization of a DensityMatching object can also be set as
attributes after creation to achieve exactly the same effect.
:param function fqoi: function that returns the quantity of interest, it
must take two ordered arguments - the value of the design variable
vector and the value of the uncertainty vector.
:param list prob_uncertainties: list of probabilistic uncertainties.
Is a list of UncertainParameter objects, or a list of
functions that return samples of the each uncertainty.
:param function ftarget: function that returns the value of the target
PDF function.
:param bool/function jac: Argument that
specifies how to evaluate the gradient of the quantity of interest.
If False no gradients are propagated, if True the fqoi should return
a second argument g such that g_i = dq/dx_i. If a function, it should
have the same signature as fqoi but return g. [default False]
:param int samples_prob: number of samples to take from the
probabilsitic uncertainties. [default 1000]
:param list integration_points:
The integration point values to use when evaluating the metric using
kernels [by default 100 points spread over 3 times the range of
the samples of q obtained the first time the metric is evaluated]
:param number kernel_bandwidth: The bandwidth
used in the kernel function [by default it is found the first time
the metric is evaluated using Scott's rule]
:param function surrogate: Surrogate that is created at every design
point to be sampled instead of fqoi. It should be a function that
takes two arguments - an array with values of the uncertainties at
which to fit the surrogate of size (num_quadrature_points,
num_uncertainties), and an array of quantity of interest values
corresponding to these uncertainty values to which to fit the surrogate
of size (num_quadrature_points). It should return a functio that
predicts the qoi at an aribtrary value of the uncertainties.
[default None]
:param list surrogate_points: Only with a surrogate. List of points at
which fqoi is evaluated to give values to fit the surrogates to. These
are passed to the surrogate function along with the qoi evaluated at
these points when the surrogate is fitted [by default tensor
quadrature of 5 points in each uncertain dimension is used]
:param bool/function surrogate_jac: Only with a surrogate. Specifies how
to take surrogates of the gradient. It works similarly to the
jac argument: if False, the same surrogate is fitted to fqoi and each
component of its gradient, if True, the surrogate function is
expected to take a third argument - an array that is the gradient
at each of the quadrature points of size
(num_quadrature_points, num_design_variables). If a function, then
instead the array of uncertainty values and the array of gradient
values are passed to this function and it should return a function for
the surrogate model of the gradient.
:param bool reuse_samples: If True will reuse the same set of samples of
the uncertainties for evaluating the metric at any value of the
design variables, if False wise will re-sample every time evalMetric
is called [default True]
:param bool verbose: If True will print out details [default False].
'''
def __init__(self, fqoi, prob_uncertainties, ftarget=None, jac=False,
samples_prob=1000, integration_points=None, kernel_bandwidth=None,
surrogate=None, surrogate_points=None, surrogate_jac=False,
reuse_samples=True, verbose=False):
self.fqoi = fqoi
self.prob_uncertainties = prob_uncertainties
self.int_uncertainties = []
self.ftarget = ftarget
self.jac = jac
self.samples_prob = samples_prob
self.samples_int = 1
self.integration_points = integration_points
self.kernel_bandwidth = kernel_bandwidth
self.reuse_samples = reuse_samples
self.u_samples = None
self.surrogate = surrogate
self.surrogate_points = surrogate_points
self.surrogate_jac = surrogate_jac
self.verbose = verbose
# Note that this class makes heavy use of the HorsetailMatching parent
# class's methods
##############################################################################
## Public Methods
##############################################################################
def evalMetric(self, x, method=None):
'''Evaluates the density matching metric at a given design point.
:param iterable x: values of the design variables, this is passed as
the first argument to the function fqoi
:return: metric_value - value of the metric evaluated at the design
point given by x
:rtype: float
*Example Usage*::
>>> def myFunc(x, u): return x[0]*x[1] + u
>>> u1 = UniformParameter()
>>> theDM = DensityMatching(myFunc, u)
>>> x0 = [1, 2]
>>> theDM.evalMetric(x0)
'''
return super(DensityMatching, self).evalMetric(x, method)
def evalMetricFromSamples(self, q_samples, grad_samples=None, method=None):
'''Evaluates the density matching metric from given samples of the quantity
of interest and gradient instead of evaluating them at a design.
:param np.ndarray q_samples: samples of the quantity of interest,
size (M_int, M_prob)
:param np.ndarray grad_samples: samples of the gradien,
size (M_int, M_prob, n_x)
:return: metric_value - value of the metric
:rtype: float
'''
return self._evalDensityMetric(q_samples, grad_samples)
##############################################################################
## Private methods ##
##############################################################################
def _evalDensityMetric(self, q_samples, grad_samples=None):
if self.integration_points is None:
q_min = np.amin(q_samples)
q_max = np.amax(q_samples)
q_range = q_max - q_min
fis = np.linspace(q_min - q_range, q_max + q_range, 1000)
self.integration_points = fis
else:
fis = self.integration_points
# If kernel bandwidth not specified, find it using Scott's rule
if self.kernel_bandwidth is None:
if abs(np.max(q_samples) - np.min(q_samples)) < 1e-6:
bw = 1e-6
else:
bw = ((4/(3.*q_samples.shape[1]))**(1/5.)
*np.std(q_samples[0,:]))
self.kernel_bandwidth = bw
else:
bw = self.kernel_bandwidth
fjs = np.array(q_samples)
N = len(fis)
M = self.samples_prob
t = np.array([float(self.ftarget(fi)) for fi in fis]).reshape([N, 1])
# column vector - row vector to give matrix
delf = fis.reshape([N, 1]) - fjs.reshape([1, M])
const_term = 1.0/(M * np.sqrt(2*np.pi*bw**2))
K = const_term * np.exp((-1./2.) * (delf/bw)**2)
Ks = np.dot(K, np.ones([M, 1])).reshape([N, 1])
W = np.zeros([N, N]) # Trapezium rule weighting matrix
for i in range(N):
W[i, i] = (fis[min(i+1, N-1)] - fis[max(i-1, 0)])*0.5
l2norm = float((t - Ks).T.dot(W.dot((t - Ks))))
self._qplot = fis
self._hplot = Ks
self._tplot = t
if grad_samples is None:
return l2norm
else:
ndv = grad_samples.shape[2]
gradjs = grad_samples[0, :, :]
Kprime = const_term * np.exp((-1./2.) * (delf/bw)**2) *\
delf / bw**2 * -1.
Fprime = np.zeros([M, ndv])
for kdv in range(ndv):
Fprime[:, kdv] = gradjs[:, kdv]
gradient = 2*(t - Ks).T.dot(W.dot(Kprime.dot(Fprime))).reshape(ndv)
return l2norm, gradient
|
glue-viz/echo | echo/core.py | add_callback | python | def add_callback(instance, prop, callback, echo_old=False, priority=0):
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.add_callback(instance, callback, echo_old=echo_old, priority=priority) | Attach a callback function to a property in an instance
Parameters
----------
instance
The instance to add the callback to
prop : str
Name of callback property in `instance`
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
Examples
--------
::
class Foo:
bar = CallbackProperty(0)
def callback(value):
pass
f = Foo()
add_callback(f, 'bar', callback) | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L334-L372 | [
"def add_callback(self, instance, func, echo_old=False, priority=0):\n \"\"\"\n Add a callback to a specific instance that manages this property\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n func : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n \"\"\"\n\n if echo_old:\n self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)\n else:\n self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)\n"
] | from __future__ import absolute_import, division, print_function
import weakref
from itertools import chain
from weakref import WeakKeyDictionary
from contextlib import contextmanager
from .callback_container import CallbackContainer
__all__ = ['CallbackProperty', 'callback_property',
'add_callback', 'remove_callback',
'delay_callback', 'ignore_callback',
'HasCallbackProperties', 'keep_in_sync']
class CallbackProperty(object):
    """
    A property that callback functions can be added to.

    When a callback property changes value, each callback function
    is called with information about the state change. Otherwise,
    callback properties behave just like normal instance variables.

    CallbackProperties must be defined at the class level. Use
    the helper function :func:`~echo.add_callback` to attach a callback to
    a specific instance of a class with CallbackProperties

    Parameters
    ----------
    default
        The initial value for the property
    docstring : str
        The docstring for the property
    getter, setter : func
        Custom getter and setter functions (advanced)
    """

    def __init__(self, default=None, docstring=None, getter=None, setter=None):
        """
        :param default: The initial value for the property
        """
        self._default = default
        # Per-instance state lives in WeakKeyDictionaries so that this
        # class-level descriptor never keeps the owning instances alive.
        self._callbacks = WeakKeyDictionary()        # instance -> func(new) callbacks
        self._2arg_callbacks = WeakKeyDictionary()   # instance -> func(old, new) callbacks
        self._disabled = WeakKeyDictionary()         # instance -> bool (callbacks muted)
        self._values = WeakKeyDictionary()           # instance -> current value
        if getter is None:
            getter = self._default_getter
        if setter is None:
            setter = self._default_setter
        self._getter = getter
        self._setter = setter
        if docstring is not None:
            self.__doc__ = docstring

    def _default_getter(self, instance, owner=None):
        # Fall back to the class-level default until a value has been set
        # on this particular instance.
        return self._values.get(instance, self._default)

    def _default_setter(self, instance, value):
        self._values.__setitem__(instance, value)

    def __get__(self, instance, owner=None):
        # Accessing the property on the class (not an instance) returns the
        # descriptor itself, mirroring the behavior of a plain `property`.
        if instance is None:
            return self
        return self._getter(instance)

    def __set__(self, instance, value):
        try:
            old = self.__get__(instance)
        except AttributeError:  # pragma: no cover
            old = None
        self._setter(instance, value)
        new = self.__get__(instance)
        # Callbacks fire only when the value actually changed (by equality).
        if old != new:
            self.notify(instance, old, new)

    def setter(self, func):
        """
        Method to use as a decorator, to mimic @property.setter
        """
        self._setter = func
        return self

    def _get_full_info(self, instance):
        # Some callback subclasses may contain additional info in addition
        # to the main value, and we need to use this full information when
        # comparing old and new 'values', so this method is used in that
        # case. The result should be a tuple where the first item is the
        # actual primary value of the property and the second item is any
        # additional data to use in the comparison.
        return self.__get__(instance), None

    def notify(self, instance, old, new):
        """
        Call all callback functions with the current value

        Each callback will either be called using
        callback(new) or callback(old, new) depending
        on whether ``echo_old`` was set to `True` when calling
        :func:`~echo.add_callback`

        Parameters
        ----------
        instance
            The instance to consider
        old
            The old value of the property
        new
            The new value of the property
        """
        # Silently skip notification if callbacks are disabled for this
        # instance (see disable()/enable(), used by delay/ignore helpers).
        if self._disabled.get(instance, False):
            return
        for cback in self._callbacks.get(instance, []):
            cback(new)
        for cback in self._2arg_callbacks.get(instance, []):
            cback(old, new)

    def disable(self, instance):
        """
        Disable callbacks for a specific instance
        """
        self._disabled[instance] = True

    def enable(self, instance):
        """
        Enable previously-disabled callbacks for a specific instance
        """
        self._disabled[instance] = False

    def add_callback(self, instance, func, echo_old=False, priority=0):
        """
        Add a callback to a specific instance that manages this property

        Parameters
        ----------
        instance
            The instance to add the callback to
        func : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``func(old, new)``. If `False`
            (the default), will be invoked as ``func(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution of
            callbacks (larger values indicate a higher priority).
        """
        # One- and two-argument callbacks are stored in separate containers
        # so notify() can dispatch without inspecting each function.
        if echo_old:
            self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
        else:
            self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)

    def remove_callback(self, instance, func):
        """
        Remove a previously-added callback

        Parameters
        ----------
        instance
            The instance to detach the callback from
        func : func
            The callback function to remove
        """
        # Search both containers; the for/else raises only when the function
        # was found in neither.
        for cb in [self._callbacks, self._2arg_callbacks]:
            if instance not in cb:
                continue
            if func in cb[instance]:
                cb[instance].remove(func)
                return
        else:
            raise ValueError("Callback function not found: %s" % func)
class HasCallbackProperties(object):
    """
    A class that adds functionality to subclasses that use callback properties.
    """

    def __init__(self):
        from .list import ListCallbackProperty
        self._global_callbacks = CallbackContainer()
        self._ignored_properties = set()
        self._delayed_properties = {}
        self._delay_global_calls = {}
        self._callback_wrappers = {}
        # List callback properties mutate in place (append/remove), which
        # bypasses __setattr__, so hook their own change notifications here.
        for prop_name, prop in self.iter_callback_properties():
            if isinstance(prop, ListCallbackProperty):
                prop.add_callback(self, self._notify_global_lists)

    def _ignore_global_callbacks(self, properties):
        # This is to allow ignore_callbacks to work for global callbacks
        self._ignored_properties.update(properties)

    def _unignore_global_callbacks(self, properties):
        # Once this is called, we simply remove properties from _ignored_properties
        # and don't call the callbacks. This is used by ignore_callback
        self._ignored_properties -= set(properties)

    def _delay_global_callbacks(self, properties):
        # This is to allow delay_callback to still have an effect in delaying
        # global callbacks. We set _delayed_properties to a dictionary of the
        # values at the point at which the callbacks are delayed.
        self._delayed_properties.update(properties)

    def _process_delayed_global_callbacks(self, properties):
        # Once this is called, the global callbacks are called once each with
        # a dictionary of the current values of properties that have been
        # resumed.
        kwargs = {}
        for prop, new_value in properties.items():
            old_value = self._delayed_properties.pop(prop)
            # Values are (value, extra-info) tuples as produced by
            # CallbackProperty._get_full_info; only notify real changes.
            if old_value != new_value:
                kwargs[prop] = new_value[0]
        self._notify_global(**kwargs)

    def _notify_global_lists(self, *args):
        from .list import ListCallbackProperty
        # Identify which list property emitted the change (args[0] is the
        # list object itself) and forward it by name.
        properties = {}
        for prop_name, prop in self.iter_callback_properties():
            if isinstance(prop, ListCallbackProperty):
                callback_list = getattr(self, prop_name)
                if callback_list is args[0]:
                    properties[prop_name] = callback_list
                    break
        self._notify_global(**properties)

    def _notify_global(self, **kwargs):
        # Strip any currently delayed or ignored properties before invoking
        # the global callbacks; skip the call entirely if nothing remains.
        for prop in set(self._delayed_properties) | set(self._ignored_properties):
            if prop in kwargs:
                kwargs.pop(prop)
        if len(kwargs) > 0:
            for callback in self._global_callbacks:
                callback(**kwargs)

    def __setattr__(self, attribute, value):
        # Assigning to a callback property also triggers global callbacks
        # (per-property callbacks are handled by the descriptor itself).
        super(HasCallbackProperties, self).__setattr__(attribute, value)
        if self.is_callback_property(attribute):
            self._notify_global(**{attribute: value})

    def add_callback(self, name, callback, echo_old=False, priority=0):
        """
        Add a callback that gets triggered when a callback property of the
        class changes.

        Parameters
        ----------
        name : str
            The instance to add the callback to.
        callback : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``callback(old, new)``. If `False`
            (the default), will be invoked as ``callback(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution of
            callbacks (larger values indicate a higher priority).
        """
        if self.is_callback_property(name):
            prop = getattr(type(self), name)
            prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
        else:
            raise TypeError("attribute '{0}' is not a callback property".format(name))

    def remove_callback(self, name, callback):
        """
        Remove a previously-added callback

        Parameters
        ----------
        name : str
            The instance to remove the callback from.
        func : func
            The callback function to remove
        """
        if self.is_callback_property(name):
            prop = getattr(type(self), name)
            try:
                prop.remove_callback(self, callback)
            except ValueError:  # pragma: nocover
                pass  # Be forgiving if callback was already removed before
        else:
            raise TypeError("attribute '{0}' is not a callback property".format(name))

    def add_global_callback(self, callback):
        """
        Add a global callback function, which is a callback that gets triggered
        when any callback properties on the class change.

        Parameters
        ----------
        callback : func
            The callback function to add
        """
        self._global_callbacks.append(callback)

    def remove_global_callback(self, callback):
        """
        Remove a global callback function.

        Parameters
        ----------
        callback : func
            The callback function to remove
        """
        self._global_callbacks.remove(callback)

    def is_callback_property(self, name):
        """
        Whether a property (identified by name) is a callback property.

        Parameters
        ----------
        name : str
            The name of the property to check
        """
        return isinstance(getattr(type(self), name, None), CallbackProperty)

    def iter_callback_properties(self):
        """
        Iterator to loop over all callback properties.
        """
        for name in dir(self):
            if self.is_callback_property(name):
                yield name, getattr(type(self), name)
def remove_callback(instance, prop, callback):
    """
    Detach a previously attached callback function from a callback property
    of an instance.

    Parameters
    ----------
    instance
        The instance holding the callback property
    prop : str
        Name of the callback property on `instance`
    callback : func
        The callback function to detach
    """
    descriptor = getattr(type(instance), prop)
    if not isinstance(descriptor, CallbackProperty):
        raise TypeError("%s is not a CallbackProperty" % prop)
    descriptor.remove_callback(instance, callback)
def callback_property(getter):
    """
    A decorator to build a CallbackProperty.

    This is used by wrapping a getter method, similar to the use of @property::

        class Foo(object):
            @callback_property
            def x(self):
                return self._x

            @x.setter
            def x(self, value):
                self._x = value

    In simple cases with no getter or setter logic, it's easier to create a
    :class:`~echo.CallbackProperty` directly::

        class Foo(object);
            x = CallbackProperty(initial_value)
    """
    prop = CallbackProperty(getter=getter)
    # Propagate the getter's docstring so help() shows something useful.
    prop.__doc__ = getter.__doc__
    return prop
class delay_callback(object):
    """
    Delay any callback functions from one or more callback properties

    This is a context manager. Within the context block, no callbacks
    will be issued. Each callback will be called once on exit

    Parameters
    ----------
    instance
        An instance object with callback properties
    *props : str
        One or more properties within instance to delay

    Examples
    --------

    ::

        with delay_callback(foo, 'bar', 'baz'):
            f.bar = 20
            f.baz = 30
            f.bar = 10
        print('done')  # callbacks triggered at this point, if needed
    """

    # Class-level registry of properties and how many times the callbacks have
    # been delayed. The idea is that when nesting calls to delay_callback, the
    # delay count is increased, and every time __exit__ is called, the count is
    # decreased, and once the count reaches zero, the callback is triggered.
    delay_count = {}
    old_values = {}

    def __init__(self, instance, *props):
        self.instance = instance
        self.props = props

    def __enter__(self):
        delay_props = {}

        for prop in self.props:

            p = getattr(type(self.instance), prop)
            if not isinstance(p, CallbackProperty):
                raise TypeError("%s is not a CallbackProperty" % prop)

            # Only the outermost delay records the pre-delay value; nested
            # delays just increment the count.
            if (self.instance, prop) not in self.delay_count:
                self.delay_count[self.instance, prop] = 1
                self.old_values[self.instance, prop] = p._get_full_info(self.instance)
                delay_props[prop] = p._get_full_info(self.instance)
            else:
                self.delay_count[self.instance, prop] += 1

            p.disable(self.instance)

        # Global callbacks need to be delayed too, with a snapshot of the
        # values at the moment delaying started.
        if isinstance(self.instance, HasCallbackProperties):
            self.instance._delay_global_callbacks(delay_props)

    def __exit__(self, *args):

        resume_props = {}

        notifications = []

        for prop in self.props:

            p = getattr(type(self.instance), prop)
            if not isinstance(p, CallbackProperty):  # pragma: no cover
                raise TypeError("%s is not a CallbackProperty" % prop)

            if self.delay_count[self.instance, prop] > 1:
                self.delay_count[self.instance, prop] -= 1
            else:
                # Outermost delay has ended: re-enable and, if the value
                # changed while delayed, queue a single notification.
                self.delay_count.pop((self.instance, prop))
                old = self.old_values.pop((self.instance, prop))

                p.enable(self.instance)

                new = p._get_full_info(self.instance)

                if old != new:
                    notifications.append((p, (self.instance, old[0], new[0])))

                resume_props[prop] = new

        if isinstance(self.instance, HasCallbackProperties):
            self.instance._process_delayed_global_callbacks(resume_props)

        # Fire queued notifications only after all bookkeeping is done, so
        # callbacks observe a fully resumed state.
        for p, args in notifications:
            p.notify(*args)
@contextmanager
def ignore_callback(instance, *props):
    """
    Temporarily ignore any callbacks from one or more callback properties

    This is a context manager. Within the context block, no callbacks will be
    issued. In contrast with :func:`~echo.delay_callback`, no callbacks will be
    called on exiting the context manager

    Parameters
    ----------
    instance
        An instance object with callback properties
    *props : str
        One or more properties within instance to ignore

    Examples
    --------

    ::

        with ignore_callback(foo, 'bar', 'baz'):
            f.bar = 20
            f.baz = 30
            f.bar = 10
        print('done')  # no callbacks called
    """
    for prop in props:
        p = getattr(type(instance), prop)
        if not isinstance(p, CallbackProperty):
            raise TypeError("%s is not a CallbackProperty" % prop)
        p.disable(instance)

    if isinstance(instance, HasCallbackProperties):
        instance._ignore_global_callbacks(props)

    # Bug fix: previously there was no try/finally here, so an exception
    # raised inside the with-block skipped the re-enable step and left the
    # properties permanently muted (delay_callback.__exit__ by contrast
    # always runs). The finally block guarantees callbacks are restored.
    try:
        yield
    finally:
        for prop in props:
            p = getattr(type(instance), prop)
            assert isinstance(p, CallbackProperty)
            p.enable(instance)

        if isinstance(instance, HasCallbackProperties):
            instance._unignore_global_callbacks(props)
class keep_in_sync(object):
    """
    Keep two callback properties on two instances synchronized: when either
    property changes, the other is updated to match. Syncing is torn down
    automatically when either instance is garbage-collected.
    """

    def __init__(self, instance1, prop1, instance2, prop2):
        # Hold only weak references so syncing does not keep either instance
        # alive; disable_syncing doubles as the weakref finalizer.
        self.instance1 = weakref.ref(instance1, self.disable_syncing)
        self.prop1 = prop1

        self.instance2 = weakref.ref(instance2, self.disable_syncing)
        self.prop2 = prop2

        # Re-entrancy guard: prevents the two mirrored callbacks from
        # triggering each other in an infinite loop.
        self._syncing = False

        self.enabled = False

        self.enable_syncing()

    def prop1_from_prop2(self, value):
        # Copy instance2.prop2 -> instance1.prop1 (unless already mid-sync).
        if not self._syncing:
            self._syncing = True
            setattr(self.instance1(), self.prop1, getattr(self.instance2(), self.prop2))
            self._syncing = False

    def prop2_from_prop1(self, value):
        # Copy instance1.prop1 -> instance2.prop2 (unless already mid-sync).
        if not self._syncing:
            self._syncing = True
            setattr(self.instance2(), self.prop2, getattr(self.instance1(), self.prop1))
            self._syncing = False

    def enable_syncing(self, *args):
        # Idempotent: attach the mirrored callbacks once.
        if self.enabled:
            return
        add_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
        add_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
        self.enabled = True

    def disable_syncing(self, *args):
        # Idempotent; also called as a weakref finalizer, so each instance
        # must be checked for liveness before detaching its callback.
        if not self.enabled:
            return
        if self.instance1() is not None:
            remove_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
        if self.instance2() is not None:
            remove_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
        self.enabled = False
|
glue-viz/echo | echo/core.py | remove_callback | python | def remove_callback(instance, prop, callback):
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.remove_callback(instance, callback) | Remove a callback function from a property in an instance
Parameters
----------
instance
The instance to detach the callback from
prop : str
Name of callback property in `instance`
callback : func
The callback function to remove | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L375-L391 | null | from __future__ import absolute_import, division, print_function
import weakref
from itertools import chain
from weakref import WeakKeyDictionary
from contextlib import contextmanager
from .callback_container import CallbackContainer
__all__ = ['CallbackProperty', 'callback_property',
'add_callback', 'remove_callback',
'delay_callback', 'ignore_callback',
'HasCallbackProperties', 'keep_in_sync']
class CallbackProperty(object):
    """
    A property that callback functions can be added to.

    When a callback property changes value, each callback function
    is called with information about the state change. Otherwise,
    callback properties behave just like normal instance variables.

    CallbackProperties must be defined at the class level. Use
    the helper function :func:`~echo.add_callback` to attach a callback to
    a specific instance of a class with CallbackProperties

    Parameters
    ----------
    default
        The initial value for the property
    docstring : str
        The docstring for the property
    getter, setter : func
        Custom getter and setter functions (advanced)
    """

    def __init__(self, default=None, docstring=None, getter=None, setter=None):
        """
        :param default: The initial value for the property
        """
        self._default = default
        # Per-instance state lives in WeakKeyDictionaries so that this
        # class-level descriptor never keeps the owning instances alive.
        self._callbacks = WeakKeyDictionary()        # instance -> func(new) callbacks
        self._2arg_callbacks = WeakKeyDictionary()   # instance -> func(old, new) callbacks
        self._disabled = WeakKeyDictionary()         # instance -> bool (callbacks muted)
        self._values = WeakKeyDictionary()           # instance -> current value
        if getter is None:
            getter = self._default_getter
        if setter is None:
            setter = self._default_setter
        self._getter = getter
        self._setter = setter
        if docstring is not None:
            self.__doc__ = docstring

    def _default_getter(self, instance, owner=None):
        # Fall back to the class-level default until a value has been set
        # on this particular instance.
        return self._values.get(instance, self._default)

    def _default_setter(self, instance, value):
        self._values.__setitem__(instance, value)

    def __get__(self, instance, owner=None):
        # Accessing the property on the class (not an instance) returns the
        # descriptor itself, mirroring the behavior of a plain `property`.
        if instance is None:
            return self
        return self._getter(instance)

    def __set__(self, instance, value):
        try:
            old = self.__get__(instance)
        except AttributeError:  # pragma: no cover
            old = None
        self._setter(instance, value)
        new = self.__get__(instance)
        # Callbacks fire only when the value actually changed (by equality).
        if old != new:
            self.notify(instance, old, new)

    def setter(self, func):
        """
        Method to use as a decorator, to mimic @property.setter
        """
        self._setter = func
        return self

    def _get_full_info(self, instance):
        # Some callback subclasses may contain additional info in addition
        # to the main value, and we need to use this full information when
        # comparing old and new 'values', so this method is used in that
        # case. The result should be a tuple where the first item is the
        # actual primary value of the property and the second item is any
        # additional data to use in the comparison.
        return self.__get__(instance), None

    def notify(self, instance, old, new):
        """
        Call all callback functions with the current value

        Each callback will either be called using
        callback(new) or callback(old, new) depending
        on whether ``echo_old`` was set to `True` when calling
        :func:`~echo.add_callback`

        Parameters
        ----------
        instance
            The instance to consider
        old
            The old value of the property
        new
            The new value of the property
        """
        # Silently skip notification if callbacks are disabled for this
        # instance (see disable()/enable(), used by delay/ignore helpers).
        if self._disabled.get(instance, False):
            return
        for cback in self._callbacks.get(instance, []):
            cback(new)
        for cback in self._2arg_callbacks.get(instance, []):
            cback(old, new)

    def disable(self, instance):
        """
        Disable callbacks for a specific instance
        """
        self._disabled[instance] = True

    def enable(self, instance):
        """
        Enable previously-disabled callbacks for a specific instance
        """
        self._disabled[instance] = False

    def add_callback(self, instance, func, echo_old=False, priority=0):
        """
        Add a callback to a specific instance that manages this property

        Parameters
        ----------
        instance
            The instance to add the callback to
        func : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``func(old, new)``. If `False`
            (the default), will be invoked as ``func(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution of
            callbacks (larger values indicate a higher priority).
        """
        # One- and two-argument callbacks are stored in separate containers
        # so notify() can dispatch without inspecting each function.
        if echo_old:
            self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
        else:
            self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)

    def remove_callback(self, instance, func):
        """
        Remove a previously-added callback

        Parameters
        ----------
        instance
            The instance to detach the callback from
        func : func
            The callback function to remove
        """
        # Search both containers; the for/else raises only when the function
        # was found in neither.
        for cb in [self._callbacks, self._2arg_callbacks]:
            if instance not in cb:
                continue
            if func in cb[instance]:
                cb[instance].remove(func)
                return
        else:
            raise ValueError("Callback function not found: %s" % func)
class HasCallbackProperties(object):
    """
    A class that adds functionality to subclasses that use callback properties.
    """

    def __init__(self):
        from .list import ListCallbackProperty
        self._global_callbacks = CallbackContainer()
        self._ignored_properties = set()
        self._delayed_properties = {}
        self._delay_global_calls = {}
        self._callback_wrappers = {}
        # List properties notify globally through a dedicated hook, because
        # in-place mutation does not go through __setattr__.
        for prop_name, prop in self.iter_callback_properties():
            if isinstance(prop, ListCallbackProperty):
                prop.add_callback(self, self._notify_global_lists)

    def _ignore_global_callbacks(self, properties):
        # This is to allow ignore_callbacks to work for global callbacks
        self._ignored_properties.update(properties)

    def _unignore_global_callbacks(self, properties):
        # Once this is called, we simply remove properties from
        # _ignored_properties and don't call the callbacks. This is used by
        # ignore_callback
        self._ignored_properties -= set(properties)

    def _delay_global_callbacks(self, properties):
        # This is to allow delay_callback to still have an effect in delaying
        # global callbacks. We set _delayed_properties to a dictionary of the
        # values at the point at which the callbacks are delayed.
        self._delayed_properties.update(properties)

    def _process_delayed_global_callbacks(self, properties):
        # Once this is called, the global callbacks are called once each with
        # a dictionary of the current values of properties that have been
        # resumed.
        kwargs = {}
        for prop, new_value in properties.items():
            old_value = self._delayed_properties.pop(prop)
            if old_value != new_value:
                kwargs[prop] = new_value[0]
        self._notify_global(**kwargs)

    def _notify_global_lists(self, *args):
        from .list import ListCallbackProperty
        # Identify which list property fired (args[0] is the list itself).
        properties = {}
        for prop_name, prop in self.iter_callback_properties():
            if isinstance(prop, ListCallbackProperty):
                callback_list = getattr(self, prop_name)
                if callback_list is args[0]:
                    properties[prop_name] = callback_list
                    break
        self._notify_global(**properties)

    def _notify_global(self, **kwargs):
        # Strip out any properties that are currently delayed or ignored
        # before invoking the global callbacks.
        for prop in set(self._delayed_properties) | set(self._ignored_properties):
            if prop in kwargs:
                kwargs.pop(prop)
        if len(kwargs) > 0:
            for callback in self._global_callbacks:
                callback(**kwargs)

    def __setattr__(self, attribute, value):
        super(HasCallbackProperties, self).__setattr__(attribute, value)
        if self.is_callback_property(attribute):
            self._notify_global(**{attribute: value})

    def add_callback(self, name, callback, echo_old=False, priority=0):
        """
        Add a callback that gets triggered when a callback property of the
        class changes.

        Parameters
        ----------
        name : str
            The instance to add the callback to.
        callback : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``callback(old, new)``. If
            `False` (the default), will be invoked as ``callback(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution
            of callbacks (larger values indicate a higher priority).
        """
        if self.is_callback_property(name):
            prop = getattr(type(self), name)
            prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
        else:
            raise TypeError("attribute '{0}' is not a callback property".format(name))

    def remove_callback(self, name, callback):
        """
        Remove a previously-added callback

        Parameters
        ----------
        name : str
            The instance to remove the callback from.
        func : func
            The callback function to remove
        """
        if self.is_callback_property(name):
            prop = getattr(type(self), name)
            try:
                prop.remove_callback(self, callback)
            except ValueError:  # pragma: nocover
                pass  # Be forgiving if callback was already removed before
        else:
            raise TypeError("attribute '{0}' is not a callback property".format(name))

    def add_global_callback(self, callback):
        """
        Add a global callback function, which is a callback that gets
        triggered when any callback properties on the class change.

        Parameters
        ----------
        callback : func
            The callback function to add
        """
        self._global_callbacks.append(callback)

    def remove_global_callback(self, callback):
        """
        Remove a global callback function.

        Parameters
        ----------
        callback : func
            The callback function to remove
        """
        self._global_callbacks.remove(callback)

    def is_callback_property(self, name):
        """
        Whether a property (identified by name) is a callback property.

        Parameters
        ----------
        name : str
            The name of the property to check
        """
        return isinstance(getattr(type(self), name, None), CallbackProperty)

    def iter_callback_properties(self):
        """
        Iterator to loop over all callback properties.
        """
        for name in dir(self):
            if self.is_callback_property(name):
                yield name, getattr(type(self), name)
def add_callback(instance, prop, callback, echo_old=False, priority=0):
    """
    Attach a callback function to a property in an instance

    Parameters
    ----------
    instance
        The instance to add the callback to
    prop : str
        Name of callback property in `instance`
    callback : func
        The callback function to add
    echo_old : bool, optional
        If `True`, the callback function will be invoked with both the old
        and new values of the property, as ``func(old, new)``. If `False`
        (the default), will be invoked as ``func(new)``
    priority : int, optional
        This can optionally be used to force a certain order of execution of
        callbacks (larger values indicate a higher priority).

    Examples
    --------
    ::

        class Foo:
            bar = CallbackProperty(0)

        def callback(value):
            pass

        f = Foo()
        add_callback(f, 'bar', callback)
    """
    p = getattr(type(instance), prop)
    if not isinstance(p, CallbackProperty):
        raise TypeError("%s is not a CallbackProperty" % prop)
    p.add_callback(instance, callback, echo_old=echo_old, priority=priority)
def callback_property(getter):
    """
    A decorator to build a CallbackProperty.

    This is used by wrapping a getter method, similar to the use of
    @property::

        class Foo(object):

            @callback_property
            def x(self):
                 return self._x

            @x.setter
            def x(self, value):
                self._x = value

    In simple cases with no getter or setter logic, it's easier to create a
    :class:`~echo.CallbackProperty` directly::

        class Foo(object):
            x = CallbackProperty(initial_value)
    """
    cb = CallbackProperty(getter=getter)
    # Propagate the getter's docstring to the property, like @property does.
    cb.__doc__ = getter.__doc__
    return cb
class delay_callback(object):
    """
    Delay any callback functions from one or more callback properties

    This is a context manager. Within the context block, no callbacks
    will be issued. Each callback will be called once on exit

    Parameters
    ----------
    instance
        An instance object with callback properties
    *props : str
        One or more properties within instance to delay

    Examples
    --------
    ::

        with delay_callback(foo, 'bar', 'baz'):
            f.bar = 20
            f.baz = 30
            f.bar = 10
        print('done')  # callbacks triggered at this point, if needed
    """

    # Class-level registry of properties and how many times the callbacks have
    # been delayed. The idea is that when nesting calls to delay_callback, the
    # delay count is increased, and every time __exit__ is called, the count is
    # decreased, and once the count reaches zero, the callback is triggered.
    delay_count = {}
    old_values = {}

    def __init__(self, instance, *props):
        self.instance = instance
        self.props = props

    def __enter__(self):
        delay_props = {}
        for prop in self.props:
            p = getattr(type(self.instance), prop)
            if not isinstance(p, CallbackProperty):
                raise TypeError("%s is not a CallbackProperty" % prop)
            if (self.instance, prop) not in self.delay_count:
                # First (outermost) delay: remember the value at entry so we
                # can detect a net change on exit.
                self.delay_count[self.instance, prop] = 1
                self.old_values[self.instance, prop] = p._get_full_info(self.instance)
                delay_props[prop] = p._get_full_info(self.instance)
            else:
                self.delay_count[self.instance, prop] += 1
            p.disable(self.instance)
        if isinstance(self.instance, HasCallbackProperties):
            self.instance._delay_global_callbacks(delay_props)

    def __exit__(self, *args):
        resume_props = {}
        notifications = []
        for prop in self.props:
            p = getattr(type(self.instance), prop)
            if not isinstance(p, CallbackProperty):  # pragma: no cover
                raise TypeError("%s is not a CallbackProperty" % prop)
            if self.delay_count[self.instance, prop] > 1:
                self.delay_count[self.instance, prop] -= 1
            else:
                # Outermost exit: re-enable callbacks and notify once if the
                # value changed while delayed.
                self.delay_count.pop((self.instance, prop))
                old = self.old_values.pop((self.instance, prop))
                p.enable(self.instance)
                new = p._get_full_info(self.instance)
                if old != new:
                    notifications.append((p, (self.instance, old[0], new[0])))
                resume_props[prop] = new
        if isinstance(self.instance, HasCallbackProperties):
            self.instance._process_delayed_global_callbacks(resume_props)
        for p, args in notifications:
            p.notify(*args)
@contextmanager
def ignore_callback(instance, *props):
    """
    Temporarily ignore any callbacks from one or more callback properties

    This is a context manager. Within the context block, no callbacks will be
    issued. In contrast with :func:`~echo.delay_callback`, no callbacks will
    be called on exiting the context manager

    Parameters
    ----------
    instance
        An instance object with callback properties
    *props : str
        One or more properties within instance to ignore

    Examples
    --------
    ::

        with ignore_callback(foo, 'bar', 'baz'):
            f.bar = 20
            f.baz = 30
            f.bar = 10
        print('done')  # no callbacks called
    """
    for prop in props:
        p = getattr(type(instance), prop)
        if not isinstance(p, CallbackProperty):
            raise TypeError("%s is not a CallbackProperty" % prop)
        p.disable(instance)
    if isinstance(instance, HasCallbackProperties):
        instance._ignore_global_callbacks(props)

    yield

    for prop in props:
        p = getattr(type(instance), prop)
        assert isinstance(p, CallbackProperty)
        p.enable(instance)
    if isinstance(instance, HasCallbackProperties):
        instance._unignore_global_callbacks(props)
class keep_in_sync(object):
    """
    Keep two callback properties on two instances synchronized.

    Instances are held via weak references so that the synchronization does
    not keep them alive; if either instance is garbage collected, syncing is
    disabled automatically.
    """

    def __init__(self, instance1, prop1, instance2, prop2):
        self.instance1 = weakref.ref(instance1, self.disable_syncing)
        self.prop1 = prop1
        self.instance2 = weakref.ref(instance2, self.disable_syncing)
        self.prop2 = prop2
        # Re-entrancy guard: prevents the two callbacks triggering each other.
        self._syncing = False
        self.enabled = False
        self.enable_syncing()

    def prop1_from_prop2(self, value):
        if not self._syncing:
            self._syncing = True
            setattr(self.instance1(), self.prop1, getattr(self.instance2(), self.prop2))
            self._syncing = False

    def prop2_from_prop1(self, value):
        if not self._syncing:
            self._syncing = True
            setattr(self.instance2(), self.prop2, getattr(self.instance1(), self.prop1))
            self._syncing = False

    def enable_syncing(self, *args):
        if self.enabled:
            return
        add_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
        add_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
        self.enabled = True

    def disable_syncing(self, *args):
        if not self.enabled:
            return
        if self.instance1() is not None:
            remove_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
        if self.instance2() is not None:
            remove_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
        self.enabled = False
|
glue-viz/echo | echo/core.py | callback_property | python | def callback_property(getter):
cb = CallbackProperty(getter=getter)
cb.__doc__ = getter.__doc__
return cb | A decorator to build a CallbackProperty.
This is used by wrapping a getter method, similar to the use of @property::
class Foo(object):
@callback_property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
In simple cases with no getter or setter logic, it's easier to create a
:class:`~echo.CallbackProperty` directly::
class Foo(object):
x = CallbackProperty(initial_value) | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L394-L418 | null | from __future__ import absolute_import, division, print_function
import weakref
from itertools import chain
from weakref import WeakKeyDictionary
from contextlib import contextmanager
from .callback_container import CallbackContainer
# Public API of the echo core module.
__all__ = ['CallbackProperty', 'callback_property',
           'add_callback', 'remove_callback',
           'delay_callback', 'ignore_callback',
           'HasCallbackProperties', 'keep_in_sync']
class CallbackProperty(object):
    """
    A property that callback functions can be added to.

    When a callback property changes value, each callback function
    is called with information about the state change. Otherwise,
    callback properties behave just like normal instance variables.

    CallbackProperties must be defined at the class level. Use
    the helper function :func:`~echo.add_callback` to attach a callback to
    a specific instance of a class with CallbackProperties

    Parameters
    ----------
    default
        The initial value for the property
    docstring : str
        The docstring for the property
    getter, setter : func
        Custom getter and setter functions (advanced)
    """

    def __init__(self, default=None, docstring=None, getter=None, setter=None):
        """
        :param default: The initial value for the property
        """
        self._default = default
        # All per-instance state is held weakly so the descriptor never
        # keeps instances alive on its own.
        self._callbacks = WeakKeyDictionary()
        self._2arg_callbacks = WeakKeyDictionary()
        self._disabled = WeakKeyDictionary()
        self._values = WeakKeyDictionary()
        if getter is None:
            getter = self._default_getter
        if setter is None:
            setter = self._default_setter
        self._getter = getter
        self._setter = setter
        if docstring is not None:
            self.__doc__ = docstring

    def _default_getter(self, instance, owner=None):
        return self._values.get(instance, self._default)

    def _default_setter(self, instance, value):
        self._values.__setitem__(instance, value)

    def __get__(self, instance, owner=None):
        # Accessed on the class itself: return the descriptor, like @property.
        if instance is None:
            return self
        return self._getter(instance)

    def __set__(self, instance, value):
        try:
            old = self.__get__(instance)
        except AttributeError:  # pragma: no cover
            old = None
        self._setter(instance, value)
        new = self.__get__(instance)
        # Only fire callbacks when the value actually changed.
        if old != new:
            self.notify(instance, old, new)

    def setter(self, func):
        """
        Method to use as a decorator, to mimic @property.setter
        """
        self._setter = func
        return self

    def _get_full_info(self, instance):
        # Some callback subclasses may contain additional info in addition
        # to the main value, and we need to use this full information when
        # comparing old and new 'values', so this method is used in that
        # case. The result should be a tuple where the first item is the
        # actual primary value of the property and the second item is any
        # additional data to use in the comparison.
        return self.__get__(instance), None

    def notify(self, instance, old, new):
        """
        Call all callback functions with the current value

        Each callback will either be called using ``callback(new)`` or
        ``callback(old, new)`` depending on whether ``echo_old`` was set
        to `True` when calling :func:`~echo.add_callback`

        Parameters
        ----------
        instance
            The instance to consider
        old
            The old value of the property
        new
            The new value of the property
        """
        if self._disabled.get(instance, False):
            return
        for cback in self._callbacks.get(instance, []):
            cback(new)
        for cback in self._2arg_callbacks.get(instance, []):
            cback(old, new)

    def disable(self, instance):
        """
        Disable callbacks for a specific instance
        """
        self._disabled[instance] = True

    def enable(self, instance):
        """
        Enable previously-disabled callbacks for a specific instance
        """
        self._disabled[instance] = False

    def add_callback(self, instance, func, echo_old=False, priority=0):
        """
        Add a callback to a specific instance that manages this property

        Parameters
        ----------
        instance
            The instance to add the callback to
        func : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``func(old, new)``. If `False`
            (the default), will be invoked as ``func(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution
            of callbacks (larger values indicate a higher priority).
        """
        if echo_old:
            self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
        else:
            self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)

    def remove_callback(self, instance, func):
        """
        Remove a previously-added callback

        Parameters
        ----------
        instance
            The instance to detach the callback from
        func : func
            The callback function to remove
        """
        # Look in both the one-argument and two-argument registries; the
        # for/else raises only if the function was not found in either.
        for cb in [self._callbacks, self._2arg_callbacks]:
            if instance not in cb:
                continue
            if func in cb[instance]:
                cb[instance].remove(func)
                return
        else:
            raise ValueError("Callback function not found: %s" % func)
class HasCallbackProperties(object):
    """
    A class that adds functionality to subclasses that use callback properties.
    """

    def __init__(self):
        from .list import ListCallbackProperty
        self._global_callbacks = CallbackContainer()
        self._ignored_properties = set()
        self._delayed_properties = {}
        self._delay_global_calls = {}
        self._callback_wrappers = {}
        # List properties notify globally through a dedicated hook, because
        # in-place mutation does not go through __setattr__.
        for prop_name, prop in self.iter_callback_properties():
            if isinstance(prop, ListCallbackProperty):
                prop.add_callback(self, self._notify_global_lists)

    def _ignore_global_callbacks(self, properties):
        # This is to allow ignore_callbacks to work for global callbacks
        self._ignored_properties.update(properties)

    def _unignore_global_callbacks(self, properties):
        # Once this is called, we simply remove properties from
        # _ignored_properties and don't call the callbacks. This is used by
        # ignore_callback
        self._ignored_properties -= set(properties)

    def _delay_global_callbacks(self, properties):
        # This is to allow delay_callback to still have an effect in delaying
        # global callbacks. We set _delayed_properties to a dictionary of the
        # values at the point at which the callbacks are delayed.
        self._delayed_properties.update(properties)

    def _process_delayed_global_callbacks(self, properties):
        # Once this is called, the global callbacks are called once each with
        # a dictionary of the current values of properties that have been
        # resumed.
        kwargs = {}
        for prop, new_value in properties.items():
            old_value = self._delayed_properties.pop(prop)
            if old_value != new_value:
                kwargs[prop] = new_value[0]
        self._notify_global(**kwargs)

    def _notify_global_lists(self, *args):
        from .list import ListCallbackProperty
        # Identify which list property fired (args[0] is the list itself).
        properties = {}
        for prop_name, prop in self.iter_callback_properties():
            if isinstance(prop, ListCallbackProperty):
                callback_list = getattr(self, prop_name)
                if callback_list is args[0]:
                    properties[prop_name] = callback_list
                    break
        self._notify_global(**properties)

    def _notify_global(self, **kwargs):
        # Strip out any properties that are currently delayed or ignored
        # before invoking the global callbacks.
        for prop in set(self._delayed_properties) | set(self._ignored_properties):
            if prop in kwargs:
                kwargs.pop(prop)
        if len(kwargs) > 0:
            for callback in self._global_callbacks:
                callback(**kwargs)

    def __setattr__(self, attribute, value):
        super(HasCallbackProperties, self).__setattr__(attribute, value)
        if self.is_callback_property(attribute):
            self._notify_global(**{attribute: value})

    def add_callback(self, name, callback, echo_old=False, priority=0):
        """
        Add a callback that gets triggered when a callback property of the
        class changes.

        Parameters
        ----------
        name : str
            The instance to add the callback to.
        callback : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``callback(old, new)``. If
            `False` (the default), will be invoked as ``callback(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution
            of callbacks (larger values indicate a higher priority).
        """
        if self.is_callback_property(name):
            prop = getattr(type(self), name)
            prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
        else:
            raise TypeError("attribute '{0}' is not a callback property".format(name))

    def remove_callback(self, name, callback):
        """
        Remove a previously-added callback

        Parameters
        ----------
        name : str
            The instance to remove the callback from.
        func : func
            The callback function to remove
        """
        if self.is_callback_property(name):
            prop = getattr(type(self), name)
            try:
                prop.remove_callback(self, callback)
            except ValueError:  # pragma: nocover
                pass  # Be forgiving if callback was already removed before
        else:
            raise TypeError("attribute '{0}' is not a callback property".format(name))

    def add_global_callback(self, callback):
        """
        Add a global callback function, which is a callback that gets
        triggered when any callback properties on the class change.

        Parameters
        ----------
        callback : func
            The callback function to add
        """
        self._global_callbacks.append(callback)

    def remove_global_callback(self, callback):
        """
        Remove a global callback function.

        Parameters
        ----------
        callback : func
            The callback function to remove
        """
        self._global_callbacks.remove(callback)

    def is_callback_property(self, name):
        """
        Whether a property (identified by name) is a callback property.

        Parameters
        ----------
        name : str
            The name of the property to check
        """
        return isinstance(getattr(type(self), name, None), CallbackProperty)

    def iter_callback_properties(self):
        """
        Iterator to loop over all callback properties.
        """
        for name in dir(self):
            if self.is_callback_property(name):
                yield name, getattr(type(self), name)
def add_callback(instance, prop, callback, echo_old=False, priority=0):
    """
    Attach a callback function to a property in an instance

    Parameters
    ----------
    instance
        The instance to add the callback to
    prop : str
        Name of callback property in `instance`
    callback : func
        The callback function to add
    echo_old : bool, optional
        If `True`, the callback function will be invoked with both the old
        and new values of the property, as ``func(old, new)``. If `False`
        (the default), will be invoked as ``func(new)``
    priority : int, optional
        This can optionally be used to force a certain order of execution of
        callbacks (larger values indicate a higher priority).

    Examples
    --------
    ::

        class Foo:
            bar = CallbackProperty(0)

        def callback(value):
            pass

        f = Foo()
        add_callback(f, 'bar', callback)
    """
    p = getattr(type(instance), prop)
    if not isinstance(p, CallbackProperty):
        raise TypeError("%s is not a CallbackProperty" % prop)
    p.add_callback(instance, callback, echo_old=echo_old, priority=priority)
def remove_callback(instance, prop, callback):
    """
    Remove a callback function from a property in an instance

    Parameters
    ----------
    instance
        The instance to detach the callback from
    prop : str
        Name of callback property in `instance`
    callback : func
        The callback function to remove
    """
    p = getattr(type(instance), prop)
    if not isinstance(p, CallbackProperty):
        raise TypeError("%s is not a CallbackProperty" % prop)
    p.remove_callback(instance, callback)
class delay_callback(object):
    """
    Delay any callback functions from one or more callback properties

    This is a context manager. Within the context block, no callbacks
    will be issued. Each callback will be called once on exit

    Parameters
    ----------
    instance
        An instance object with callback properties
    *props : str
        One or more properties within instance to delay

    Examples
    --------
    ::

        with delay_callback(foo, 'bar', 'baz'):
            f.bar = 20
            f.baz = 30
            f.bar = 10
        print('done')  # callbacks triggered at this point, if needed
    """

    # Class-level registry of properties and how many times the callbacks have
    # been delayed. The idea is that when nesting calls to delay_callback, the
    # delay count is increased, and every time __exit__ is called, the count is
    # decreased, and once the count reaches zero, the callback is triggered.
    delay_count = {}
    old_values = {}

    def __init__(self, instance, *props):
        self.instance = instance
        self.props = props

    def __enter__(self):
        delay_props = {}
        for prop in self.props:
            p = getattr(type(self.instance), prop)
            if not isinstance(p, CallbackProperty):
                raise TypeError("%s is not a CallbackProperty" % prop)
            if (self.instance, prop) not in self.delay_count:
                # First (outermost) delay: remember the value at entry so we
                # can detect a net change on exit.
                self.delay_count[self.instance, prop] = 1
                self.old_values[self.instance, prop] = p._get_full_info(self.instance)
                delay_props[prop] = p._get_full_info(self.instance)
            else:
                self.delay_count[self.instance, prop] += 1
            p.disable(self.instance)
        if isinstance(self.instance, HasCallbackProperties):
            self.instance._delay_global_callbacks(delay_props)

    def __exit__(self, *args):
        resume_props = {}
        notifications = []
        for prop in self.props:
            p = getattr(type(self.instance), prop)
            if not isinstance(p, CallbackProperty):  # pragma: no cover
                raise TypeError("%s is not a CallbackProperty" % prop)
            if self.delay_count[self.instance, prop] > 1:
                self.delay_count[self.instance, prop] -= 1
            else:
                # Outermost exit: re-enable callbacks and notify once if the
                # value changed while delayed.
                self.delay_count.pop((self.instance, prop))
                old = self.old_values.pop((self.instance, prop))
                p.enable(self.instance)
                new = p._get_full_info(self.instance)
                if old != new:
                    notifications.append((p, (self.instance, old[0], new[0])))
                resume_props[prop] = new
        if isinstance(self.instance, HasCallbackProperties):
            self.instance._process_delayed_global_callbacks(resume_props)
        for p, args in notifications:
            p.notify(*args)
@contextmanager
def ignore_callback(instance, *props):
    """
    Temporarily ignore any callbacks from one or more callback properties

    This is a context manager. Within the context block, no callbacks will be
    issued. In contrast with :func:`~echo.delay_callback`, no callbacks will
    be called on exiting the context manager

    Parameters
    ----------
    instance
        An instance object with callback properties
    *props : str
        One or more properties within instance to ignore

    Examples
    --------
    ::

        with ignore_callback(foo, 'bar', 'baz'):
            f.bar = 20
            f.baz = 30
            f.bar = 10
        print('done')  # no callbacks called
    """
    for prop in props:
        p = getattr(type(instance), prop)
        if not isinstance(p, CallbackProperty):
            raise TypeError("%s is not a CallbackProperty" % prop)
        p.disable(instance)
    if isinstance(instance, HasCallbackProperties):
        instance._ignore_global_callbacks(props)

    yield

    for prop in props:
        p = getattr(type(instance), prop)
        assert isinstance(p, CallbackProperty)
        p.enable(instance)
    if isinstance(instance, HasCallbackProperties):
        instance._unignore_global_callbacks(props)
class keep_in_sync(object):
    """
    Keep two callback properties on two instances synchronized.

    Instances are held via weak references so that the synchronization does
    not keep them alive; if either instance is garbage collected, syncing is
    disabled automatically.
    """

    def __init__(self, instance1, prop1, instance2, prop2):
        self.instance1 = weakref.ref(instance1, self.disable_syncing)
        self.prop1 = prop1
        self.instance2 = weakref.ref(instance2, self.disable_syncing)
        self.prop2 = prop2
        # Re-entrancy guard: prevents the two callbacks triggering each other.
        self._syncing = False
        self.enabled = False
        self.enable_syncing()

    def prop1_from_prop2(self, value):
        if not self._syncing:
            self._syncing = True
            setattr(self.instance1(), self.prop1, getattr(self.instance2(), self.prop2))
            self._syncing = False

    def prop2_from_prop1(self, value):
        if not self._syncing:
            self._syncing = True
            setattr(self.instance2(), self.prop2, getattr(self.instance1(), self.prop1))
            self._syncing = False

    def enable_syncing(self, *args):
        if self.enabled:
            return
        add_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
        add_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
        self.enabled = True

    def disable_syncing(self, *args):
        if not self.enabled:
            return
        if self.instance1() is not None:
            remove_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
        if self.instance2() is not None:
            remove_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
        self.enabled = False
|
glue-viz/echo | echo/core.py | ignore_callback | python | def ignore_callback(instance, *props):
for prop in props:
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.disable(instance)
if isinstance(instance, HasCallbackProperties):
instance._ignore_global_callbacks(props)
yield
for prop in props:
p = getattr(type(instance), prop)
assert isinstance(p, CallbackProperty)
p.enable(instance)
if isinstance(instance, HasCallbackProperties):
instance._unignore_global_callbacks(props) | Temporarily ignore any callbacks from one or more callback properties
This is a context manager. Within the context block, no callbacks will be
issued. In contrast with :func:`~echo.delay_callback`, no callbacks will be
called on exiting the context manager
Parameters
----------
instance
An instance object with callback properties
*props : str
One or more properties within instance to ignore
Examples
--------
::
with ignore_callback(foo, 'bar', 'baz'):
f.bar = 20
f.baz = 30
f.bar = 10
print('done') # no callbacks called | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L511-L555 | null | from __future__ import absolute_import, division, print_function
import weakref
from itertools import chain
from weakref import WeakKeyDictionary
from contextlib import contextmanager
from .callback_container import CallbackContainer
# Public API of the echo core module.
__all__ = ['CallbackProperty', 'callback_property',
           'add_callback', 'remove_callback',
           'delay_callback', 'ignore_callback',
           'HasCallbackProperties', 'keep_in_sync']
class CallbackProperty(object):
    """
    A property that callback functions can be added to.

    When a callback property changes value, each callback function
    is called with information about the state change. Otherwise,
    callback properties behave just like normal instance variables.

    CallbackProperties must be defined at the class level. Use
    the helper function :func:`~echo.add_callback` to attach a callback to
    a specific instance of a class with CallbackProperties

    Parameters
    ----------
    default
        The initial value for the property
    docstring : str
        The docstring for the property
    getter, setter : func
        Custom getter and setter functions (advanced)
    """

    def __init__(self, default=None, docstring=None, getter=None, setter=None):
        """
        :param default: The initial value for the property
        """
        self._default = default
        # All per-instance state is held weakly so the descriptor never
        # keeps instances alive on its own.
        self._callbacks = WeakKeyDictionary()
        self._2arg_callbacks = WeakKeyDictionary()
        self._disabled = WeakKeyDictionary()
        self._values = WeakKeyDictionary()
        if getter is None:
            getter = self._default_getter
        if setter is None:
            setter = self._default_setter
        self._getter = getter
        self._setter = setter
        if docstring is not None:
            self.__doc__ = docstring

    def _default_getter(self, instance, owner=None):
        return self._values.get(instance, self._default)

    def _default_setter(self, instance, value):
        self._values.__setitem__(instance, value)

    def __get__(self, instance, owner=None):
        # Accessed on the class itself: return the descriptor, like @property.
        if instance is None:
            return self
        return self._getter(instance)

    def __set__(self, instance, value):
        try:
            old = self.__get__(instance)
        except AttributeError:  # pragma: no cover
            old = None
        self._setter(instance, value)
        new = self.__get__(instance)
        # Only fire callbacks when the value actually changed.
        if old != new:
            self.notify(instance, old, new)

    def setter(self, func):
        """
        Method to use as a decorator, to mimic @property.setter
        """
        self._setter = func
        return self

    def _get_full_info(self, instance):
        # Some callback subclasses may contain additional info in addition
        # to the main value, and we need to use this full information when
        # comparing old and new 'values', so this method is used in that
        # case. The result should be a tuple where the first item is the
        # actual primary value of the property and the second item is any
        # additional data to use in the comparison.
        return self.__get__(instance), None

    def notify(self, instance, old, new):
        """
        Call all callback functions with the current value

        Each callback will either be called using ``callback(new)`` or
        ``callback(old, new)`` depending on whether ``echo_old`` was set
        to `True` when calling :func:`~echo.add_callback`

        Parameters
        ----------
        instance
            The instance to consider
        old
            The old value of the property
        new
            The new value of the property
        """
        if self._disabled.get(instance, False):
            return
        for cback in self._callbacks.get(instance, []):
            cback(new)
        for cback in self._2arg_callbacks.get(instance, []):
            cback(old, new)

    def disable(self, instance):
        """
        Disable callbacks for a specific instance
        """
        self._disabled[instance] = True

    def enable(self, instance):
        """
        Enable previously-disabled callbacks for a specific instance
        """
        self._disabled[instance] = False

    def add_callback(self, instance, func, echo_old=False, priority=0):
        """
        Add a callback to a specific instance that manages this property

        Parameters
        ----------
        instance
            The instance to add the callback to
        func : func
            The callback function to add
        echo_old : bool, optional
            If `True`, the callback function will be invoked with both the old
            and new values of the property, as ``func(old, new)``. If `False`
            (the default), will be invoked as ``func(new)``
        priority : int, optional
            This can optionally be used to force a certain order of execution
            of callbacks (larger values indicate a higher priority).
        """
        if echo_old:
            self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
        else:
            self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)

    def remove_callback(self, instance, func):
        """
        Remove a previously-added callback

        Parameters
        ----------
        instance
            The instance to detach the callback from
        func : func
            The callback function to remove
        """
        # Look in both the one-argument and two-argument registries; the
        # for/else raises only if the function was not found in either.
        for cb in [self._callbacks, self._2arg_callbacks]:
            if instance not in cb:
                continue
            if func in cb[instance]:
                cb[instance].remove(func)
                return
        else:
            raise ValueError("Callback function not found: %s" % func)
class HasCallbackProperties(object):
"""
A class that adds functionality to subclasses that use callback properties.
"""
def __init__(self):
from .list import ListCallbackProperty
self._global_callbacks = CallbackContainer()
self._ignored_properties = set()
self._delayed_properties = {}
self._delay_global_calls = {}
self._callback_wrappers = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
prop.add_callback(self, self._notify_global_lists)
def _ignore_global_callbacks(self, properties):
# This is to allow ignore_callbacks to work for global callbacks
self._ignored_properties.update(properties)
def _unignore_global_callbacks(self, properties):
# Once this is called, we simply remove properties from _ignored_properties
# and don't call the callbacks. This is used by ignore_callback
self._ignored_properties -= set(properties)
def _delay_global_callbacks(self, properties):
# This is to allow delay_callback to still have an effect in delaying
# global callbacks. We set _delayed_properties to a dictionary of the
# values at the point at which the callbacks are delayed.
self._delayed_properties.update(properties)
def _process_delayed_global_callbacks(self, properties):
# Once this is called, the global callbacks are called once each with
# a dictionary of the current values of properties that have been
# resumed.
kwargs = {}
for prop, new_value in properties.items():
old_value = self._delayed_properties.pop(prop)
if old_value != new_value:
kwargs[prop] = new_value[0]
self._notify_global(**kwargs)
def _notify_global_lists(self, *args):
from .list import ListCallbackProperty
properties = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
callback_list = getattr(self, prop_name)
if callback_list is args[0]:
properties[prop_name] = callback_list
break
self._notify_global(**properties)
def _notify_global(self, **kwargs):
for prop in set(self._delayed_properties) | set(self._ignored_properties):
if prop in kwargs:
kwargs.pop(prop)
if len(kwargs) > 0:
for callback in self._global_callbacks:
callback(**kwargs)
def __setattr__(self, attribute, value):
super(HasCallbackProperties, self).__setattr__(attribute, value)
if self.is_callback_property(attribute):
self._notify_global(**{attribute: value})
def add_callback(self, name, callback, echo_old=False, priority=0):
"""
Add a callback that gets triggered when a callback property of the
class changes.
Parameters
----------
name : str
The instance to add the callback to.
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``callback(old, new)``. If `False`
(the default), will be invoked as ``callback(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def remove_callback(self, name, callback):
"""
Remove a previously-added callback
Parameters
----------
name : str
The instance to remove the callback from.
func : func
The callback function to remove
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
try:
prop.remove_callback(self, callback)
except ValueError: # pragma: nocover
pass # Be forgiving if callback was already removed before
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def add_global_callback(self, callback):
"""
Add a global callback function, which is a callback that gets triggered
when any callback properties on the class change.
Parameters
----------
callback : func
The callback function to add
"""
self._global_callbacks.append(callback)
def remove_global_callback(self, callback):
"""
Remove a global callback function.
Parameters
----------
callback : func
The callback function to remove
"""
self._global_callbacks.remove(callback)
def is_callback_property(self, name):
"""
Whether a property (identified by name) is a callback property.
Parameters
----------
name : str
The name of the property to check
"""
return isinstance(getattr(type(self), name, None), CallbackProperty)
def iter_callback_properties(self):
"""
Iterator to loop over all callback properties.
"""
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name)
def add_callback(instance, prop, callback, echo_old=False, priority=0):
"""
Attach a callback function to a property in an instance
Parameters
----------
instance
The instance to add the callback to
prop : str
Name of callback property in `instance`
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
Examples
--------
::
class Foo:
bar = CallbackProperty(0)
def callback(value):
pass
f = Foo()
add_callback(f, 'bar', callback)
"""
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.add_callback(instance, callback, echo_old=echo_old, priority=priority)
def remove_callback(instance, prop, callback):
"""
Remove a callback function from a property in an instance
Parameters
----------
instance
The instance to detach the callback from
prop : str
Name of callback property in `instance`
callback : func
The callback function to remove
"""
p = getattr(type(instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
p.remove_callback(instance, callback)
def callback_property(getter):
"""
A decorator to build a CallbackProperty.
This is used by wrapping a getter method, similar to the use of @property::
class Foo(object):
@callback_property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = value
In simple cases with no getter or setter logic, it's easier to create a
:class:`~echo.CallbackProperty` directly::
class Foo(object);
x = CallbackProperty(initial_value)
"""
cb = CallbackProperty(getter=getter)
cb.__doc__ = getter.__doc__
return cb
class delay_callback(object):
"""
Delay any callback functions from one or more callback properties
This is a context manager. Within the context block, no callbacks
will be issued. Each callback will be called once on exit
Parameters
----------
instance
An instance object with callback properties
*props : str
One or more properties within instance to delay
Examples
--------
::
with delay_callback(foo, 'bar', 'baz'):
f.bar = 20
f.baz = 30
f.bar = 10
print('done') # callbacks triggered at this point, if needed
"""
# Class-level registry of properties and how many times the callbacks have
# been delayed. The idea is that when nesting calls to delay_callback, the
# delay count is increased, and every time __exit__ is called, the count is
# decreased, and once the count reaches zero, the callback is triggered.
delay_count = {}
old_values = {}
def __init__(self, instance, *props):
self.instance = instance
self.props = props
def __enter__(self):
delay_props = {}
for prop in self.props:
p = getattr(type(self.instance), prop)
if not isinstance(p, CallbackProperty):
raise TypeError("%s is not a CallbackProperty" % prop)
if (self.instance, prop) not in self.delay_count:
self.delay_count[self.instance, prop] = 1
self.old_values[self.instance, prop] = p._get_full_info(self.instance)
delay_props[prop] = p._get_full_info(self.instance)
else:
self.delay_count[self.instance, prop] += 1
p.disable(self.instance)
if isinstance(self.instance, HasCallbackProperties):
self.instance._delay_global_callbacks(delay_props)
def __exit__(self, *args):
resume_props = {}
notifications = []
for prop in self.props:
p = getattr(type(self.instance), prop)
if not isinstance(p, CallbackProperty): # pragma: no cover
raise TypeError("%s is not a CallbackProperty" % prop)
if self.delay_count[self.instance, prop] > 1:
self.delay_count[self.instance, prop] -= 1
else:
self.delay_count.pop((self.instance, prop))
old = self.old_values.pop((self.instance, prop))
p.enable(self.instance)
new = p._get_full_info(self.instance)
if old != new:
notifications.append((p, (self.instance, old[0], new[0])))
resume_props[prop] = new
if isinstance(self.instance, HasCallbackProperties):
self.instance._process_delayed_global_callbacks(resume_props)
for p, args in notifications:
p.notify(*args)
@contextmanager
class keep_in_sync(object):
def __init__(self, instance1, prop1, instance2, prop2):
self.instance1 = weakref.ref(instance1, self.disable_syncing)
self.prop1 = prop1
self.instance2 = weakref.ref(instance2, self.disable_syncing)
self.prop2 = prop2
self._syncing = False
self.enabled = False
self.enable_syncing()
def prop1_from_prop2(self, value):
if not self._syncing:
self._syncing = True
setattr(self.instance1(), self.prop1, getattr(self.instance2(), self.prop2))
self._syncing = False
def prop2_from_prop1(self, value):
if not self._syncing:
self._syncing = True
setattr(self.instance2(), self.prop2, getattr(self.instance1(), self.prop1))
self._syncing = False
def enable_syncing(self, *args):
if self.enabled:
return
add_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
add_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
self.enabled = True
def disable_syncing(self, *args):
if not self.enabled:
return
if self.instance1() is not None:
remove_callback(self.instance1(), self.prop1, self.prop2_from_prop1)
if self.instance2() is not None:
remove_callback(self.instance2(), self.prop2, self.prop1_from_prop2)
self.enabled = False
|
glue-viz/echo | echo/core.py | CallbackProperty.notify | python | def notify(self, instance, old, new):
if self._disabled.get(instance, False):
return
for cback in self._callbacks.get(instance, []):
cback(new)
for cback in self._2arg_callbacks.get(instance, []):
cback(old, new) | Call all callback functions with the current value
Each callback will either be called using
callback(new) or callback(old, new) depending
on whether ``echo_old`` was set to `True` when calling
:func:`~echo.add_callback`
Parameters
----------
instance
The instance to consider
old
The old value of the property
new
The new value of the property | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L97-L120 | null | class CallbackProperty(object):
"""
A property that callback functions can be added to.
When a callback property changes value, each callback function
is called with information about the state change. Otherwise,
callback properties behave just like normal instance variables.
CallbackProperties must be defined at the class level. Use
the helper function :func:`~echo.add_callback` to attach a callback to
a specific instance of a class with CallbackProperties
Parameters
----------
default
The initial value for the property
docstring : str
The docstring for the property
getter, setter : func
Custom getter and setter functions (advanced)
"""
def __init__(self, default=None, docstring=None, getter=None, setter=None):
"""
:param default: The initial value for the property
"""
self._default = default
self._callbacks = WeakKeyDictionary()
self._2arg_callbacks = WeakKeyDictionary()
self._disabled = WeakKeyDictionary()
self._values = WeakKeyDictionary()
if getter is None:
getter = self._default_getter
if setter is None:
setter = self._default_setter
self._getter = getter
self._setter = setter
if docstring is not None:
self.__doc__ = docstring
def _default_getter(self, instance, owner=None):
return self._values.get(instance, self._default)
def _default_setter(self, instance, value):
self._values.__setitem__(instance, value)
def __get__(self, instance, owner=None):
if instance is None:
return self
return self._getter(instance)
def __set__(self, instance, value):
try:
old = self.__get__(instance)
except AttributeError: # pragma: no cover
old = None
self._setter(instance, value)
new = self.__get__(instance)
if old != new:
self.notify(instance, old, new)
def setter(self, func):
"""
Method to use as a decorator, to mimic @property.setter
"""
self._setter = func
return self
def _get_full_info(self, instance):
# Some callback subclasses may contain additional info in addition
# to the main value, and we need to use this full information when
# comparing old and new 'values', so this method is used in that
# case. The result should be a tuple where the first item is the
# actual primary value of the property and the second item is any
# additional data to use in the comparison.
return self.__get__(instance), None
def disable(self, instance):
"""
Disable callbacks for a specific instance
"""
self._disabled[instance] = True
def enable(self, instance):
"""
Enable previously-disabled callbacks for a specific instance
"""
self._disabled[instance] = False
def add_callback(self, instance, func, echo_old=False, priority=0):
"""
Add a callback to a specific instance that manages this property
Parameters
----------
instance
The instance to add the callback to
func : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if echo_old:
self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
else:
self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
def remove_callback(self, instance, func):
"""
Remove a previously-added callback
Parameters
----------
instance
The instance to detach the callback from
func : func
The callback function to remove
"""
for cb in [self._callbacks, self._2arg_callbacks]:
if instance not in cb:
continue
if func in cb[instance]:
cb[instance].remove(func)
return
|
glue-viz/echo | echo/core.py | CallbackProperty.add_callback | python | def add_callback(self, instance, func, echo_old=False, priority=0):
if echo_old:
self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
else:
self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority) | Add a callback to a specific instance that manages this property
Parameters
----------
instance
The instance to add the callback to
func : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority). | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L134-L156 | null | class CallbackProperty(object):
"""
A property that callback functions can be added to.
When a callback property changes value, each callback function
is called with information about the state change. Otherwise,
callback properties behave just like normal instance variables.
CallbackProperties must be defined at the class level. Use
the helper function :func:`~echo.add_callback` to attach a callback to
a specific instance of a class with CallbackProperties
Parameters
----------
default
The initial value for the property
docstring : str
The docstring for the property
getter, setter : func
Custom getter and setter functions (advanced)
"""
def __init__(self, default=None, docstring=None, getter=None, setter=None):
"""
:param default: The initial value for the property
"""
self._default = default
self._callbacks = WeakKeyDictionary()
self._2arg_callbacks = WeakKeyDictionary()
self._disabled = WeakKeyDictionary()
self._values = WeakKeyDictionary()
if getter is None:
getter = self._default_getter
if setter is None:
setter = self._default_setter
self._getter = getter
self._setter = setter
if docstring is not None:
self.__doc__ = docstring
def _default_getter(self, instance, owner=None):
return self._values.get(instance, self._default)
def _default_setter(self, instance, value):
self._values.__setitem__(instance, value)
def __get__(self, instance, owner=None):
if instance is None:
return self
return self._getter(instance)
def __set__(self, instance, value):
try:
old = self.__get__(instance)
except AttributeError: # pragma: no cover
old = None
self._setter(instance, value)
new = self.__get__(instance)
if old != new:
self.notify(instance, old, new)
def setter(self, func):
"""
Method to use as a decorator, to mimic @property.setter
"""
self._setter = func
return self
def _get_full_info(self, instance):
# Some callback subclasses may contain additional info in addition
# to the main value, and we need to use this full information when
# comparing old and new 'values', so this method is used in that
# case. The result should be a tuple where the first item is the
# actual primary value of the property and the second item is any
# additional data to use in the comparison.
return self.__get__(instance), None
def notify(self, instance, old, new):
"""
Call all callback functions with the current value
Each callback will either be called using
callback(new) or callback(old, new) depending
on whether ``echo_old`` was set to `True` when calling
:func:`~echo.add_callback`
Parameters
----------
instance
The instance to consider
old
The old value of the property
new
The new value of the property
"""
if self._disabled.get(instance, False):
return
for cback in self._callbacks.get(instance, []):
cback(new)
for cback in self._2arg_callbacks.get(instance, []):
cback(old, new)
def disable(self, instance):
"""
Disable callbacks for a specific instance
"""
self._disabled[instance] = True
def enable(self, instance):
"""
Enable previously-disabled callbacks for a specific instance
"""
self._disabled[instance] = False
def remove_callback(self, instance, func):
"""
Remove a previously-added callback
Parameters
----------
instance
The instance to detach the callback from
func : func
The callback function to remove
"""
for cb in [self._callbacks, self._2arg_callbacks]:
if instance not in cb:
continue
if func in cb[instance]:
cb[instance].remove(func)
return
|
glue-viz/echo | echo/core.py | CallbackProperty.remove_callback | python | def remove_callback(self, instance, func):
for cb in [self._callbacks, self._2arg_callbacks]:
if instance not in cb:
continue
if func in cb[instance]:
cb[instance].remove(func)
return
else:
raise ValueError("Callback function not found: %s" % func) | Remove a previously-added callback
Parameters
----------
instance
The instance to detach the callback from
func : func
The callback function to remove | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L158-L176 | null | class CallbackProperty(object):
"""
A property that callback functions can be added to.
When a callback property changes value, each callback function
is called with information about the state change. Otherwise,
callback properties behave just like normal instance variables.
CallbackProperties must be defined at the class level. Use
the helper function :func:`~echo.add_callback` to attach a callback to
a specific instance of a class with CallbackProperties
Parameters
----------
default
The initial value for the property
docstring : str
The docstring for the property
getter, setter : func
Custom getter and setter functions (advanced)
"""
def __init__(self, default=None, docstring=None, getter=None, setter=None):
"""
:param default: The initial value for the property
"""
self._default = default
self._callbacks = WeakKeyDictionary()
self._2arg_callbacks = WeakKeyDictionary()
self._disabled = WeakKeyDictionary()
self._values = WeakKeyDictionary()
if getter is None:
getter = self._default_getter
if setter is None:
setter = self._default_setter
self._getter = getter
self._setter = setter
if docstring is not None:
self.__doc__ = docstring
def _default_getter(self, instance, owner=None):
return self._values.get(instance, self._default)
def _default_setter(self, instance, value):
self._values.__setitem__(instance, value)
def __get__(self, instance, owner=None):
if instance is None:
return self
return self._getter(instance)
def __set__(self, instance, value):
try:
old = self.__get__(instance)
except AttributeError: # pragma: no cover
old = None
self._setter(instance, value)
new = self.__get__(instance)
if old != new:
self.notify(instance, old, new)
def setter(self, func):
"""
Method to use as a decorator, to mimic @property.setter
"""
self._setter = func
return self
def _get_full_info(self, instance):
# Some callback subclasses may contain additional info in addition
# to the main value, and we need to use this full information when
# comparing old and new 'values', so this method is used in that
# case. The result should be a tuple where the first item is the
# actual primary value of the property and the second item is any
# additional data to use in the comparison.
return self.__get__(instance), None
def notify(self, instance, old, new):
"""
Call all callback functions with the current value
Each callback will either be called using
callback(new) or callback(old, new) depending
on whether ``echo_old`` was set to `True` when calling
:func:`~echo.add_callback`
Parameters
----------
instance
The instance to consider
old
The old value of the property
new
The new value of the property
"""
if self._disabled.get(instance, False):
return
for cback in self._callbacks.get(instance, []):
cback(new)
for cback in self._2arg_callbacks.get(instance, []):
cback(old, new)
def disable(self, instance):
"""
Disable callbacks for a specific instance
"""
self._disabled[instance] = True
def enable(self, instance):
"""
Enable previously-disabled callbacks for a specific instance
"""
self._disabled[instance] = False
def add_callback(self, instance, func, echo_old=False, priority=0):
"""
Add a callback to a specific instance that manages this property
Parameters
----------
instance
The instance to add the callback to
func : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``func(old, new)``. If `False`
(the default), will be invoked as ``func(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if echo_old:
self._2arg_callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
else:
self._callbacks.setdefault(instance, CallbackContainer()).append(func, priority=priority)
def remove_callback(self, instance, func):
"""
Remove a previously-added callback
Parameters
----------
instance
The instance to detach the callback from
func : func
The callback function to remove
"""
for cb in [self._callbacks, self._2arg_callbacks]:
if instance not in cb:
continue
if func in cb[instance]:
cb[instance].remove(func)
return
|
glue-viz/echo | echo/core.py | HasCallbackProperties.add_callback | python | def add_callback(self, name, callback, echo_old=False, priority=0):
if self.is_callback_property(name):
prop = getattr(type(self), name)
prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
else:
raise TypeError("attribute '{0}' is not a callback property".format(name)) | Add a callback that gets triggered when a callback property of the
class changes.
Parameters
----------
name : str
The instance to add the callback to.
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``callback(old, new)``. If `False`
(the default), will be invoked as ``callback(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority). | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L245-L268 | [
"def is_callback_property(self, name):\n \"\"\"\n Whether a property (identified by name) is a callback property.\n\n Parameters\n ----------\n name : str\n The name of the property to check\n \"\"\"\n return isinstance(getattr(type(self), name, None), CallbackProperty)\n"
] | class HasCallbackProperties(object):
"""
A class that adds functionality to subclasses that use callback properties.
"""
def __init__(self):
from .list import ListCallbackProperty
self._global_callbacks = CallbackContainer()
self._ignored_properties = set()
self._delayed_properties = {}
self._delay_global_calls = {}
self._callback_wrappers = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
prop.add_callback(self, self._notify_global_lists)
def _ignore_global_callbacks(self, properties):
# This is to allow ignore_callbacks to work for global callbacks
self._ignored_properties.update(properties)
def _unignore_global_callbacks(self, properties):
# Once this is called, we simply remove properties from _ignored_properties
# and don't call the callbacks. This is used by ignore_callback
self._ignored_properties -= set(properties)
def _delay_global_callbacks(self, properties):
# This is to allow delay_callback to still have an effect in delaying
# global callbacks. We set _delayed_properties to a dictionary of the
# values at the point at which the callbacks are delayed.
self._delayed_properties.update(properties)
def _process_delayed_global_callbacks(self, properties):
# Once this is called, the global callbacks are called once each with
# a dictionary of the current values of properties that have been
# resumed.
kwargs = {}
for prop, new_value in properties.items():
old_value = self._delayed_properties.pop(prop)
if old_value != new_value:
kwargs[prop] = new_value[0]
self._notify_global(**kwargs)
def _notify_global_lists(self, *args):
from .list import ListCallbackProperty
properties = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
callback_list = getattr(self, prop_name)
if callback_list is args[0]:
properties[prop_name] = callback_list
break
self._notify_global(**properties)
def _notify_global(self, **kwargs):
for prop in set(self._delayed_properties) | set(self._ignored_properties):
if prop in kwargs:
kwargs.pop(prop)
if len(kwargs) > 0:
for callback in self._global_callbacks:
callback(**kwargs)
def __setattr__(self, attribute, value):
super(HasCallbackProperties, self).__setattr__(attribute, value)
if self.is_callback_property(attribute):
self._notify_global(**{attribute: value})
def remove_callback(self, name, callback):
"""
Remove a previously-added callback
Parameters
----------
name : str
The instance to remove the callback from.
func : func
The callback function to remove
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
try:
prop.remove_callback(self, callback)
except ValueError: # pragma: nocover
pass # Be forgiving if callback was already removed before
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def add_global_callback(self, callback):
"""
Add a global callback function, which is a callback that gets triggered
when any callback properties on the class change.
Parameters
----------
callback : func
The callback function to add
"""
self._global_callbacks.append(callback)
def remove_global_callback(self, callback):
"""
Remove a global callback function.
Parameters
----------
callback : func
The callback function to remove
"""
self._global_callbacks.remove(callback)
def is_callback_property(self, name):
"""
Whether a property (identified by name) is a callback property.
Parameters
----------
name : str
The name of the property to check
"""
return isinstance(getattr(type(self), name, None), CallbackProperty)
def iter_callback_properties(self):
"""
Iterator to loop over all callback properties.
"""
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name)
|
glue-viz/echo | echo/core.py | HasCallbackProperties.remove_callback | python | def remove_callback(self, name, callback):
if self.is_callback_property(name):
prop = getattr(type(self), name)
try:
prop.remove_callback(self, callback)
except ValueError: # pragma: nocover
pass # Be forgiving if callback was already removed before
else:
raise TypeError("attribute '{0}' is not a callback property".format(name)) | Remove a previously-added callback
Parameters
----------
name : str
The instance to remove the callback from.
func : func
The callback function to remove | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L270-L289 | [
"def is_callback_property(self, name):\n \"\"\"\n Whether a property (identified by name) is a callback property.\n\n Parameters\n ----------\n name : str\n The name of the property to check\n \"\"\"\n return isinstance(getattr(type(self), name, None), CallbackProperty)\n"
] | class HasCallbackProperties(object):
"""
A class that adds functionality to subclasses that use callback properties.
"""
def __init__(self):
from .list import ListCallbackProperty
self._global_callbacks = CallbackContainer()
self._ignored_properties = set()
self._delayed_properties = {}
self._delay_global_calls = {}
self._callback_wrappers = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
prop.add_callback(self, self._notify_global_lists)
def _ignore_global_callbacks(self, properties):
# This is to allow ignore_callbacks to work for global callbacks
self._ignored_properties.update(properties)
def _unignore_global_callbacks(self, properties):
# Once this is called, we simply remove properties from _ignored_properties
# and don't call the callbacks. This is used by ignore_callback
self._ignored_properties -= set(properties)
def _delay_global_callbacks(self, properties):
# This is to allow delay_callback to still have an effect in delaying
# global callbacks. We set _delayed_properties to a dictionary of the
# values at the point at which the callbacks are delayed.
self._delayed_properties.update(properties)
def _process_delayed_global_callbacks(self, properties):
# Once this is called, the global callbacks are called once each with
# a dictionary of the current values of properties that have been
# resumed.
kwargs = {}
for prop, new_value in properties.items():
old_value = self._delayed_properties.pop(prop)
if old_value != new_value:
kwargs[prop] = new_value[0]
self._notify_global(**kwargs)
def _notify_global_lists(self, *args):
from .list import ListCallbackProperty
properties = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
callback_list = getattr(self, prop_name)
if callback_list is args[0]:
properties[prop_name] = callback_list
break
self._notify_global(**properties)
def _notify_global(self, **kwargs):
for prop in set(self._delayed_properties) | set(self._ignored_properties):
if prop in kwargs:
kwargs.pop(prop)
if len(kwargs) > 0:
for callback in self._global_callbacks:
callback(**kwargs)
def __setattr__(self, attribute, value):
super(HasCallbackProperties, self).__setattr__(attribute, value)
if self.is_callback_property(attribute):
self._notify_global(**{attribute: value})
def add_callback(self, name, callback, echo_old=False, priority=0):
"""
Add a callback that gets triggered when a callback property of the
class changes.
Parameters
----------
name : str
The instance to add the callback to.
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``callback(old, new)``. If `False`
(the default), will be invoked as ``callback(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def add_global_callback(self, callback):
"""
Add a global callback function, which is a callback that gets triggered
when any callback properties on the class change.
Parameters
----------
callback : func
The callback function to add
"""
self._global_callbacks.append(callback)
def remove_global_callback(self, callback):
"""
Remove a global callback function.
Parameters
----------
callback : func
The callback function to remove
"""
self._global_callbacks.remove(callback)
def is_callback_property(self, name):
"""
Whether a property (identified by name) is a callback property.
Parameters
----------
name : str
The name of the property to check
"""
return isinstance(getattr(type(self), name, None), CallbackProperty)
def iter_callback_properties(self):
"""
Iterator to loop over all callback properties.
"""
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name)
|
glue-viz/echo | echo/core.py | HasCallbackProperties.iter_callback_properties | python | def iter_callback_properties(self):
for name in dir(self):
if self.is_callback_property(name):
yield name, getattr(type(self), name) | Iterator to loop over all callback properties. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/core.py#L325-L331 | [
"def is_callback_property(self, name):\n \"\"\"\n Whether a property (identified by name) is a callback property.\n\n Parameters\n ----------\n name : str\n The name of the property to check\n \"\"\"\n return isinstance(getattr(type(self), name, None), CallbackProperty)\n"
] | class HasCallbackProperties(object):
"""
A class that adds functionality to subclasses that use callback properties.
"""
def __init__(self):
from .list import ListCallbackProperty
self._global_callbacks = CallbackContainer()
self._ignored_properties = set()
self._delayed_properties = {}
self._delay_global_calls = {}
self._callback_wrappers = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
prop.add_callback(self, self._notify_global_lists)
def _ignore_global_callbacks(self, properties):
# This is to allow ignore_callbacks to work for global callbacks
self._ignored_properties.update(properties)
def _unignore_global_callbacks(self, properties):
# Once this is called, we simply remove properties from _ignored_properties
# and don't call the callbacks. This is used by ignore_callback
self._ignored_properties -= set(properties)
def _delay_global_callbacks(self, properties):
# This is to allow delay_callback to still have an effect in delaying
# global callbacks. We set _delayed_properties to a dictionary of the
# values at the point at which the callbacks are delayed.
self._delayed_properties.update(properties)
def _process_delayed_global_callbacks(self, properties):
# Once this is called, the global callbacks are called once each with
# a dictionary of the current values of properties that have been
# resumed.
kwargs = {}
for prop, new_value in properties.items():
old_value = self._delayed_properties.pop(prop)
if old_value != new_value:
kwargs[prop] = new_value[0]
self._notify_global(**kwargs)
def _notify_global_lists(self, *args):
from .list import ListCallbackProperty
properties = {}
for prop_name, prop in self.iter_callback_properties():
if isinstance(prop, ListCallbackProperty):
callback_list = getattr(self, prop_name)
if callback_list is args[0]:
properties[prop_name] = callback_list
break
self._notify_global(**properties)
def _notify_global(self, **kwargs):
for prop in set(self._delayed_properties) | set(self._ignored_properties):
if prop in kwargs:
kwargs.pop(prop)
if len(kwargs) > 0:
for callback in self._global_callbacks:
callback(**kwargs)
def __setattr__(self, attribute, value):
super(HasCallbackProperties, self).__setattr__(attribute, value)
if self.is_callback_property(attribute):
self._notify_global(**{attribute: value})
def add_callback(self, name, callback, echo_old=False, priority=0):
"""
Add a callback that gets triggered when a callback property of the
class changes.
Parameters
----------
name : str
The instance to add the callback to.
callback : func
The callback function to add
echo_old : bool, optional
If `True`, the callback function will be invoked with both the old
and new values of the property, as ``callback(old, new)``. If `False`
(the default), will be invoked as ``callback(new)``
priority : int, optional
This can optionally be used to force a certain order of execution of
callbacks (larger values indicate a higher priority).
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
prop.add_callback(self, callback, echo_old=echo_old, priority=priority)
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def remove_callback(self, name, callback):
"""
Remove a previously-added callback
Parameters
----------
name : str
The instance to remove the callback from.
func : func
The callback function to remove
"""
if self.is_callback_property(name):
prop = getattr(type(self), name)
try:
prop.remove_callback(self, callback)
except ValueError: # pragma: nocover
pass # Be forgiving if callback was already removed before
else:
raise TypeError("attribute '{0}' is not a callback property".format(name))
def add_global_callback(self, callback):
"""
Add a global callback function, which is a callback that gets triggered
when any callback properties on the class change.
Parameters
----------
callback : func
The callback function to add
"""
self._global_callbacks.append(callback)
def remove_global_callback(self, callback):
"""
Remove a global callback function.
Parameters
----------
callback : func
The callback function to remove
"""
self._global_callbacks.remove(callback)
def is_callback_property(self, name):
"""
Whether a property (identified by name) is a callback property.
Parameters
----------
name : str
The name of the property to check
"""
return isinstance(getattr(type(self), name, None), CallbackProperty)
|
glue-viz/echo | echo/qt/autoconnect.py | autoconnect_callbacks_to_qt | python | def autoconnect_callbacks_to_qt(instance, widget, connect_kwargs={}):
if not hasattr(widget, 'children'):
return
for child in widget.findChildren(QtWidgets.QWidget):
full_name = child.objectName()
if '_' in full_name:
wtype, wname = full_name.split('_', 1)
if full_name in connect_kwargs:
kwargs = connect_kwargs[full_name]
elif wname in connect_kwargs:
kwargs = connect_kwargs[wname]
else:
kwargs = {}
if hasattr(instance, wname):
if wtype in HANDLERS:
HANDLERS[wtype](instance, wname, child, **kwargs) | Given a class instance with callback properties and a Qt widget/window,
connect callback properties to Qt widgets automatically.
The matching is done based on the objectName of the Qt widgets. Qt widgets
that need to be connected should be named using the syntax ``type_name``
where ``type`` describes the kind of matching to be done, and ``name``
matches the name of a callback property. By default, the types can be:
* ``value``: the callback property is linked to a Qt widget that has
``value`` and ``setValue`` methods. Note that for this type, two
additional keyword arguments can be specified using ``connect_kwargs``
(see below): these are ``value_range``, which is used for cases where
the Qt widget is e.g. a slider which has a range of values, and you want
to map this range of values onto a different range for the callback
property, and the second is ``log``, which can be set to `True` if this
mapping should be done in log space.
* ``valuetext``: the callback property is linked to a Qt widget that has
``text`` and ``setText`` methods, and the text is set to a string
representation of the value. Note that for this type, an additional
argument ``fmt`` can be provided, which gives either the format to use
using the ``{}`` syntax, or should be a function that takes a value
and returns a string. Optionally, if the Qt widget supports
the ``editingFinished`` signal, this signal is connected to the callback
property too.
* ``bool``: the callback property is linked to a Qt widget that has
``isChecked`` and ``setChecked`` methods, such as a checkable button.
* ``text``: the callback property is linked to a Qt widget that has
``text`` and ``setText`` methods. Optionally, if the Qt widget supports
the ``editingFinished`` signal, this signal is connected to the callback
property too.
* ``combodata``: the callback property is linked to a QComboBox based on
the ``userData`` of the entries in the combo box.
* ``combotext``: the callback property is linked to a QComboBox based on
the label of the entries in the combo box.
Applications can also define additional mappings between type and
auto-linking. To do this, simply add a new entry to the ``HANDLERS`` object::
>>> echo.qt.autoconnect import HANDLERS
>>> HANDLERS['color'] = connect_color
The handler function (``connect_color`` in the example above) should take
the following arguments: the instance the callback property is attached to,
the name of the callback property, the Qt widget, and optionally some
keyword arguments.
When calling ``autoconnect_callbacks_to_qt``, you can specify
``connect_kwargs``, where each key should be a valid callback property name,
and which gives any additional keyword arguments that can be taken by the
connect functions, as described above. These include for example
``value_range``, ``log``, and ``fmt``.
This function is especially useful when defining ui files, since widget
objectNames can be easily set during the editing process. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/autoconnect.py#L27-L105 | null | from __future__ import absolute_import, division, print_function
from qtpy import QtWidgets
from .connect import (connect_checkable_button,
connect_value,
connect_combo_data,
connect_combo_text,
connect_float_text,
connect_text,
connect_button,
connect_combo_selection)
__all__ = ['autoconnect_callbacks_to_qt']
HANDLERS = {}
HANDLERS['value'] = connect_value
HANDLERS['valuetext'] = connect_float_text
HANDLERS['bool'] = connect_checkable_button
HANDLERS['text'] = connect_text
HANDLERS['combodata'] = connect_combo_data
HANDLERS['combotext'] = connect_combo_text
HANDLERS['button'] = connect_button
HANDLERS['combodatasel'] = connect_combo_selection
|
glue-viz/echo | echo/qt/connect.py | connect_checkable_button | python | def connect_checkable_button(instance, prop, widget):
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False) | Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L20-L36 | [
"def add_callback(instance, prop, callback, echo_old=False, priority=0):\n \"\"\"\n Attach a callback function to a property in an instance\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n prop : str\n Name of callback property in `instance`\n callback : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n\n Examples\n --------\n\n ::\n\n class Foo:\n bar = CallbackProperty(0)\n\n def callback(value):\n pass\n\n f = Foo()\n add_callback(f, 'bar', callback)\n\n \"\"\"\n p = getattr(type(instance), prop)\n if not isinstance(p, CallbackProperty):\n raise TypeError(\"%s is not a CallbackProperty\" % prop)\n p.add_callback(instance, callback, echo_old=echo_old, priority=priority)\n"
] | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
"""
Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string.
"""
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
"""
Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property.
"""
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
"""
Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method
"""
widget.clicked.connect(getattr(instance, prop))
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')
def update_widget(value):
# Update choices in the combo box
combo_data = [widget.itemData(idx) for idx in range(widget.count())]
choices = getattr(type(instance), prop).get_choices(instance)
choice_labels = getattr(type(instance), prop).get_choice_labels(instance)
if combo_data == choices:
choices_updated = False
else:
widget.blockSignals(True)
widget.clear()
if len(choices) == 0:
return
combo_model = widget.model()
for index, (label, choice) in enumerate(zip(choice_labels, choices)):
widget.addItem(label, userData=choice)
# We interpret None data as being disabled rows (used for headers)
if isinstance(choice, ChoiceSeparator):
item = combo_model.item(index)
palette = widget.palette()
item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
choices_updated = True
# Update current selection
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
if idx == widget.currentIndex() and not choices_updated:
return
widget.setCurrentIndex(idx)
widget.blockSignals(False)
widget.currentIndexChanged.emit(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | connect_text | python | def connect_text(instance, prop, widget):
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop)) | Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L39-L74 | [
"def add_callback(instance, prop, callback, echo_old=False, priority=0):\n \"\"\"\n Attach a callback function to a property in an instance\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n prop : str\n Name of callback property in `instance`\n callback : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n\n Examples\n --------\n\n ::\n\n class Foo:\n bar = CallbackProperty(0)\n\n def callback(value):\n pass\n\n f = Foo()\n add_callback(f, 'bar', callback)\n\n \"\"\"\n p = getattr(type(instance), prop)\n if not isinstance(p, CallbackProperty):\n raise TypeError(\"%s is not a CallbackProperty\" % prop)\n p.add_callback(instance, callback, echo_old=echo_old, priority=priority)\n",
"def update_widget(val):\n if hasattr(widget, 'editingFinished'):\n widget.blockSignals(True)\n widget.setText(val)\n widget.blockSignals(False)\n widget.editingFinished.emit()\n else:\n widget.setText(val)\n"
] | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
"""
Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string.
"""
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
"""
Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property.
"""
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
"""
Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method
"""
widget.clicked.connect(getattr(instance, prop))
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')
def update_widget(value):
# Update choices in the combo box
combo_data = [widget.itemData(idx) for idx in range(widget.count())]
choices = getattr(type(instance), prop).get_choices(instance)
choice_labels = getattr(type(instance), prop).get_choice_labels(instance)
if combo_data == choices:
choices_updated = False
else:
widget.blockSignals(True)
widget.clear()
if len(choices) == 0:
return
combo_model = widget.model()
for index, (label, choice) in enumerate(zip(choice_labels, choices)):
widget.addItem(label, userData=choice)
# We interpret None data as being disabled rows (used for headers)
if isinstance(choice, ChoiceSeparator):
item = combo_model.item(index)
palette = widget.palette()
item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
choices_updated = True
# Update current selection
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
if idx == widget.currentIndex() and not choices_updated:
return
widget.setCurrentIndex(idx)
widget.blockSignals(False)
widget.currentIndexChanged.emit(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | connect_combo_data | python | def connect_combo_data(instance, prop, widget):
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop)) | Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L77-L114 | [
"def add_callback(instance, prop, callback, echo_old=False, priority=0):\n \"\"\"\n Attach a callback function to a property in an instance\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n prop : str\n Name of callback property in `instance`\n callback : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n\n Examples\n --------\n\n ::\n\n class Foo:\n bar = CallbackProperty(0)\n\n def callback(value):\n pass\n\n f = Foo()\n add_callback(f, 'bar', callback)\n\n \"\"\"\n p = getattr(type(instance), prop)\n if not isinstance(p, CallbackProperty):\n raise TypeError(\"%s is not a CallbackProperty\" % prop)\n p.add_callback(instance, callback, echo_old=echo_old, priority=priority)\n",
"def update_widget(value):\n try:\n idx = _find_combo_data(widget, value)\n except ValueError:\n if value is None:\n idx = -1\n else:\n raise\n widget.setCurrentIndex(idx)\n"
] | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
"""
Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string.
"""
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
"""
Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property.
"""
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
"""
Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method
"""
widget.clicked.connect(getattr(instance, prop))
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')
def update_widget(value):
# Update choices in the combo box
combo_data = [widget.itemData(idx) for idx in range(widget.count())]
choices = getattr(type(instance), prop).get_choices(instance)
choice_labels = getattr(type(instance), prop).get_choice_labels(instance)
if combo_data == choices:
choices_updated = False
else:
widget.blockSignals(True)
widget.clear()
if len(choices) == 0:
return
combo_model = widget.model()
for index, (label, choice) in enumerate(zip(choice_labels, choices)):
widget.addItem(label, userData=choice)
# We interpret None data as being disabled rows (used for headers)
if isinstance(choice, ChoiceSeparator):
item = combo_model.item(index)
palette = widget.palette()
item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
choices_updated = True
# Update current selection
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
if idx == widget.currentIndex() and not choices_updated:
return
widget.setCurrentIndex(idx)
widget.blockSignals(False)
widget.currentIndexChanged.emit(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | connect_combo_text | python | def connect_combo_text(instance, prop, widget):
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop)) | Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L117-L154 | [
"def add_callback(instance, prop, callback, echo_old=False, priority=0):\n \"\"\"\n Attach a callback function to a property in an instance\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n prop : str\n Name of callback property in `instance`\n callback : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n\n Examples\n --------\n\n ::\n\n class Foo:\n bar = CallbackProperty(0)\n\n def callback(value):\n pass\n\n f = Foo()\n add_callback(f, 'bar', callback)\n\n \"\"\"\n p = getattr(type(instance), prop)\n if not isinstance(p, CallbackProperty):\n raise TypeError(\"%s is not a CallbackProperty\" % prop)\n p.add_callback(instance, callback, echo_old=echo_old, priority=priority)\n",
"def update_widget(value):\n try:\n idx = _find_combo_text(widget, value)\n except ValueError:\n if value is None:\n idx = -1\n else:\n raise\n widget.setCurrentIndex(idx)\n"
] | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
"""
Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string.
"""
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
"""
Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property.
"""
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
"""
Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method
"""
widget.clicked.connect(getattr(instance, prop))
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')
def update_widget(value):
# Update choices in the combo box
combo_data = [widget.itemData(idx) for idx in range(widget.count())]
choices = getattr(type(instance), prop).get_choices(instance)
choice_labels = getattr(type(instance), prop).get_choice_labels(instance)
if combo_data == choices:
choices_updated = False
else:
widget.blockSignals(True)
widget.clear()
if len(choices) == 0:
return
combo_model = widget.model()
for index, (label, choice) in enumerate(zip(choice_labels, choices)):
widget.addItem(label, userData=choice)
# We interpret None data as being disabled rows (used for headers)
if isinstance(choice, ChoiceSeparator):
item = combo_model.item(index)
palette = widget.palette()
item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
choices_updated = True
# Update current selection
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
if idx == widget.currentIndex() and not choices_updated:
return
widget.setCurrentIndex(idx)
widget.blockSignals(False)
widget.currentIndexChanged.emit(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | connect_float_text | python | def connect_float_text(instance, prop, widget, fmt="{:g}"):
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop)) | Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L157-L200 | [
"def add_callback(instance, prop, callback, echo_old=False, priority=0):\n \"\"\"\n Attach a callback function to a property in an instance\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n prop : str\n Name of callback property in `instance`\n callback : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n\n Examples\n --------\n\n ::\n\n class Foo:\n bar = CallbackProperty(0)\n\n def callback(value):\n pass\n\n f = Foo()\n add_callback(f, 'bar', callback)\n\n \"\"\"\n p = getattr(type(instance), prop)\n if not isinstance(p, CallbackProperty):\n raise TypeError(\"%s is not a CallbackProperty\" % prop)\n p.add_callback(instance, callback, echo_old=echo_old, priority=priority)\n",
"def update_widget(val):\n if val is None:\n val = 0.\n widget.setText(format_func(val))\n"
] | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
"""
Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property.
"""
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
"""
Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method
"""
widget.clicked.connect(getattr(instance, prop))
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
    """
    Connect a ``SelectionCallbackProperty`` with a QComboBox widget.

    The combo box is repopulated from the property's current choices and the
    selection is kept in sync in both directions.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property (must be a
        ``SelectionCallbackProperty``)
    widget : QComboBox
        The combo box to connect.
    display : callable, optional
        Kept for backward-compatibility; not currently used.

    Raises
    ------
    TypeError
        If ``prop`` is not a ``SelectionCallbackProperty``.
    """
    if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
        raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')

    def update_widget(value):

        # Update choices in the combo box
        combo_data = [widget.itemData(idx) for idx in range(widget.count())]
        choices = getattr(type(instance), prop).get_choices(instance)
        choice_labels = getattr(type(instance), prop).get_choice_labels(instance)

        if combo_data == choices:
            choices_updated = False
        else:
            widget.blockSignals(True)
            widget.clear()
            if len(choices) == 0:
                # BUG FIX: previously this returned while signals were still
                # blocked, leaving the combo box permanently unresponsive.
                widget.blockSignals(False)
                return
            combo_model = widget.model()
            for index, (label, choice) in enumerate(zip(choice_labels, choices)):
                widget.addItem(label, userData=choice)
                # ChoiceSeparator rows act as disabled headers
                if isinstance(choice, ChoiceSeparator):
                    item = combo_model.item(index)
                    palette = widget.palette()
                    item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
                    item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
            choices_updated = True

        # Update current selection
        try:
            idx = _find_combo_data(widget, value)
        except ValueError:
            if value is None:
                idx = -1
            else:
                # BUG FIX: unblock signals before propagating, otherwise the
                # widget would be left with signals disabled.
                if choices_updated:
                    widget.blockSignals(False)
                raise

        if idx == widget.currentIndex() and not choices_updated:
            return

        widget.setCurrentIndex(idx)
        widget.blockSignals(False)
        widget.currentIndexChanged.emit(idx)

    def update_prop(idx):
        # -1 (no selection) maps back onto None
        if idx == -1:
            setattr(instance, prop, None)
        else:
            setattr(instance, prop, widget.itemData(idx))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | connect_value | python | def connect_value(instance, prop, widget, value_range=None, log=False):
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop)) | Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property. | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L203-L253 | [
"def add_callback(instance, prop, callback, echo_old=False, priority=0):\n \"\"\"\n Attach a callback function to a property in an instance\n\n Parameters\n ----------\n instance\n The instance to add the callback to\n prop : str\n Name of callback property in `instance`\n callback : func\n The callback function to add\n echo_old : bool, optional\n If `True`, the callback function will be invoked with both the old\n and new values of the property, as ``func(old, new)``. If `False`\n (the default), will be invoked as ``func(new)``\n priority : int, optional\n This can optionally be used to force a certain order of execution of\n callbacks (larger values indicate a higher priority).\n\n Examples\n --------\n\n ::\n\n class Foo:\n bar = CallbackProperty(0)\n\n def callback(value):\n pass\n\n f = Foo()\n add_callback(f, 'bar', callback)\n\n \"\"\"\n p = getattr(type(instance), prop)\n if not isinstance(p, CallbackProperty):\n raise TypeError(\"%s is not a CallbackProperty\" % prop)\n p.add_callback(instance, callback, echo_old=echo_old, priority=priority)\n",
"def update_widget(val):\n if val is None:\n widget.setValue(0)\n return\n if log:\n val = math.log10(val)\n if value_range is not None:\n imin, imax = widget.minimum(), widget.maximum()\n val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin\n widget.setValue(val)\n"
] | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
    """
    Connect a boolean callback property with a Qt button widget.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``setChecked``
        method and the ``toggled`` signal.
    """
    # Property -> widget: new property values update the checked state.
    add_callback(instance, prop, widget.setChecked)
    # Widget -> property: toggling writes the boolean straight back.
    widget.toggled.connect(partial(setattr, instance, prop))
    # Initialise from the current value, treating ``None`` as unchecked.
    initial = getattr(instance, prop) or False
    widget.setChecked(initial)
def connect_text(instance, prop, widget):
    """
    Connect a string callback property with a Qt widget containing text.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``setText`` and
        ``text`` methods as well optionally the ``editingFinished`` signal.
    """

    def update_prop():
        # Widget -> property
        setattr(instance, prop, widget.text())

    def update_widget(val):
        # Property -> widget. When the widget supports ``editingFinished`` we
        # update the text with signals blocked and then emit the signal once,
        # so listeners see a single, consistent notification.
        if not hasattr(widget, 'editingFinished'):
            widget.setText(val)
            return
        widget.blockSignals(True)
        widget.setText(val)
        widget.blockSignals(False)
        widget.editingFinished.emit()

    add_callback(instance, prop, update_widget)
    try:
        widget.editingFinished.connect(update_prop)
    except AttributeError:
        # Widget has no editingFinished signal: one-way binding only.
        pass
    update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
    """
    Connect a callback property with a QComboBox widget based on the userData.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QComboBox
        The combo box to connect.

    See Also
    --------
    connect_combo_text: connect a callback property with a QComboBox widget based on the text.
    """

    def update_widget(value):
        # Property -> widget: select the row whose userData matches ``value``.
        try:
            index = _find_combo_data(widget, value)
        except ValueError:
            if value is not None:
                raise
            # ``None`` maps onto "no selection".
            index = -1
        widget.setCurrentIndex(index)

    def update_prop(index):
        # Widget -> property: -1 (no selection) maps back onto ``None``.
        setattr(instance, prop, None if index == -1 else widget.itemData(index))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
    """
    Connect a callback property with a QComboBox widget based on the text.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QComboBox
        The combo box to connect.

    See Also
    --------
    connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
    """

    def update_widget(value):
        # Property -> widget: select the row whose display text is ``value``.
        try:
            index = _find_combo_text(widget, value)
        except ValueError:
            if value is not None:
                raise
            # ``None`` maps onto "no selection".
            index = -1
        widget.setCurrentIndex(index)

    def update_prop(index):
        # Widget -> property: -1 (no selection) maps back onto ``None``.
        setattr(instance, prop, None if index == -1 else widget.itemText(index))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
    """
    Connect a numerical callback property with a Qt widget containing text.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``setText`` and
        ``text`` methods as well optionally the ``editingFinished`` signal.
    fmt : str or func
        This should be either a format string (in the ``{}`` notation), or a
        function that takes a number and returns a string.
    """
    # Normalise ``fmt`` to a callable that renders a number as text.
    format_func = fmt if callable(fmt) else fmt.format

    def update_prop():
        text = widget.text()
        try:
            setattr(instance, prop, float(text))
        except ValueError:
            # Unparseable text falls back to zero.
            setattr(instance, prop, 0)

    def update_widget(value):
        # ``None`` is displayed as zero.
        widget.setText(format_func(0. if value is None else value))

    add_callback(instance, prop, update_widget)
    try:
        widget.editingFinished.connect(update_prop)
    except AttributeError:
        # Widget has no editingFinished signal: one-way binding only.
        pass
    update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
    """
    Connect a button with a callback method

    Parameters
    ----------
    instance : object
        The class instance that the callback method is attached to
    prop : str
        The name of the callback method
    widget : QtWidget
        The Qt widget to connect. This should implement the ``clicked`` method
    """
    # Look up the bound method once and invoke it on every click.
    callback = getattr(instance, prop)
    widget.clicked.connect(callback)
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
    """
    Connect a ``SelectionCallbackProperty`` with a QComboBox widget.

    The combo box is repopulated from the property's current choices and the
    selection is kept in sync in both directions.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property (must be a
        ``SelectionCallbackProperty``)
    widget : QComboBox
        The combo box to connect.
    display : callable, optional
        Kept for backward-compatibility; not currently used.

    Raises
    ------
    TypeError
        If ``prop`` is not a ``SelectionCallbackProperty``.
    """
    if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
        raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')

    def update_widget(value):

        # Update choices in the combo box
        combo_data = [widget.itemData(idx) for idx in range(widget.count())]
        choices = getattr(type(instance), prop).get_choices(instance)
        choice_labels = getattr(type(instance), prop).get_choice_labels(instance)

        if combo_data == choices:
            choices_updated = False
        else:
            widget.blockSignals(True)
            widget.clear()
            if len(choices) == 0:
                # BUG FIX: previously this returned while signals were still
                # blocked, leaving the combo box permanently unresponsive.
                widget.blockSignals(False)
                return
            combo_model = widget.model()
            for index, (label, choice) in enumerate(zip(choice_labels, choices)):
                widget.addItem(label, userData=choice)
                # ChoiceSeparator rows act as disabled headers
                if isinstance(choice, ChoiceSeparator):
                    item = combo_model.item(index)
                    palette = widget.palette()
                    item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
                    item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
            choices_updated = True

        # Update current selection
        try:
            idx = _find_combo_data(widget, value)
        except ValueError:
            if value is None:
                idx = -1
            else:
                # BUG FIX: unblock signals before propagating, otherwise the
                # widget would be left with signals disabled.
                if choices_updated:
                    widget.blockSignals(False)
                raise

        if idx == widget.currentIndex() and not choices_updated:
            return

        widget.setCurrentIndex(idx)
        widget.blockSignals(False)
        widget.currentIndexChanged.emit(idx)

    def update_prop(idx):
        # -1 (no selection) maps back onto None
        if idx == -1:
            setattr(instance, prop, None)
        else:
            setattr(instance, prop, widget.itemData(idx))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | connect_button | python | def connect_button(instance, prop, widget):
widget.clicked.connect(getattr(instance, prop)) | Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L256-L269 | null | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
"""
Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string.
"""
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
    """
    Connect a numerical callback property with a Qt widget representing a value.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``value`` and
        ``setValue`` methods as well as the ``valueChanged`` signal.
    value_range : iterable, optional
        A pair of two values representing the true range of values (since
        Qt widgets such as sliders can only have values in certain ranges).
    log : bool, optional
        Whether the Qt widget value should be mapped to the log of the callback
        property.
    """
    if log:
        if value_range is None:
            raise ValueError("log option can only be set if value_range is given")
        # Work in log space throughout; convert back with 10 ** x.
        value_range = math.log10(value_range[0]), math.log10(value_range[1])

    def update_prop():
        # Widget -> property: rescale from widget units to the true range.
        value = widget.value()
        if value_range is not None:
            wmin, wmax = widget.minimum(), widget.maximum()
            fraction = (value - wmin) / (wmax - wmin)
            value = fraction * (value_range[1] - value_range[0]) + value_range[0]
        if log:
            value = 10 ** value
        setattr(instance, prop, value)

    def update_widget(value):
        # Property -> widget: rescale from the true range to widget units.
        if value is None:
            widget.setValue(0)
            return
        if log:
            value = math.log10(value)
        if value_range is not None:
            wmin, wmax = widget.minimum(), widget.maximum()
            fraction = (value - value_range[0]) / (value_range[1] - value_range[0])
            value = fraction * (wmax - wmin) + wmin
        widget.setValue(value)

    add_callback(instance, prop, update_widget)
    widget.valueChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
    """
    Connect a ``SelectionCallbackProperty`` with a QComboBox widget.

    The combo box is repopulated from the property's current choices and the
    selection is kept in sync in both directions.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property (must be a
        ``SelectionCallbackProperty``)
    widget : QComboBox
        The combo box to connect.
    display : callable, optional
        Kept for backward-compatibility; not currently used.

    Raises
    ------
    TypeError
        If ``prop`` is not a ``SelectionCallbackProperty``.
    """
    if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
        raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')

    def update_widget(value):

        # Update choices in the combo box
        combo_data = [widget.itemData(idx) for idx in range(widget.count())]
        choices = getattr(type(instance), prop).get_choices(instance)
        choice_labels = getattr(type(instance), prop).get_choice_labels(instance)

        if combo_data == choices:
            choices_updated = False
        else:
            widget.blockSignals(True)
            widget.clear()
            if len(choices) == 0:
                # BUG FIX: previously this returned while signals were still
                # blocked, leaving the combo box permanently unresponsive.
                widget.blockSignals(False)
                return
            combo_model = widget.model()
            for index, (label, choice) in enumerate(zip(choice_labels, choices)):
                widget.addItem(label, userData=choice)
                # ChoiceSeparator rows act as disabled headers
                if isinstance(choice, ChoiceSeparator):
                    item = combo_model.item(index)
                    palette = widget.palette()
                    item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
                    item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
            choices_updated = True

        # Update current selection
        try:
            idx = _find_combo_data(widget, value)
        except ValueError:
            if value is None:
                idx = -1
            else:
                # BUG FIX: unblock signals before propagating, otherwise the
                # widget would be left with signals disabled.
                if choices_updated:
                    widget.blockSignals(False)
                raise

        if idx == widget.currentIndex() and not choices_updated:
            return

        widget.setCurrentIndex(idx)
        widget.blockSignals(False)
        widget.currentIndexChanged.emit(idx)

    def update_prop(idx):
        # -1 (no selection) maps back onto None
        if idx == -1:
            setattr(instance, prop, None)
        else:
            setattr(instance, prop, widget.itemData(idx))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | _find_combo_data | python | def _find_combo_data(widget, value):
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,)) | Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L272-L284 | null | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
"""
def update_widget(value):
try:
idx = _find_combo_text(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemText(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
"""
Connect a numerical callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
fmt : str or func
This should be either a format string (in the ``{}`` notation), or a
function that takes a number and returns a string.
"""
if callable(fmt):
format_func = fmt
else:
def format_func(x):
return fmt.format(x)
def update_prop():
val = widget.text()
try:
setattr(instance, prop, float(val))
except ValueError:
setattr(instance, prop, 0)
def update_widget(val):
if val is None:
val = 0.
widget.setText(format_func(val))
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
"""
Connect a numerical callback property with a Qt widget representing a value.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
value_range : iterable, optional
A pair of two values representing the true range of values (since
Qt widgets such as sliders can only have values in certain ranges).
log : bool, optional
Whether the Qt widget value should be mapped to the log of the callback
property.
"""
if log:
if value_range is None:
raise ValueError("log option can only be set if value_range is given")
else:
value_range = math.log10(value_range[0]), math.log10(value_range[1])
def update_prop():
val = widget.value()
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - imin) / (imax - imin) * (value_range[1] - value_range[0]) + value_range[0]
if log:
val = 10 ** val
setattr(instance, prop, val)
def update_widget(val):
if val is None:
widget.setValue(0)
return
if log:
val = math.log10(val)
if value_range is not None:
imin, imax = widget.minimum(), widget.maximum()
val = (val - value_range[0]) / (value_range[1] - value_range[0]) * (imax - imin) + imin
widget.setValue(val)
add_callback(instance, prop, update_widget)
widget.valueChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
"""
Connect a button with a callback method
Parameters
----------
instance : object
The class instance that the callback method is attached to
prop : str
The name of the callback method
widget : QtWidget
The Qt widget to connect. This should implement the ``clicked`` method
"""
widget.clicked.connect(getattr(instance, prop))
def _find_combo_text(widget, value):
"""
Returns the index in a combo box where text == value
Raises a ValueError if data is not found
"""
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i
def connect_combo_selection(instance, prop, widget, display=str):
    """
    Connect a ``SelectionCallbackProperty`` with a QComboBox widget.

    The combo box is repopulated from the property's current choices and the
    selection is kept in sync in both directions.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property (must be a
        ``SelectionCallbackProperty``)
    widget : QComboBox
        The combo box to connect.
    display : callable, optional
        Kept for backward-compatibility; not currently used.

    Raises
    ------
    TypeError
        If ``prop`` is not a ``SelectionCallbackProperty``.
    """
    if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
        raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')

    def update_widget(value):

        # Update choices in the combo box
        combo_data = [widget.itemData(idx) for idx in range(widget.count())]
        choices = getattr(type(instance), prop).get_choices(instance)
        choice_labels = getattr(type(instance), prop).get_choice_labels(instance)

        if combo_data == choices:
            choices_updated = False
        else:
            widget.blockSignals(True)
            widget.clear()
            if len(choices) == 0:
                # BUG FIX: previously this returned while signals were still
                # blocked, leaving the combo box permanently unresponsive.
                widget.blockSignals(False)
                return
            combo_model = widget.model()
            for index, (label, choice) in enumerate(zip(choice_labels, choices)):
                widget.addItem(label, userData=choice)
                # ChoiceSeparator rows act as disabled headers
                if isinstance(choice, ChoiceSeparator):
                    item = combo_model.item(index)
                    palette = widget.palette()
                    item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
                    item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))
            choices_updated = True

        # Update current selection
        try:
            idx = _find_combo_data(widget, value)
        except ValueError:
            if value is None:
                idx = -1
            else:
                # BUG FIX: unblock signals before propagating, otherwise the
                # widget would be left with signals disabled.
                if choices_updated:
                    widget.blockSignals(False)
                raise

        if idx == widget.currentIndex() and not choices_updated:
            return

        widget.setCurrentIndex(idx)
        widget.blockSignals(False)
        widget.currentIndexChanged.emit(idx)

    def update_prop(idx):
        # -1 (no selection) maps back onto None
        if idx == -1:
            setattr(instance, prop, None)
        else:
            setattr(instance, prop, widget.itemData(idx))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)
    update_widget(getattr(instance, prop))
|
glue-viz/echo | echo/qt/connect.py | _find_combo_text | python | def _find_combo_text(widget, value):
i = widget.findText(value)
if i == -1:
raise ValueError("%s not found in combo box" % value)
else:
return i | Returns the index in a combo box where text == value
Raises a ValueError if data is not found | train | https://github.com/glue-viz/echo/blob/6ad54cc5e869de27c34e8716f2619ddc640f08fe/echo/qt/connect.py#L287-L297 | null | # The functions in this module are used to connect callback properties to Qt
# widgets.
from __future__ import absolute_import, division, print_function
import math
from functools import partial
from qtpy import QtGui
from qtpy.QtCore import Qt
from ..core import add_callback
from ..selection import SelectionCallbackProperty, ChoiceSeparator
__all__ = ['connect_checkable_button', 'connect_text', 'connect_combo_data',
'connect_combo_text', 'connect_float_text', 'connect_value',
'connect_combo_selection']
def connect_checkable_button(instance, prop, widget):
"""
Connect a boolean callback property with a Qt button widget.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setChecked``
method and the ``toggled`` signal.
"""
add_callback(instance, prop, widget.setChecked)
widget.toggled.connect(partial(setattr, instance, prop))
widget.setChecked(getattr(instance, prop) or False)
def connect_text(instance, prop, widget):
"""
Connect a string callback property with a Qt widget containing text.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QtWidget
The Qt widget to connect. This should implement the ``setText`` and
``text`` methods as well optionally the ``editingFinished`` signal.
"""
def update_prop():
val = widget.text()
setattr(instance, prop, val)
def update_widget(val):
if hasattr(widget, 'editingFinished'):
widget.blockSignals(True)
widget.setText(val)
widget.blockSignals(False)
widget.editingFinished.emit()
else:
widget.setText(val)
add_callback(instance, prop, update_widget)
try:
widget.editingFinished.connect(update_prop)
except AttributeError:
pass
update_widget(getattr(instance, prop))
def connect_combo_data(instance, prop, widget):
"""
Connect a callback property with a QComboBox widget based on the userData.
Parameters
----------
instance : object
The class instance that the callback property is attached to
prop : str
The name of the callback property
widget : QComboBox
The combo box to connect.
See Also
--------
connect_combo_text: connect a callback property with a QComboBox widget based on the text.
"""
def update_widget(value):
try:
idx = _find_combo_data(widget, value)
except ValueError:
if value is None:
idx = -1
else:
raise
widget.setCurrentIndex(idx)
def update_prop(idx):
if idx == -1:
setattr(instance, prop, None)
else:
setattr(instance, prop, widget.itemData(idx))
add_callback(instance, prop, update_widget)
widget.currentIndexChanged.connect(update_prop)
update_widget(getattr(instance, prop))
def connect_combo_text(instance, prop, widget):
    """
    Connect a callback property with a QComboBox widget based on the text.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QComboBox
        The combo box to connect.

    See Also
    --------
    connect_combo_data: connect a callback property with a QComboBox widget based on the userData.
    """

    def on_prop_change(value):
        # Look up the combo entry whose text matches the new value;
        # None maps to "no selection" (index -1).
        try:
            index = _find_combo_text(widget, value)
        except ValueError:
            if value is not None:
                raise
            index = -1
        widget.setCurrentIndex(index)

    def on_index_change(index):
        setattr(instance, prop, widget.itemText(index) if index != -1 else None)

    add_callback(instance, prop, on_prop_change)
    widget.currentIndexChanged.connect(on_index_change)

    # Initialize the widget from the property's current value.
    on_prop_change(getattr(instance, prop))
def connect_float_text(instance, prop, widget, fmt="{:g}"):
    """
    Connect a numerical callback property with a Qt widget containing text.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``setText`` and
        ``text`` methods as well optionally the ``editingFinished`` signal.
    fmt : str or func
        This should be either a format string (in the ``{}`` notation), or a
        function that takes a number and returns a string.
    """

    # Normalize fmt to a callable: a format string becomes its bound
    # .format method, which is equivalent to fmt.format(x).
    format_func = fmt if callable(fmt) else fmt.format

    def push_to_prop():
        # Widget -> property: parse the text as a float, falling back to 0
        # on invalid input (matches the original best-effort behavior).
        try:
            value = float(widget.text())
        except ValueError:
            value = 0
        setattr(instance, prop, value)

    def push_to_widget(value):
        # Property -> widget: None is displayed as 0.
        widget.setText(format_func(0. if value is None else value))

    add_callback(instance, prop, push_to_widget)

    if hasattr(widget, 'editingFinished'):
        widget.editingFinished.connect(push_to_prop)

    # Initialize the widget from the property's current value.
    push_to_widget(getattr(instance, prop))
def connect_value(instance, prop, widget, value_range=None, log=False):
    """
    Connect a numerical callback property with a Qt widget representing a value.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property
    widget : QtWidget
        The Qt widget to connect. This should implement the ``value`` and
        ``setValue`` methods as well as the ``valueChanged`` signal.
    value_range : iterable, optional
        A pair of two values representing the true range of values (since
        Qt widgets such as sliders can only have values in certain ranges).
    log : bool, optional
        Whether the Qt widget value should be mapped to the log of the callback
        property.
    """

    if log:
        if value_range is None:
            raise ValueError("log option can only be set if value_range is given")
        # Work in log10 space; widget positions map linearly onto it.
        value_range = math.log10(value_range[0]), math.log10(value_range[1])

    def widget_to_prop():
        value = widget.value()
        if value_range is not None:
            # Rescale the widget's integer range onto the true value range.
            wmin, wmax = widget.minimum(), widget.maximum()
            lo, hi = value_range
            value = (value - wmin) / (wmax - wmin) * (hi - lo) + lo
        if log:
            value = 10 ** value
        setattr(instance, prop, value)

    def prop_to_widget(value):
        if value is None:
            widget.setValue(0)
            return
        if log:
            value = math.log10(value)
        if value_range is not None:
            # Inverse of the mapping in widget_to_prop.
            wmin, wmax = widget.minimum(), widget.maximum()
            lo, hi = value_range
            value = (value - lo) / (hi - lo) * (wmax - wmin) + wmin
        widget.setValue(value)

    add_callback(instance, prop, prop_to_widget)
    widget.valueChanged.connect(widget_to_prop)

    # Initialize the widget from the property's current value.
    prop_to_widget(getattr(instance, prop))
def connect_button(instance, prop, widget):
    """
    Connect a button with a callback method.

    Parameters
    ----------
    instance : object
        The class instance that the callback method is attached to
    prop : str
        The name of the callback method
    widget : QtWidget
        The Qt widget to connect. This should implement the ``clicked`` signal.
    """
    handler = getattr(instance, prop)
    widget.clicked.connect(handler)
def _find_combo_data(widget, value):
"""
Returns the index in a combo box where itemData == value
Raises a ValueError if data is not found
"""
# Here we check that the result is True, because some classes may overload
# == and return other kinds of objects whether true or false.
for idx in range(widget.count()):
if widget.itemData(idx) is value or (widget.itemData(idx) == value) is True:
return idx
else:
raise ValueError("%s not found in combo box" % (value,))
def connect_combo_selection(instance, prop, widget, display=str):
    """
    Connect a ``SelectionCallbackProperty`` with a QComboBox widget.

    The combo box entries are kept in sync with the property's choices, and
    the current selection is kept in sync with the property's value.

    Parameters
    ----------
    instance : object
        The class instance that the callback property is attached to
    prop : str
        The name of the callback property; must be a
        ``SelectionCallbackProperty``.
    widget : QComboBox
        The combo box to connect.
    display : callable, optional
        Kept for backward compatibility; not used by this implementation
        (labels come from the property's ``get_choice_labels``).

    Raises
    ------
    TypeError
        If ``prop`` is not a ``SelectionCallbackProperty``.
    """

    if not isinstance(getattr(type(instance), prop), SelectionCallbackProperty):
        raise TypeError('connect_combo_selection requires a SelectionCallbackProperty')

    def update_widget(value):

        # Update choices in the combo box
        combo_data = [widget.itemData(idx) for idx in range(widget.count())]
        choices = getattr(type(instance), prop).get_choices(instance)
        choice_labels = getattr(type(instance), prop).get_choice_labels(instance)

        if combo_data == choices:
            choices_updated = False
        else:
            widget.blockSignals(True)
            widget.clear()

            if len(choices) == 0:
                # BUG FIX: previously this returned with signals still
                # blocked, permanently silencing the widget afterwards.
                widget.blockSignals(False)
                return

            combo_model = widget.model()

            for index, (label, choice) in enumerate(zip(choice_labels, choices)):

                widget.addItem(label, userData=choice)

                # ChoiceSeparator entries act as non-selectable header rows,
                # shown with the palette's disabled text color.
                if isinstance(choice, ChoiceSeparator):
                    item = combo_model.item(index)
                    palette = widget.palette()
                    item.setFlags(item.flags() & ~(Qt.ItemIsSelectable | Qt.ItemIsEnabled))
                    item.setData(palette.color(QtGui.QPalette.Disabled, QtGui.QPalette.Text))

            choices_updated = True

        # Update current selection
        try:
            idx = _find_combo_data(widget, value)
        except ValueError:
            if value is None:
                idx = -1
            else:
                raise

        if idx == widget.currentIndex() and not choices_updated:
            return

        widget.setCurrentIndex(idx)
        widget.blockSignals(False)
        # Emit explicitly so listeners still run when signals were blocked
        # while the choices were being rebuilt above.
        widget.currentIndexChanged.emit(idx)

    def update_prop(idx):
        if idx == -1:
            setattr(instance, prop, None)
        else:
            setattr(instance, prop, widget.itemData(idx))

    add_callback(instance, prop, update_widget)
    widget.currentIndexChanged.connect(update_prop)

    # Initialize the widget from the property's current value.
    update_widget(getattr(instance, prop))
|
def _wrap(self, value, priority=0):
    """
    Given a function/method, this will automatically wrap a method using
    weakref to avoid circular references.
    """
    if not callable(value):
        raise TypeError("Only callable values can be stored in CallbackContainer")

    if self.is_bound_method(value):
        # Bound method references aren't persistent, so store weak
        # references to the underlying function and to the instance;
        # _auto_remove prunes the entry when the instance is collected.
        return (weakref.ref(value.__func__),
                weakref.ref(value.__self__, self._auto_remove),
                priority)

    # Plain callables are stored directly alongside their priority.
    return (value, priority)
class CallbackContainer(object):
"""
A list-like container for callback functions. We need to be careful with
storing references to methods, because if a callback method is on a class
which contains both the callback and a callback property, a circular
reference is created which results in a memory leak. Instead, we need to use
a weak reference which results in the callback being removed if the instance
is destroyed. This container class takes care of this automatically.
"""
def __init__(self):
    # Each entry is either (callable, priority) or, for bound methods,
    # (weakref-to-function, weakref-to-instance, priority) -- see _wrap.
    self.callbacks = []
def _auto_remove(self, method_instance):
    """
    Weakref finalizer: called when the instance on which a stored bound
    method was defined has been garbage collected; drops matching entries.
    """
    stale = [entry for entry in self.callbacks
             if isinstance(entry, tuple) and entry[1] is method_instance]
    for entry in stale:
        self.callbacks.remove(entry)
def __contains__(self, value):
    """Return True if *value* (function or bound method) is stored."""
    if self.is_bound_method(value):
        func, inst = value.__func__, value.__self__
        # Bound methods are stored as weakref pairs; compare by identity
        # of the dereferenced function and instance.
        for entry in self.callbacks[:]:
            if len(entry) == 3 and entry[0]() is func and entry[1]() is inst:
                return True
        return False
    # Plain callables are stored directly.
    for entry in self.callbacks[:]:
        if len(entry) == 2 and entry[0] is value:
            return True
    return False
def __iter__(self):
    """Yield the stored callbacks, highest priority first."""
    ordered = sorted(self.callbacks, key=lambda entry: entry[-1], reverse=True)
    for entry in ordered:
        if len(entry) == 3:
            # Re-bind the weakly referenced function to its instance.
            yield partial(entry[0](), entry[1]())
        else:
            yield entry[0]
def __len__(self):
    # Count of stored entries; dead weakref entries are pruned eagerly
    # by _auto_remove, so no filtering is needed here.
    return len(self.callbacks)
@staticmethod
def is_bound_method(func):
    """Return True if *func* looks like a method bound to an instance."""
    return getattr(func, '__self__', None) is not None and hasattr(func, '__func__')
def append(self, value, priority=0):
    # Store the callback (weakref-wrapped if it is a bound method; see
    # _wrap) together with its priority.
    self.callbacks.append(self._wrap(value, priority=priority))
def remove(self, value):
    """Remove every stored entry matching *value* (function or bound method)."""
    if self.is_bound_method(value):
        func, inst = value.__func__, value.__self__
        matches = [entry for entry in self.callbacks
                   if len(entry) == 3 and entry[0]() is func and entry[1]() is inst]
    else:
        matches = [entry for entry in self.callbacks
                   if len(entry) == 2 and entry[0] is value]
    for entry in matches:
        self.callbacks.remove(entry)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.