code stringlengths 1 1.72M | language stringclasses 1 value |
|---|---|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', collapsing counts of 10+ to 'many'."""
    quantity = str(count) if count < 10 else 'many'
    return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first 2 + last 2 chars of s, or '' when len(s) < 2."""
    if len(s) >= 2:
        return s[:2] + s[-2:]
    return ""
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Replace every later occurrence of s[0] with '*', keeping s[0] itself."""
    head = s[0]
    return head + s[1:].replace(head, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two chars of each string swapped."""
    a_swapped = b[:2] + a[2:]
    b_swapped = a[:2] + b[2:]
    return a_swapped + " " + b_swapped
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Exercise donuts/both_ends/fix_start/mix_up with sample inputs via test()."""
    print 'donuts'
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print
    print 'both_ends'
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print
    print 'fix_start'
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print
    print 'mix_up'
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')

# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', collapsing counts of 10+ to 'many'."""
    quantity = str(count) if count < 10 else 'many'
    return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first 2 + last 2 chars of s, or '' when len(s) < 2."""
    if len(s) >= 2:
        return s[:2] + s[-2:]
    return ""
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Replace every later occurrence of s[0] with '*', keeping s[0] itself."""
    head = s[0]
    return head + s[1:].replace(head, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two chars of each string swapped."""
    a_swapped = b[:2] + a[2:]
    b_swapped = a[:2] + b[2:]
    return a_swapped + " " + b_swapped
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Exercise donuts/both_ends/fix_start/mix_up with sample inputs via test()."""
    print 'donuts'
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print
    print 'both_ends'
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print
    print 'fix_start'
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print
    print 'mix_up'
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')

# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
    """Count words with length >= 2 whose first and last chars match."""
    return sum(1 for word in words if len(word) >= 2 and word[0] == word[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
    """Return words sorted, but with the strings starting with 'x' grouped first."""
    x_words = sorted(word for word in words if word[0] == "x")
    others = sorted(word for word in words if word[0] != "x")
    return x_words + others
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
    """Return tuples sorted in increasing order by each tuple's last element."""
    return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run the match_ends/front_x/sort_last checks via test()."""
    print 'match_ends'
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print
    print 'front_x'
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print
    print 'sort_last'
    test(sort_last([(1, 3), (3, 2), (2, 1)]),
         [(2, 1), (3, 2), (1, 3)])
    test(sort_last([(2, 3), (1, 2), (3, 1)]),
         [(3, 1), (1, 2), (2, 3)])
    test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
         [(2, 2), (1, 3), (3, 4, 5), (1, 7)])

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    """Return a copy of nums with runs of equal ADJACENT elements collapsed.

    e.g. [1, 2, 2, 3] -> [1, 2, 3]. A value may still repeat non-adjacently:
    [1, 2, 1] -> [1, 2, 1].

    Fix: the original compared each element against ALL previously kept
    elements, which removed every duplicate (a set-style de-dup), not just
    adjacent ones — [1, 2, 1] wrongly became [1, 2]. Only the most recently
    kept element matters.
    """
    result = []
    for num in nums:
        # Keep num only when it differs from the last element we kept.
        if not result or result[-1] != num:
            result.append(num)
    return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two lists sorted in increasing order into one sorted list.

    Runs in true linear time by walking both lists with index cursors.

    Fix: the original used list.pop(0), which is O(n) per call (the file's
    own trailing note admits the result was not linear), and it destructively
    emptied the caller's lists. Index cursors keep it O(n) and side-effect
    free.
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        # On ties take from list2 first, matching the original's ordering.
        if list1[i] < list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run the remove_adjacent/linear_merge checks via test()."""
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def array_of_words(filename):
    """Read filename and return a dict mapping each lowercase word -> count.

    Words are split on whitespace via str.split(); case is folded so 'The'
    and 'the' count together.

    Fix: the original left the file open if an exception occurred mid-read;
    `with` guarantees the file is closed. Counting uses dict.get instead of
    the two-branch membership test.
    """
    counts = {}
    with open(filename, 'r') as f:
        for line in f:
            for word in line.split():
                word = word.lower()
                counts[word] = counts.get(word, 0) + 1
    return counts
def print_words(filename):
array_words = array_of_words(filename)
sort_by_words = sorted(array_words.keys())
for word in sort_by_words:
print word, array_words[word]
###
def print_top(filename):
array_words = array_of_words(filename)
def mykey(value):
return value[1]
words = sorted(array_words.items(), key=mykey, reverse=True)
for i in words[:20]:
print i[0], i[1]
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse '{--count | --topcount} file' from argv and dispatch accordingly."""
    if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print 'unknown option: ' + option
        sys.exit(1)

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to s (or 'ly' if it already ends in 'ing'); len < 3 unchanged."""
    if len(s) < 3:
        return s
    return s + ("ly" if s.endswith("ing") else "ing")
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' follows 'not'."""
    not_pos = s.find("not")
    bad_pos = s.find("bad")
    if not_pos != -1 and bad_pos != -1 and not_pos < bad_pos:
        return s[:not_pos] + "good" + s[bad_pos + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back.

    Each string splits into halves; when its length is odd the extra char
    goes in the front half (e.g. 'abcde' -> 'abc' + 'de').

    Fix: use floor division `//` instead of `/`. In Python 2 they are
    identical for ints, but `/` becomes true division in Python 3 and
    would make the slice indices floats. `(len + 1) // 2` rounds up,
    replacing the separate odd-length adjustment.
    """
    split_a = (len(a) + 1) // 2
    split_b = (len(b) + 1) // 2
    return a[:split_a] + b[:split_b] + a[split_a:] + b[split_b:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Run the verbing/not_bad/front_back checks via test()."""
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print
    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print
    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    """Return a copy of nums with runs of equal ADJACENT elements collapsed.

    e.g. [1, 2, 2, 3] -> [1, 2, 3]. A value may still repeat non-adjacently:
    [1, 2, 1] -> [1, 2, 1].

    Fix: the original compared each element against ALL previously kept
    elements, which removed every duplicate (a set-style de-dup), not just
    adjacent ones — [1, 2, 1] wrongly became [1, 2]. Only the most recently
    kept element matters.
    """
    result = []
    for num in nums:
        # Keep num only when it differs from the last element we kept.
        if not result or result[-1] != num:
            result.append(num)
    return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two lists sorted in increasing order into one sorted list.

    Runs in true linear time by walking both lists with index cursors.

    Fix: the original used list.pop(0), which is O(n) per call (the file's
    own trailing note admits the result was not linear), and it destructively
    emptied the caller's lists. Index cursors keep it O(n) and side-effect
    free.
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        # On ties take from list2 first, matching the original's ordering.
        if list1[i] < list2[j]:
            merged.append(list1[i])
            i += 1
        else:
            merged.append(list2[j])
            j += 1
    # At most one of these extends with anything.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run the remove_adjacent/linear_merge checks via test()."""
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def array_of_words(filename):
    """Read filename and return a dict mapping each lowercase word -> count.

    Words are split on whitespace via str.split(); case is folded so 'The'
    and 'the' count together.

    Fix: the original left the file open if an exception occurred mid-read;
    `with` guarantees the file is closed. Counting uses dict.get instead of
    the two-branch membership test.
    """
    counts = {}
    with open(filename, 'r') as f:
        for line in f:
            for word in line.split():
                word = word.lower()
                counts[word] = counts.get(word, 0) + 1
    return counts
def print_words(filename):
array_words = array_of_words(filename)
sort_by_words = sorted(array_words.keys())
for word in sort_by_words:
print word, array_words[word]
###
def print_top(filename):
array_words = array_of_words(filename)
def mykey(value):
return value[1]
words = sorted(array_words.items(), key=mykey, reverse=True)
for i in words[:20]:
print i[0], i[1]
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse '{--count | --topcount} file' from argv and dispatch accordingly."""
    if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print 'unknown option: ' + option
        sys.exit(1)

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to s (or 'ly' if it already ends in 'ing'); len < 3 unchanged."""
    if len(s) < 3:
        return s
    return s + ("ly" if s.endswith("ing") else "ing")
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' follows 'not'."""
    not_pos = s.find("not")
    bad_pos = s.find("bad")
    if not_pos != -1 and bad_pos != -1 and not_pos < bad_pos:
        return s[:not_pos] + "good" + s[bad_pos + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back.

    Each string splits into halves; when its length is odd the extra char
    goes in the front half (e.g. 'abcde' -> 'abc' + 'de').

    Fix: use floor division `//` instead of `/`. In Python 2 they are
    identical for ints, but `/` becomes true division in Python 3 and
    would make the slice indices floats. `(len + 1) // 2` rounds up,
    replacing the separate odd-length adjustment.
    """
    split_a = (len(a) + 1) // 2
    split_b = (len(b) + 1) // 2
    return a[:split_a] + b[:split_b] + a[split_a:] + b[split_b:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Run the verbing/not_bad/front_back checks via test()."""
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print
    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print
    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
    """Count words of length >= 2 that start and end with the same char."""
    matches = 0
    for word in words:
        if len(word) < 2:
            continue
        if word[0] == word[-1]:
            matches += 1
    return matches
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
    """Return a sorted copy of words with the 'x'-initial strings grouped first."""
    starts_with_x = [w for w in words if w[0] == 'x']
    the_rest = [w for w in words if w[0] != 'x']
    return sorted(starts_with_x) + sorted(the_rest)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def last(tuple):
    """Return the final element of the given sequence."""
    return tuple[-1]

def sort_last(tuples):
    """Return tuples sorted in increasing order by last element, keyed by last()."""
    return sorted(tuples, key=last)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run the match_ends/front_x/sort_last checks via test()."""
    print 'match_ends'
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print
    print 'front_x'
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print
    print 'sort_last'
    test(sort_last([(1, 3), (3, 2), (2, 1)]),
         [(2, 1), (3, 2), (1, 3)])
    test(sort_last([(2, 3), (1, 2), (3, 1)]),
         [(3, 1), (1, 2), (2, 3)])
    test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
         [(2, 2), (1, 3), (3, 4, 5), (1, 7)])

if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Describe a donut count, saying 'many' once it reaches 10."""
    if count >= 10:
        return 'Number of donuts: many'
    return 'Number of donuts: %d' % count
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first two plus last two chars of s, or '' when len(s) < 2.

    Fix: removed an unreachable bare `return` that followed the if/else —
    both branches already return, so it was dead code.
    """
    if len(s) < 2:
        return ''
    return s[0:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Star out later repeats of the first character, leaving position 0 intact."""
    first = s[0]
    starred = ''.join('*' if ch == first else ch for ch in s[1:])
    return first + starred
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Join a and b with a space after swapping their first two chars."""
    return '%s%s %s%s' % (b[0:2], a[2:], a[0:2], b[2:])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Run donuts/both_ends/fix_start/mix_up against their expected outputs."""
    print 'donuts'
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print
    print 'both_ends'
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print
    print 'fix_start'
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print
    print 'mix_up'
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', using 'many' when count >= 10."""
    return 'Number of donuts: many' if count >= 10 else 'Number of donuts: %d' % count
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first 2 + last 2 chars of s, or '' if s is shorter than 2.

    The original had an unreachable bare `return` after the if/else; both
    branches already return, so that dead line is removed.
    """
    if len(s) < 2:
        return ''
    return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Return s with later occurrences of its first char replaced by '*'."""
    first = s[0]
    # Only the tail is rewritten, so the leading char itself is untouched.
    return first + s[1:].replace(first, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two chars of each string swapped."""
    swapped_a = b[:2] + a[2:]
    swapped_b = a[:2] + b[2:]
    return swapped_a + ' ' + swapped_b
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Run donuts/both_ends/fix_start/mix_up against their expected outputs."""
    print 'donuts'
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print
    print 'both_ends'
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print
    print 'fix_start'
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print
    print 'mix_up'
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
    """Count words of length >= 2 whose first and last chars are equal."""
    # Length is checked first so w[0]/w[-1] never index an empty string.
    return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
    """Return words sorted alphabetically, words starting with 'x' first.

    Uses startswith instead of word[0] so an empty string cannot raise
    IndexError; the stale '+++your code here+++' placeholder is removed.
    """
    x_words = []
    others = []
    for word in words:
        if word.startswith('x'):
            x_words.append(word)
        else:
            others.append(word)
    return sorted(x_words) + sorted(others)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
    """Return tuples sorted in increasing order by each tuple's last element.

    The key is inlined with negative indexing instead of delegating to the
    module-level `last` helper, making the function self-contained.
    """
    return sorted(tuples, key=lambda t: t[-1])
def last(tuple):
    """Return the final element of the given sequence."""
    # Negative indexing replaces the len()-based arithmetic of the original.
    return tuple[-1]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run match_ends/front_x/sort_last against their expected outputs."""
    print 'match_ends'
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print
    print 'front_x'
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print
    print 'sort_last'
    test(sort_last([(1, 3), (3, 2), (2, 1)]),
         [(2, 1), (3, 2), (1, 3)])
    test(sort_last([(2, 3), (1, 2), (3, 1)]),
         [(3, 1), (1, 2), (2, 3)])
    test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
         [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    """Return nums with runs of equal adjacent elements collapsed to one."""
    collapsed = []
    for value in nums:
        # Keep a value only when it differs from the previous kept one;
        # the empty-result check also seeds the first element.
        if not collapsed or collapsed[-1] != value:
            collapsed.append(value)
    return collapsed
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two sorted lists into one sorted list in a single linear pass.

    The exercise asks for linear time; extend + sort is O(n log n), so this
    uses a classic two-pointer merge instead. Like the original, list1 is
    updated in place with the merged result and returned. Ties take the
    element from list1 first, matching the stable-sort behavior.
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list2[j] < list1[i]:
            merged.append(list2[j])
            j += 1
        else:
            merged.append(list1[i])
            i += 1
    # At most one of these still has elements; both extends are cheap.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    list1[:] = merged
    return list1
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run remove_adjacent/linear_merge against their expected outputs."""
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
import re
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def read_file(filename):
    """Read filename and return a dict mapping lowercased word -> count.

    The punctuation listed in the regex character class is stripped before
    splitting on whitespace.
    """
    # 'with' guarantees the handle is closed even if reading fails;
    # the original left the file open.
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[/.,\';:?!-]", "", text)
    words = text.lower().split()
    counts = {}
    for word in words:
        # dict.get with a default replaces the explicit membership branch.
        counts[word] = counts.get(word, 0) + 1
    return counts
def print_words(filename):
    """Print each distinct word in filename with its count, sorted by word."""
    wordsMap = read_file(filename)
    keys = sorted(wordsMap.keys())
    for key in keys:
        print key, wordsMap[key]
    return
def print_top(filename):
wordsMap = read_file(filename)
items=sorted(wordsMap.items(),key=lambda (k, v): v,reverse=True)
for item in items[:20]:
print item[0], item[1]
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse --count/--topcount from argv and dispatch to the right printer."""
    if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print 'unknown option: ' + option
        sys.exit(1)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to s ('ly' if it already ends in 'ing'); len < 3 unchanged."""
    if len(s) < 3:
        return s
    return s + ('ly' if s.endswith('ing') else 'ing')
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' follows 'not'.

    Both substrings must actually be present: find() returns -1 when missing,
    and the original treated a missing 'not' as index -1, mangling strings
    like 'so bad' that contain 'bad' but no 'not'. Slicing also avoids
    str.replace, which would rewrite every occurrence of the matched span.
    """
    n = s.find('not')
    b = s.find('bad')
    if n != -1 and b > n:
        return s[:n] + 'good' + s[b + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back; odd extra char goes in front."""
    # Floor division keeps the split indices integral; plain / yields a
    # float under Python 3 semantics and breaks the slicing.
    af = (len(a) + 1) // 2
    bf = (len(b) + 1) // 2
    return a[:af] + b[:bf] + a[af:] + b[bf:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Run verbing/not_bad/front_back against their expected outputs."""
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print
    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print
    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    """Return nums with runs of equal adjacent elements collapsed to one."""
    collapsed = []
    for value in nums:
        # Keep a value only when it differs from the previous kept one;
        # the empty-result check also seeds the first element.
        if not collapsed or collapsed[-1] != value:
            collapsed.append(value)
    return collapsed
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two sorted lists into one sorted list in a single linear pass.

    The exercise asks for linear time; extend + sort is O(n log n), so this
    uses a classic two-pointer merge instead. Like the original, list1 is
    updated in place with the merged result and returned. Ties take the
    element from list1 first, matching the stable-sort behavior.
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list2[j] < list1[i]:
            merged.append(list2[j])
            j += 1
        else:
            merged.append(list1[i])
            i += 1
    # At most one of these still has elements; both extends are cheap.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    list1[:] = merged
    return list1
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run remove_adjacent/linear_merge against their expected outputs."""
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
import re
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def read_file(filename):
    """Read filename and return a dict mapping lowercased word -> count.

    The punctuation listed in the regex character class is stripped before
    splitting on whitespace.
    """
    # 'with' guarantees the handle is closed even if reading fails;
    # the original left the file open.
    with open(filename, 'r') as f:
        text = f.read()
    text = re.sub("[/.,\';:?!-]", "", text)
    words = text.lower().split()
    counts = {}
    for word in words:
        # dict.get with a default replaces the explicit membership branch.
        counts[word] = counts.get(word, 0) + 1
    return counts
def print_words(filename):
    """Print each distinct word in filename with its count, sorted by word."""
    wordsMap = read_file(filename)
    keys = sorted(wordsMap.keys())
    for key in keys:
        print key, wordsMap[key]
    return
def print_top(filename):
wordsMap = read_file(filename)
items=sorted(wordsMap.items(),key=lambda (k, v): v,reverse=True)
for item in items[:20]:
print item[0], item[1]
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
    """Parse --count/--topcount from argv and dispatch to the right printer."""
    if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print 'unknown option: ' + option
        sys.exit(1)
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to s ('ly' if it already ends in 'ing'); len < 3 unchanged."""
    if len(s) < 3:
        return s
    return s + ('ly' if s.endswith('ing') else 'ing')
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' follows 'not'.

    Both substrings must actually be present: find() returns -1 when missing,
    and the original treated a missing 'not' as index -1, mangling strings
    like 'so bad' that contain 'bad' but no 'not'. Slicing also avoids
    str.replace, which would rewrite every occurrence of the matched span.
    """
    n = s.find('not')
    b = s.find('bad')
    if n != -1 and b > n:
        return s[:n] + 'good' + s[b + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back; odd extra char goes in front."""
    # Floor division keeps the split indices integral; plain / yields a
    # float under Python 3 semantics and breaks the slicing.
    af = (len(a) + 1) // 2
    bf = (len(b) + 1) // 2
    return a[:af] + b[:bf] + a[af:] + b[bf:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Run verbing/not_bad/front_back against their expected outputs."""
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print
    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print
    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
    """Count words of length >= 2 whose first and last chars are equal."""
    # Length is checked first so w[0]/w[-1] never index an empty string;
    # equivalent to the original's slice comparison s[:1] == s[-1:].
    return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
    """Return words sorted alphabetically, words starting with 'x' first."""
    starts_with_x = [w for w in words if w.startswith('x')]
    the_rest = [w for w in words if not w.startswith('x')]
    return sorted(starts_with_x) + sorted(the_rest)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def get_last(arr):
    """Return a length-1 slice holding arr's final element (sort-key helper).

    A slice (not the bare element) is returned; as a sort key a one-element
    sequence orders the same way the element itself would.
    """
    return arr[-1:]
def sort_last(tuples):
    """Return tuples sorted in increasing order by each tuple's last element."""
    return sorted(tuples, key=get_last)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Run match_ends/front_x/sort_last against their expected outputs."""
    print 'match_ends'
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print
    print 'front_x'
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print
    print 'sort_last'
    test(sort_last([(1, 3), (3, 2), (2, 1)]),
         [(2, 1), (3, 2), (1, 3)])
    test(sort_last([(2, 3), (1, 2), (3, 1)]),
         [(3, 1), (1, 2), (2, 3)])
    test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
         [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
    """Return a mimic dict mapping each word to the list of words that follow it.

    Implements the exercise spec (the original was a stub returning None,
    which would crash main()): the empty string is the key that precedes
    the first word, and the follower lists keep duplicates in file order.
    """
    with open(filename, 'r') as f:
        words = f.read().split()
    mimic = {}
    prev = ''
    for word in words:
        # setdefault creates the follower list on first sight of prev.
        mimic.setdefault(prev, []).append(word)
        prev = word
    return mimic
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
# +++your code here+++
return
# Provided main(), calls mimic_dict() and mimic()
def main():
    """Read the file named on the command line and print 200 mimic words."""
    if len(sys.argv) != 2:
        print 'usage: ./mimic.py file-to-read'
        sys.exit(1)
    # NOTE(review): this local name shadows the builtin 'dict'.
    dict = mimic_dict(sys.argv[1])
    print_mimic(dict, '')
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
# Minimal hello-world smoke test for the Python setup.
# NOTE(review): sys is imported but never used here -- confirm before removing.
import sys
print "hello world!"
| Python |
#!/usr/bin/python
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', or 'many' when count is 10 or more."""
    return 'Number of donuts: ' + ('many' if count > 9 else str(count))
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first 2 + last 2 chars of s, or '' when s is shorter than 2."""
    if len(s) < 2:
        return ''
    # Negative slicing replaces the len()-based index arithmetic.
    return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Return s with every later occurrence of its first char turned into '*'."""
    first = s[0]
    # Rebuild the tail character by character; position 0 is left alone.
    return first + ''.join('*' if ch == first else ch for ch in s[1:])
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two characters of each string swapped."""
    return '%s%s %s%s' % (b[:2], a[2:], a[:2], b[2:])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Run donuts/both_ends/fix_start/mix_up against their expected outputs."""
    print 'donuts'
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')
    print
    print 'both_ends'
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')
    print
    print 'fix_start'
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')
    print
    print 'mix_up'
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
    main()
| Python |
#!/usr/bin/python
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', or 'many' when count is 10 or more."""
    return 'Number of donuts: ' + ('many' if count > 9 else str(count))
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first 2 + last 2 chars of s, or '' when s is shorter than 2."""
    if len(s) < 2:
        return ''
    # Negative slicing replaces the len()-based index arithmetic.
    return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
    """Return s with every later occurrence of its first char turned into '*'."""
    first = s[0]
    # Rebuild the tail character by character; position 0 is left alone.
    return first + ''.join('*' if ch == first else ch for ch in s[1:])
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return '<a> <b>' with the first two characters of each string swapped."""
    return '%s%s %s%s' % (b[:2], a[2:], a[:2], b[2:])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print ' OK ' or ' X ' plus got vs. expected for one check."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # repr() makes quoting/whitespace differences visible in the output.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
print 'donuts'
# Each line calls donuts, compares its result to the expected for that call.
test(donuts(4), 'Number of donuts: 4')
test(donuts(9), 'Number of donuts: 9')
test(donuts(10), 'Number of donuts: many')
test(donuts(99), 'Number of donuts: many')
print
print 'both_ends'
test(both_ends('spring'), 'spng')
test(both_ends('Hello'), 'Helo')
test(both_ends('a'), '')
test(both_ends('xyz'), 'xyyz')
print
print 'fix_start'
test(fix_start('babble'), 'ba**le')
test(fix_start('aardvark'), 'a*rdv*rk')
test(fix_start('google'), 'goo*le')
test(fix_start('donut'), 'donut')
print
print 'mix_up'
test(mix_up('mix', 'pod'), 'pox mid')
test(mix_up('dog', 'dinner'), 'dig donner')
test(mix_up('gnash', 'sport'), 'spash gnort')
test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Count the strings of length >= 2 whose first and last chars match."""
  return sum(1 for w in words if len(w) >= 2 and w[:1] == w[-1:])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Return words sorted, except strings starting with 'x' are grouped first."""
  x_words = [w for w in words if w.startswith('x')]
  other_words = [w for w in words if not w.startswith('x')]
  return sorted(x_words) + sorted(other_words)
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
# Sort-key helper: the last element of a sequence, as a one-element slice
# (arr[-1:] rather than arr[-1]).
def get_last(arr):
  return arr[-1:]
def sort_last(tuples):
  """Return tuples sorted in increasing order by each tuple's last element."""
  return sorted(tuples, key=get_last)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise match_ends/front_x/sort_last against expected values."""
  print 'match_ends'
  test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
  print
  print 'front_x'
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
  print
  print 'sort_last'
  test(sort_last([(1, 3), (3, 2), (2, 1)]),
       [(2, 1), (3, 2), (1, 3)])
  test(sort_last([(2, 3), (1, 2), (3, 1)]),
       [(3, 1), (1, 2), (2, 3)])
  test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
       [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  """Collapse runs of equal adjacent elements into a single element.

  e.g. [1, 2, 2, 3] -> [1, 2, 3].  Returns a new list.

  Bug fix: the previous version tested 'n not in arr', which removes ALL
  duplicates (e.g. it turned [1, 2, 1] into [1, 2]) instead of only
  adjacent ones, and was O(n^2) besides.
  """
  result = []
  for n in nums:
    # Keep n only when it differs from the most recently kept element.
    if not result or result[-1] != n:
      result.append(n)
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  """Merge two sorted lists into one sorted list in linear time.

  The previous version used sorted(list1 + list2), which is O(n log n);
  this single pass with two indices is O(n), as the exercise asks.
  """
  merged = []
  i = j = 0
  while i < len(list1) and j < len(list2):
    # <= keeps the merge stable: ties are taken from list1 first.
    if list1[i] <= list2[j]:
      merged.append(list1[i])
      i += 1
    else:
      merged.append(list2[j])
      j += 1
  # At most one of these extends is non-empty.
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise remove_adjacent/linear_merge against expected values."""
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])
  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
import sys  # NOTE(review): unused here; left in place rather than removed.
# A single parenthesized argument prints identically under Python 2 and 3,
# so this line is portable across both interpreters.
print("hello world!")
| Python |
#!/usr/bin/python
import sys
def read_and_count(filename):
  """Return a dict mapping each lowercased word in the file to its count.

  Words come from whitespace split(), so punctuation stays attached.
  Fixes: the previous version shadowed the builtins 'file' and 'str',
  and closed the file manually; 'with' closes it even on error.
  """
  word_counts = {}
  with open(filename, 'r') as f:
    for line in f:
      for word in line.split():
        word = word.lower()
        # get() supplies 0 for first-seen words, replacing the if/else.
        word_counts[word] = word_counts.get(word, 0) + 1
  return word_counts
#
def print_words(filename):
  """Print each word and its count, one per line, sorted by word."""
  words_map=read_and_count(filename)
  keys=sorted(words_map.keys())
  for key in keys:
    print key, words_map[key]
  return
#
def get_count(word):
  # Sort key: extracts the count from a (word, count) item pair.
  return word[1]
def print_top(filename):
  """Print the 20 most common words, most common first."""
  words_map = read_and_count(filename)
  items = sorted(words_map.items(), key=get_count, reverse=True)
  for i in items[:20]:
    print i[0], i[1]
def main():
  """Parse '{--count | --topcount} file' from argv and dispatch."""
  if len(sys.argv) != 3:
    print 'usage: ./wordcount.py {--count | --topcount} file'
    sys.exit(1)
  option = sys.argv[1]
  filename = sys.argv[2]
  if option == '--count':
    print_words(filename)
  elif option == '--topcount':
    print_top(filename)
  else:
    print 'unknown option: ' + option
    sys.exit(1)
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
  """Returns mimic dict mapping each word to list of words which follow it.

  The empty string '' is the key whose list holds the file's first word,
  per the module docstring.
  """
  mimic = {}
  with open(filename) as f:
    words = f.read().split()
  prev = ''
  for word in words:
    # setdefault creates the follower list the first time prev is seen.
    mimic.setdefault(prev, []).append(word)
    prev = word
  return mimic
def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words.

  When the current word has no recorded followers, restarts from the ''
  key so output keeps flowing, as the module docstring suggests.
  """
  for _ in range(200):
    print(word)  # parenthesized single arg behaves the same in Python 2 and 3
    followers = mimic_dict.get(word)
    if not followers:
      followers = mimic_dict.get('', [''])
    word = random.choice(followers)
# Provided main(), calls mimic_dict() and mimic()
def main():
  """Read the file named on the command line and print mimic text."""
  if len(sys.argv) != 2:
    print 'usage: ./mimic.py file-to-read'
    sys.exit(1)
  # NOTE(review): the local name 'dict' shadows the builtin dict type.
  dict = mimic_dict(sys.argv[1])
  print_mimic(dict, '')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
  """Append 'ing' to s ('ly' if s already ends in 'ing'); strings shorter
  than 3 chars are returned unchanged."""
  if len(s) < 3:
    return s
  suffix = 'ly' if s.endswith('ing') else 'ing'
  return s + suffix
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  """Replace the first 'not'...'bad' span with 'good' when 'bad' follows 'not'."""
  not_pos = s.find('not')
  bad_pos = s.find('bad')
  # Guard clauses: leave s alone unless both substrings exist in order.
  if not_pos == -1 or bad_pos == -1 or bad_pos <= not_pos:
    return s
  return s[:not_pos] + 'good' + s[bad_pos + 3:]
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
  """Return a-front + b-front + a-back + b-back.

  The front half gets the extra char when a string's length is odd.
  Fix: use floor division '//' so the midpoints stay ints under both
  Python 2 and Python 3 (plain '/' yields a float in Python 3, which
  breaks slicing).  (len + 1) // 2 rounds the split point up, replacing
  the separate odd-length adjustments.
  """
  a_mid = (len(a) + 1) // 2
  b_mid = (len(b) + 1) // 2
  return a[:a_mid] + b[:b_mid] + a[a_mid:] + b[b_mid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
  """Exercise verbing/not_bad/front_back against expected values."""
  print 'verbing'
  test(verbing('hail'), 'hailing')
  test(verbing('swiming'), 'swimingly')
  test(verbing('do'), 'do')
  print
  print 'not_bad'
  test(not_bad('This movie is not so bad'), 'This movie is good')
  test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  test(not_bad('This tea is not hot'), 'This tea is not hot')
  test(not_bad("It's bad yet not"), "It's bad yet not")
  print
  print 'front_back'
  test(front_back('abcd', 'xy'), 'abxcdy')
  test(front_back('abcde', 'xyz'), 'abcxydez')
  test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  """Collapse runs of equal adjacent elements into a single element.

  e.g. [1, 2, 2, 3] -> [1, 2, 3].  Returns a new list.

  Bug fix: the previous version tested 'n not in arr', which removes ALL
  duplicates (e.g. it turned [1, 2, 1] into [1, 2]) instead of only
  adjacent ones, and was O(n^2) besides.
  """
  result = []
  for n in nums:
    # Keep n only when it differs from the most recently kept element.
    if not result or result[-1] != n:
      result.append(n)
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  """Merge two sorted lists into one sorted list in linear time.

  The previous version used sorted(list1 + list2), which is O(n log n);
  this single pass with two indices is O(n), as the exercise asks.
  """
  merged = []
  i = j = 0
  while i < len(list1) and j < len(list2):
    # <= keeps the merge stable: ties are taken from list1 first.
    if list1[i] <= list2[j]:
      merged.append(list1[i])
      i += 1
    else:
      merged.append(list2[j])
      j += 1
  # At most one of these extends is non-empty.
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise remove_adjacent/linear_merge against expected values."""
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])
  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
import sys
def read_and_count(filename):
  """Return a dict mapping each lowercased word in the file to its count.

  Words come from whitespace split(), so punctuation stays attached.
  Fixes: the previous version shadowed the builtins 'file' and 'str',
  and closed the file manually; 'with' closes it even on error.
  """
  word_counts = {}
  with open(filename, 'r') as f:
    for line in f:
      for word in line.split():
        word = word.lower()
        # get() supplies 0 for first-seen words, replacing the if/else.
        word_counts[word] = word_counts.get(word, 0) + 1
  return word_counts
#
def print_words(filename):
  """Print each word and its count, one per line, sorted by word."""
  words_map=read_and_count(filename)
  keys=sorted(words_map.keys())
  for key in keys:
    print key, words_map[key]
  return
#
def get_count(word):
  # Sort key: extracts the count from a (word, count) item pair.
  return word[1]
def print_top(filename):
  """Print the 20 most common words, most common first."""
  words_map = read_and_count(filename)
  items = sorted(words_map.items(), key=get_count, reverse=True)
  for i in items[:20]:
    print i[0], i[1]
def main():
  """Parse '{--count | --topcount} file' from argv and dispatch."""
  if len(sys.argv) != 3:
    print 'usage: ./wordcount.py {--count | --topcount} file'
    sys.exit(1)
  option = sys.argv[1]
  filename = sys.argv[2]
  if option == '--count':
    print_words(filename)
  elif option == '--topcount':
    print_top(filename)
  else:
    print 'unknown option: ' + option
    sys.exit(1)
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
  """Append 'ing' to s ('ly' if s already ends in 'ing'); strings shorter
  than 3 chars are returned unchanged."""
  if len(s) < 3:
    return s
  suffix = 'ly' if s.endswith('ing') else 'ing'
  return s + suffix
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  """Replace the first 'not'...'bad' span with 'good' when 'bad' follows 'not'."""
  not_pos = s.find('not')
  bad_pos = s.find('bad')
  # Guard clauses: leave s alone unless both substrings exist in order.
  if not_pos == -1 or bad_pos == -1 or bad_pos <= not_pos:
    return s
  return s[:not_pos] + 'good' + s[bad_pos + 3:]
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
  """Return a-front + b-front + a-back + b-back.

  The front half gets the extra char when a string's length is odd.
  Fix: use floor division '//' so the midpoints stay ints under both
  Python 2 and Python 3 (plain '/' yields a float in Python 3, which
  breaks slicing).  (len + 1) // 2 rounds the split point up, replacing
  the separate odd-length adjustments.
  """
  a_mid = (len(a) + 1) // 2
  b_mid = (len(b) + 1) // 2
  return a[:a_mid] + b[:b_mid] + a[a_mid:] + b[b_mid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
  """Exercise verbing/not_bad/front_back against expected values."""
  print 'verbing'
  test(verbing('hail'), 'hailing')
  test(verbing('swiming'), 'swimingly')
  test(verbing('do'), 'do')
  print
  print 'not_bad'
  test(not_bad('This movie is not so bad'), 'This movie is good')
  test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  test(not_bad('This tea is not hot'), 'This tea is not hot')
  test(not_bad("It's bad yet not"), "It's bad yet not")
  print
  print 'front_back'
  test(front_back('abcd', 'xy'), 'abxcdy')
  test(front_back('abcde', 'xyz'), 'abcxydez')
  test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Count strings of length >= 2 whose first and last chars are equal.

  Implements the '+++your code here+++' placeholder, consistent with the
  solved version elsewhere in this file.
  """
  count = 0
  for word in words:
    if len(word) >= 2 and word[0] == word[-1]:
      count += 1
  return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Return words sorted, except strings starting with 'x' are grouped first.

  Implements the '+++your code here+++' placeholder: split into the two
  groups, sort each, and concatenate.
  """
  x_words = sorted(w for w in words if w.startswith('x'))
  other_words = sorted(w for w in words if not w.startswith('x'))
  return x_words + other_words
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
  """Return tuples sorted in increasing order by each tuple's last element.

  Implements the '+++your code here+++' placeholder with a key= function,
  as the exercise hint suggests.
  """
  return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise match_ends/front_x/sort_last against expected values."""
  print 'match_ends'
  test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
  print
  print 'front_x'
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
  print
  print 'sort_last'
  test(sort_last([(1, 3), (3, 2), (2, 1)]),
       [(2, 1), (3, 2), (1, 3)])
  test(sort_last([(2, 3), (1, 2), (3, 1)]),
       [(3, 1), (1, 2), (2, 3)])
  test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
       [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
  """Returns mimic dict mapping each word to list of words which follow it.

  Implements the '+++your code here+++' placeholder.  The empty string ''
  is the key whose list holds the file's first word, per the module
  docstring.
  """
  mimic = {}
  with open(filename) as f:
    words = f.read().split()
  prev = ''
  for word in words:
    # setdefault creates the follower list the first time prev is seen.
    mimic.setdefault(prev, []).append(word)
    prev = word
  return mimic
def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words.

  Implements the '+++your code here+++' placeholder.  When the current
  word has no recorded followers, restarts from the '' key so output
  keeps flowing, as the module docstring suggests.
  """
  for _ in range(200):
    print(word)  # parenthesized single arg behaves the same in Python 2 and 3
    followers = mimic_dict.get(word)
    if not followers:
      followers = mimic_dict.get('', [''])
    word = random.choice(followers)
# Provided main(), calls mimic_dict() and mimic()
def main():
  """Read the file named on the command line and print mimic text."""
  if len(sys.argv) != 2:
    print 'usage: ./mimic.py file-to-read'
    sys.exit(1)
  # NOTE(review): the local name 'dict' shadows the builtin dict type.
  dict = mimic_dict(sys.argv[1])
  print_mimic(dict, '')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', or '... many' when count is 10+.

  Implements the '+++your code here+++' placeholder.
  """
  if count >= 10:
    return 'Number of donuts: many'
  return 'Number of donuts: ' + str(count)
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first 2 + last 2 chars of s, or '' when len(s) < 2.

  Implements the '+++your code here+++' placeholder.
  """
  if len(s) < 2:
    return ''
  return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Replace later occurrences of s's first char with '*'; keep the first.

  Implements the '+++your code here+++' placeholder.  Assumes len(s) >= 1
  per the exercise statement.
  """
  first = s[0]
  return first + s[1:].replace(first, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Return '<a> <b>' with the first two chars of each string swapped.

  Implements the '+++your code here+++' placeholder.  Assumes both strings
  have length >= 2 per the exercise statement.
  """
  return b[:2] + a[2:] + ' ' + a[:2] + b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Exercise donuts/both_ends/fix_start/mix_up against expected values."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', or '... many' when count is 10+.

  Implements the '+++your code here+++' placeholder.
  """
  if count >= 10:
    return 'Number of donuts: many'
  return 'Number of donuts: ' + str(count)
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first 2 + last 2 chars of s, or '' when len(s) < 2.

  Implements the '+++your code here+++' placeholder.
  """
  if len(s) < 2:
    return ''
  return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Replace later occurrences of s's first char with '*'; keep the first.

  Implements the '+++your code here+++' placeholder.  Assumes len(s) >= 1
  per the exercise statement.
  """
  first = s[0]
  return first + s[1:].replace(first, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Return '<a> <b>' with the first two chars of each string swapped.

  Implements the '+++your code here+++' placeholder.  Assumes both strings
  have length >= 2 per the exercise statement.
  """
  return b[:2] + a[2:] + ' ' + a[:2] + b[2:]
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Exercise donuts/both_ends/fix_start/mix_up against expected values."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic list exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in list2.py.
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Count strings of length >= 2 whose first and last chars are equal.

  Implements the '+++your code here+++' placeholder.
  """
  count = 0
  for word in words:
    if len(word) >= 2 and word[0] == word[-1]:
      count += 1
  return count
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Return words sorted, except strings starting with 'x' are grouped first.

  Implements the '+++your code here+++' placeholder.
  """
  x_words = sorted(w for w in words if w.startswith('x'))
  other_words = sorted(w for w in words if not w.startswith('x'))
  return x_words + other_words
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
  """Return tuples sorted in increasing order by each tuple's last element.

  Implements the '+++your code here+++' placeholder with a key= function,
  as the exercise hint suggests.
  """
  return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise match_ends/front_x/sort_last against expected values."""
  print 'match_ends'
  test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
  print
  print 'front_x'
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
  print
  print 'sort_last'
  test(sort_last([(1, 3), (3, 2), (2, 1)]),
       [(2, 1), (3, 2), (1, 3)])
  test(sort_last([(2, 3), (1, 2), (3, 1)]),
       [(3, 1), (1, 2), (2, 3)])
  test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
       [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  """Collapse runs of equal adjacent elements into a single element.

  Implements the '+++your code here+++' placeholder.
  e.g. [1, 2, 2, 3] -> [1, 2, 3].  Returns a new list.
  """
  result = []
  for n in nums:
    # Keep n only when it differs from the most recently kept element.
    if not result or result[-1] != n:
      result.append(n)
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  """Merge two sorted lists into one sorted list in linear time.

  Implements the '+++your code here+++' placeholder with a single pass
  and two indices, avoiding the non-constant-time list.pop(0) mentioned
  in the note below.
  """
  merged = []
  i = j = 0
  while i < len(list1) and j < len(list2):
    # <= keeps the merge stable: ties are taken from list1 first.
    if list1[i] <= list2[j]:
      merged.append(list1[i])
      i += 1
    else:
      merged.append(list2[j])
      j += 1
  # At most one of these extends is non-empty.
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged
# Note: the solution above is kind of cute, but unforunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print an ' OK ' or ' X ' line comparing got against expected."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise remove_adjacent/linear_merge against expected values."""
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])
  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
  """Parse '{--count | --topcount} file' from argv and dispatch."""
  if len(sys.argv) != 3:
    print 'usage: ./wordcount.py {--count | --topcount} file'
    sys.exit(1)
  option = sys.argv[1]
  filename = sys.argv[2]
  if option == '--count':
    # print_words/print_top are to be defined by the student above.
    print_words(filename)
  elif option == '--topcount':
    print_top(filename)
  else:
    print 'unknown option: ' + option
    sys.exit(1)
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next work.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
def mimic_dict(filename):
  """Returns mimic dict mapping each word to list of words which follow it.

  The empty string '' is treated as the predecessor of the file's first
  word, so mimic_dict(...)[''], when the file is non-empty, lists it.
  Duplicates are kept so choices are weighted by frequency.
  """
  f = open(filename)
  text = f.read()
  f.close()
  mimic = {}
  prev = ''  # '' comes before the very first word
  for word in text.split():
    # setdefault creates the follower list on first sight of a predecessor.
    mimic.setdefault(prev, []).append(word)
    prev = word
  return mimic
def print_mimic(mimic_dict, word):
  """Given mimic dict and start word, prints 200 random words.

  Each next word is chosen at random from the followers of the current
  word; a word with no followers restarts the chain from the '' key.
  """
  words = []
  for _ in range(200):
    words.append(word)
    nexts = mimic_dict.get(word)
    if not nexts:
      # Dead end (word unseen, or only seen as the file's last word):
      # restart from the empty-string key to keep things moving.
      nexts = mimic_dict.get('', [])
    if not nexts:
      break  # empty dict: nothing to emit
    word = random.choice(nexts)
  # Single-argument print() behaves the same under Python 2 and 3.
  print(' '.join(words))
# Provided main(), calls mimic_dict() and mimic()
def main():
  """Build the mimic dict for the file named on the command line and print."""
  if len(sys.argv) != 2:
    print 'usage: ./mimic.py file-to-read'
    sys.exit(1)
  # NOTE(review): 'dict' shadows the builtin; harmless here, worth renaming.
  dict = mimic_dict(sys.argv[1])
  print_mimic(dict, '')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
  """Return s + 'ing', or s + 'ly' if s already ends in 'ing'.

  Strings shorter than 3 characters are returned unchanged.
  """
  if len(s) < 3:
    return s
  if s.endswith('ing'):
    return s + 'ly'
  return s + 'ing'
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  """Replace the first 'not'...'bad' span in s with 'good'.

  Applies only when 'bad' occurs after the first 'not'; otherwise
  s is returned unchanged.
  """
  n = s.find('not')
  b = s.find('bad')
  # b > n also rules out b == -1 when n != -1, since -1 < n.
  if n != -1 and b > n:
    return s[:n] + 'good' + s[b + 3:]
  return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
  """Return a-front + b-front + a-back + b-back.

  For odd lengths the extra character goes in the front half,
  e.g. 'abcde' splits as 'abc' + 'de'.
  """
  a_mid = (len(a) + 1) // 2  # round up: extra char joins the front half
  b_mid = (len(b) + 1) // 2
  return a[:a_mid] + b[:b_mid] + a[a_mid:] + b[b_mid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line OK/X report comparing *got* against *expected*."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  # repr() makes string/list values unambiguous in the output.
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
  """Run the provided test cases against verbing, not_bad and front_back."""
  print 'verbing'
  test(verbing('hail'), 'hailing')
  test(verbing('swiming'), 'swimingly')
  test(verbing('do'), 'do')
  print
  print 'not_bad'
  test(not_bad('This movie is not so bad'), 'This movie is good')
  test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  test(not_bad('This tea is not hot'), 'This tea is not hot')
  test(not_bad("It's bad yet not"), "It's bad yet not")
  print
  print 'front_back'
  test(front_back('abcd', 'xy'), 'abxcdy')
  test(front_back('abcde', 'xyz'), 'abcxydez')
  test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic list exercises
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
  """Return nums with runs of adjacent equal elements collapsed to one.

  [1, 2, 2, 3] -> [1, 2, 3]; non-adjacent duplicates are preserved.
  """
  result = []
  for num in nums:
    # Append only when num differs from the last element kept.
    if not result or result[-1] != num:
      result.append(num)
  return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
  """Merge two sorted lists into one sorted list in a single linear pass."""
  merged = []
  i = j = 0
  # Repeatedly take the smaller head element; <= keeps the merge stable.
  while i < len(list1) and j < len(list2):
    if list1[i] <= list2[j]:
      merged.append(list1[i])
      i += 1
    else:
      merged.append(list2[j])
      j += 1
  # One slice is empty; the other holds the sorted remainder.
  merged.extend(list1[i:])
  merged.extend(list2[j:])
  return merged
# Note: the solution above is kind of cute, but unfortunately list.pop(0)
# is not constant time with the standard python list implementation, so
# the above is not strictly linear time.
# An alternate approach uses pop(-1) to remove the endmost elements
# from each list, building a solution list which is backwards.
# Then use reversed() to put the result back in the correct order. That
# solution works in linear time, but is more ugly.
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line OK/X report comparing *got* against *expected*."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  # repr() makes string/list values unambiguous in the output.
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Exercise remove_adjacent() and linear_merge() (defined above) via test()."""
  print 'remove_adjacent'
  test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
  test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
  test(remove_adjacent([]), [])
  print
  print 'linear_merge'
  test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
       ['aa', 'bb', 'cc', 'xx', 'zz'])
  test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
       ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Wordcount exercise
Google's Python class
The main() below is already defined and complete. It calls print_words()
and print_top() functions which you write.
1. For the --count flag, implement a print_words(filename) function that counts
how often each word appears in the text and prints:
word1 count1
word2 count2
...
Print the above list in order sorted by word (python will sort punctuation to
come before letters -- that's fine). Store all the words as lowercase,
so 'The' and 'the' count as the same word.
2. For the --topcount flag, implement a print_top(filename) which is similar
to print_words() but which prints just the top 20 most common words sorted
so the most common word is first, then the next most common, and so on.
Use str.split() (no arguments) to split on all whitespace.
Workflow: don't build the whole program at once. Get it to an intermediate
milestone and print your data structure and sys.exit(0).
When that's working, try for the next milestone.
Optional: define a helper function to avoid code duplication inside
print_words() and print_top().
"""
import sys
# +++your code here+++
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
###
# This basic command line argument parsing code is provided and
# calls the print_words() and print_top() functions which you must define.
def main():
  """Parse '{--count | --topcount} file' from argv and dispatch."""
  if len(sys.argv) != 3:
    print 'usage: ./wordcount.py {--count | --topcount} file'
    sys.exit(1)
  option = sys.argv[1]
  filename = sys.argv[2]
  if option == '--count':
    # print_words/print_top are to be defined by the student above.
    print_words(filename)
  elif option == '--topcount':
    print_top(filename)
  else:
    print 'unknown option: ' + option
    sys.exit(1)
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
  """Return s + 'ing', or s + 'ly' if s already ends in 'ing'.

  Strings shorter than 3 characters are returned unchanged.
  """
  if len(s) < 3:
    return s
  if s.endswith('ing'):
    return s + 'ly'
  return s + 'ing'
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
  """Replace the first 'not'...'bad' span in s with 'good'.

  Applies only when 'bad' occurs after the first 'not'; otherwise
  s is returned unchanged.
  """
  n = s.find('not')
  b = s.find('bad')
  # b > n also rules out b == -1 when n != -1, since -1 < n.
  if n != -1 and b > n:
    return s[:n] + 'good' + s[b + 3:]
  return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
  """Return a-front + b-front + a-back + b-back.

  For odd lengths the extra character goes in the front half,
  e.g. 'abcde' splits as 'abc' + 'de'.
  """
  a_mid = (len(a) + 1) // 2  # round up: extra char joins the front half
  b_mid = (len(b) + 1) // 2
  return a[:a_mid] + b[:b_mid] + a[a_mid:] + b[b_mid:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line OK/X report comparing *got* against *expected*."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  # repr() makes string/list values unambiguous in the output.
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
  """Run the provided test cases against verbing, not_bad and front_back."""
  print 'verbing'
  test(verbing('hail'), 'hailing')
  test(verbing('swiming'), 'swimingly')
  test(verbing('do'), 'do')
  print
  print 'not_bad'
  test(not_bad('This movie is not so bad'), 'This movie is good')
  test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
  test(not_bad('This tea is not hot'), 'This tea is not hot')
  test(not_bad("It's bad yet not"), "It's bad yet not")
  print
  print 'front_back'
  test(front_back('abcd', 'xy'), 'abxcdy')
  test(front_back('abcde', 'xyz'), 'abcxydez')
  test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
  """
  Given a file name for baby.html, returns a list starting with the year string
  followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
  """
  f = open(filename)
  text = f.read()
  f.close()
  year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
  if not year_match:
    # The file does not look like a baby.html page.
    sys.stderr.write('Could not find the year!\n')
    sys.exit(1)
  year = year_match.group(1)
  # Each table row carries: rank, boy name, girl name.
  tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
  names_to_rank = {}
  for rank, boy, girl in tuples:
    # Rows appear in rank order, so keep the FIRST (best) rank seen
    # when a name occurs as both a boy and a girl name.
    if boy not in names_to_rank:
      names_to_rank[boy] = rank
    if girl not in names_to_rank:
      names_to_rank[girl] = rank
  result = [year]
  for name in sorted(names_to_rank):
    result.append(name + ' ' + names_to_rank[name])
  return result
def main():
  """Parse '[--summaryfile] file [file ...]' and process each baby.html file."""
  # This command-line parsing code is provided.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]
  if not args:
    print 'usage: [--summaryfile] file [file ...]'
    sys.exit(1)
  # Notice the summary flag and remove it from args if it is present.
  summary = False
  if args[0] == '--summaryfile':
    summary = True
    del args[0]
  # +++your code here+++
  # For each filename, get the names, then either print the text output
  # or write it to a summary file
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
  """
  Given a file name for baby.html, returns a list starting with the year string
  followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
  """
  f = open(filename)
  text = f.read()
  f.close()
  year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
  if not year_match:
    # The file does not look like a baby.html page.
    sys.stderr.write('Could not find the year!\n')
    sys.exit(1)
  year = year_match.group(1)
  # Each table row carries: rank, boy name, girl name.
  tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
  names_to_rank = {}
  for rank, boy, girl in tuples:
    # Rows appear in rank order, so keep the FIRST (best) rank seen
    # when a name occurs as both a boy and a girl name.
    if boy not in names_to_rank:
      names_to_rank[boy] = rank
    if girl not in names_to_rank:
      names_to_rank[girl] = rank
  result = [year]
  for name in sorted(names_to_rank):
    result.append(name + ' ' + names_to_rank[name])
  return result
def main():
  """Parse '[--summaryfile] file [file ...]' and process each baby.html file."""
  # This command-line parsing code is provided.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]
  if not args:
    print 'usage: [--summaryfile] file [file ...]'
    sys.exit(1)
  # Notice the summary flag and remove it from args if it is present.
  summary = False
  if args[0] == '--summaryfile':
    summary = True
    del args[0]
  # +++your code here+++
  # For each filename, get the names, then either print the text output
  # or write it to a summary file
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# Define a main() function that prints a little greeting.
def main():
  """Greet the name given on the command line, defaulting to 'World'."""
  # Get the name from the command line, using 'World' as a fallback.
  if len(sys.argv) >= 2:
    name = sys.argv[1]
  else:
    name = 'World'
  print 'Hello', name
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""A tiny Python program to check that Python is working.
Try running this program from the command line like this:
python hello.py
python hello.py Alice
That should print:
Hello World -or- Hello Alice
Try changing the 'Hello' to 'Howdy' and run again.
Once you have that working, you're ready for class -- you can edit
and run Python code; now you just need to learn Python!
"""
import sys
# Define a main() function that prints a little greeting.
def main():
  """Greet the name given on the command line, defaulting to 'World'."""
  # Get the name from the command line, using 'World' as a fallback.
  if len(sys.argv) >= 2:
    name = sys.argv[1]
  else:
    name = 'World'
  print 'Hello', name
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def main():
# This basic command line argument parsing code is provided.
# Add code to call your functions below.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
sys.exit(1)
# todir and tozip are either set from command line
# or left as the empty string.
# The args array is left just containing the dirs.
todir = ''
if args[0] == '--todir':
todir = args[1]
del args[0:2]
tozip = ''
if args[0] == '--tozip':
tozip = args[1]
del args[0:2]
if len(args) == 0:
print "error: must specify one or more dirs"
sys.exit(1)
# +++your code here+++
# Call your functions
if __name__ == "__main__":
main()
| Python |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def read_urls(filename):
  """Returns a list of the puzzle urls from the given log file,
  extracting the hostname from the filename itself.
  Screens out duplicate urls and returns the urls sorted into
  increasing order."""
  # By convention the hostname follows the first underscore in the
  # log file's basename, e.g. 'animal_code.google.com'.
  base = os.path.basename(filename)
  host = base[base.index('_') + 1:]
  paths = set()
  f = open(filename)
  for line in f:
    match = re.search(r'"GET (\S+) HTTP', line)
    # Only puzzle image requests are wanted; everything else is noise.
    if match and 'puzzle' in match.group(1):
      paths.add(match.group(1))
  f.close()
  # All urls share the same host prefix, so sorting the full urls
  # sorts the paths in increasing order as required.
  return sorted('http://' + host + path for path in paths)
def download_images(img_urls, dest_dir):
  """Given the urls already in the correct order, downloads
  each image into the given directory.
  Gives the images local filenames img0, img1, and so on.
  Creates an index.html in the directory
  with an img tag to show each local image file.
  Creates the directory if necessary.
  """
  if not os.path.exists(dest_dir):
    os.makedirs(dest_dir)
  img_tags = []
  for i, url in enumerate(img_urls):
    local_name = 'img%d' % i
    print('Retrieving ' + url)
    # NOTE(review): urllib.urlretrieve is Python 2; Python 3 moved it to
    # urllib.request.urlretrieve.
    urllib.urlretrieve(url, os.path.join(dest_dir, local_name))
    img_tags.append('<img src="%s">' % local_name)
  index = open(os.path.join(dest_dir, 'index.html'), 'w')
  index.write('<html><body>\n' + ''.join(img_tags) + '\n</body></html>\n')
  index.close()
def main():
  """Parse '[--todir dir] logfile'; download the images or print the urls."""
  args = sys.argv[1:]
  if not args:
    print 'usage: [--todir dir] logfile '
    sys.exit(1)
  todir = ''
  if args[0] == '--todir':
    todir = args[1]
    del args[0:2]
  img_urls = read_urls(args[0])
  if todir:
    download_images(img_urls, todir)
  else:
    # No destination given: just list the urls, one per line.
    print '\n'.join(img_urls)
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import os
import re
import sys
import urllib
"""Logpuzzle exercise
Given an apache logfile, find the puzzle urls and download the images.
Here's what a puzzle url looks like:
10.254.254.28 - - [06/Aug/2007:00:13:48 -0700] "GET /~foo/puzzle-bar-aaab.jpg HTTP/1.0" 302 528 "-" "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) Gecko/20070725 Firefox/2.0.0.6"
"""
def read_urls(filename):
  """Returns a list of the puzzle urls from the given log file,
  extracting the hostname from the filename itself.
  Screens out duplicate urls and returns the urls sorted into
  increasing order."""
  # By convention the hostname follows the first underscore in the
  # log file's basename, e.g. 'animal_code.google.com'.
  base = os.path.basename(filename)
  host = base[base.index('_') + 1:]
  paths = set()
  f = open(filename)
  for line in f:
    match = re.search(r'"GET (\S+) HTTP', line)
    # Only puzzle image requests are wanted; everything else is noise.
    if match and 'puzzle' in match.group(1):
      paths.add(match.group(1))
  f.close()
  # All urls share the same host prefix, so sorting the full urls
  # sorts the paths in increasing order as required.
  return sorted('http://' + host + path for path in paths)
def download_images(img_urls, dest_dir):
  """Given the urls already in the correct order, downloads
  each image into the given directory.
  Gives the images local filenames img0, img1, and so on.
  Creates an index.html in the directory
  with an img tag to show each local image file.
  Creates the directory if necessary.
  """
  if not os.path.exists(dest_dir):
    os.makedirs(dest_dir)
  img_tags = []
  for i, url in enumerate(img_urls):
    local_name = 'img%d' % i
    print('Retrieving ' + url)
    # NOTE(review): urllib.urlretrieve is Python 2; Python 3 moved it to
    # urllib.request.urlretrieve.
    urllib.urlretrieve(url, os.path.join(dest_dir, local_name))
    img_tags.append('<img src="%s">' % local_name)
  index = open(os.path.join(dest_dir, 'index.html'), 'w')
  index.write('<html><body>\n' + ''.join(img_tags) + '\n</body></html>\n')
  index.close()
def main():
  """Parse '[--todir dir] logfile'; download the images or print the urls."""
  args = sys.argv[1:]
  if not args:
    print 'usage: [--todir dir] logfile '
    sys.exit(1)
  todir = ''
  if args[0] == '--todir':
    todir = args[1]
    del args[0:2]
  img_urls = read_urls(args[0])
  if todir:
    download_images(img_urls, todir)
  else:
    # No destination given: just list the urls, one per line.
    print '\n'.join(img_urls)
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Count the words of length >= 2 whose first and last chars are equal."""
  return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Sort words, grouping those that start with 'x' first (each group sorted)."""
  x_group = sorted(w for w in words if w.startswith('x'))
  others = sorted(w for w in words if not w.startswith('x'))
  return x_group + others
#-----------------------------------------------
def last(a):
  """Sort-key helper: return the final element of sequence a."""
  return a[len(a) - 1]
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element from each tuple.
def sort_last(tuples):
  """Return the tuples sorted ascending by each tuple's last element."""
  return sorted(tuples, key=last)
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line OK/X report comparing *got* against *expected*."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  # repr() makes string/list values unambiguous in the output.
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
  """Run the provided test cases against match_ends, front_x and sort_last."""
  print 'match_ends'
  test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
  test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
  test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
  print
  print 'front_x'
  test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
       ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
  test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
       ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
  test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
       ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
  print
  print 'sort_last'
  test(sort_last([(1, 3), (3, 2), (2, 1)]),
       [(2, 1), (3, 2), (1, 3)])
  test(sort_last([(2, 3), (1, 2), (3, 1)]),
       [(3, 1), (1, 2), (2, 3)])
  test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
       [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', using 'many' once count reaches 10."""
  quantity = 'many' if count >= 10 else str(count)
  return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first two plus last two chars of s, or '' when len(s) < 2."""
  if len(s) >= 2:
    return s[:2] + s[-2:]
  return ''
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Replace every occurrence of s[0] after position 0 with '*'."""
  head, tail = s[0], s[1:]
  return head + tail.replace(head, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Swap the first two chars of a and b, then join the results with a space."""
  swapped_a = b[:2] + a[2:]
  swapped_b = a[:2] + b[2:]
  return ' '.join([swapped_a, swapped_b])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line OK/X report comparing *got* against *expected*."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  # repr() makes string/list values unambiguous in the output.
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Run the provided cases against donuts, both_ends, fix_start and mix_up."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
  """Return 'Number of donuts: <count>', using 'many' once count reaches 10."""
  quantity = 'many' if count >= 10 else str(count)
  return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
  """Return the first two plus last two chars of s, or '' when len(s) < 2."""
  if len(s) >= 2:
    return s[:2] + s[-2:]
  return ''
# C. fix_start
# Given a string s, return a string
# where all occurrences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
# Hint: s.replace(stra, strb) returns a version of string s
# where all instances of stra have been replaced by strb.
def fix_start(s):
  """Replace every occurrence of s[0] after position 0 with '*'."""
  head, tail = s[0], s[1:]
  return head + tail.replace(head, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
# 'mix', pod' -> 'pox mid'
# 'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
  """Swap the first two chars of a and b, then join the results with a space."""
  swapped_a = b[:2] + a[2:]
  swapped_b = a[:2] + b[2:]
  return ' '.join([swapped_a, swapped_b])
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
  """Print a one-line OK/X report comparing *got* against *expected*."""
  if got == expected:
    prefix = ' OK '
  else:
    prefix = ' X '
  # repr() makes string/list values unambiguous in the output.
  print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
  """Run the provided cases against donuts, both_ends, fix_start and mix_up."""
  print 'donuts'
  # Each line calls donuts, compares its result to the expected for that call.
  test(donuts(4), 'Number of donuts: 4')
  test(donuts(9), 'Number of donuts: 9')
  test(donuts(10), 'Number of donuts: many')
  test(donuts(99), 'Number of donuts: many')
  print
  print 'both_ends'
  test(both_ends('spring'), 'spng')
  test(both_ends('Hello'), 'Helo')
  test(both_ends('a'), '')
  test(both_ends('xyz'), 'xyyz')
  print
  print 'fix_start'
  test(fix_start('babble'), 'ba**le')
  test(fix_start('aardvark'), 'a*rdv*rk')
  test(fix_start('google'), 'goo*le')
  test(fix_start('donut'), 'donut')
  print
  print 'mix_up'
  test(mix_up('mix', 'pod'), 'pox mid')
  test(mix_up('dog', 'dinner'), 'dig donner')
  test(mix_up('gnash', 'sport'), 'spash gnort')
  test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
if __name__ == '__main__':
  main()
| Python |
#!/usr/bin/python -tt
# A. match_ends
# Given a list of strings, return the count of the number of
# strings where the string length is 2 or more and the first
# and last chars of the string are the same.
# Note: python does not have a ++ operator, but += works.
def match_ends(words):
  """Count the words of length >= 2 whose first and last chars are equal."""
  return sum(1 for w in words if len(w) >= 2 and w[0] == w[-1])
# B. front_x
# Given a list of strings, return a list with the strings
# in sorted order, except group all the strings that begin with 'x' first.
# e.g. ['mix', 'xyz', 'apple', 'xanadu', 'aardvark'] yields
# ['xanadu', 'xyz', 'aardvark', 'apple', 'mix']
# Hint: this can be done by making 2 lists and sorting each of them
# before combining them.
def front_x(words):
  """Sort words, grouping those that start with 'x' first (each group sorted)."""
  x_group = sorted(w for w in words if w.startswith('x'))
  others = sorted(w for w in words if not w.startswith('x'))
  return x_group + others
#-----------------------------------------------
def last(a):
return a[-1]
# C. sort_last
# Given a list of non-empty tuples, return a list sorted in increasing
# order by the last element in each tuple.
# e.g. [(1, 7), (1, 3), (3, 4, 5), (2, 2)] yields
# [(2, 2), (1, 3), (3, 4, 5), (1, 7)]
# Hint: use a custom key= function to extract the last element form each tuple.
def sort_last(tuples):
    """Return the tuples sorted (stably) by each tuple's last element."""
    return sorted(tuples, key=lambda t: t[-1])
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print a one-line OK/X report comparing *got* against *expected*."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # Python 2 print statement; repr() makes string/list differences visible.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Exercise match_ends/front_x/sort_last and report OK/X via test()."""
    print 'match_ends'
    test(match_ends(['aba', 'xyz', 'aa', 'x', 'bbb']), 3)
    test(match_ends(['', 'x', 'xy', 'xyx', 'xx']), 2)
    test(match_ends(['aaa', 'be', 'abc', 'hello']), 1)
    print
    print 'front_x'
    test(front_x(['bbb', 'ccc', 'axx', 'xzz', 'xaa']),
         ['xaa', 'xzz', 'axx', 'bbb', 'ccc'])
    test(front_x(['ccc', 'bbb', 'aaa', 'xcc', 'xaa']),
         ['xaa', 'xcc', 'aaa', 'bbb', 'ccc'])
    test(front_x(['mix', 'xyz', 'apple', 'xanadu', 'aardvark']),
         ['xanadu', 'xyz', 'aardvark', 'apple', 'mix'])
    print
    print 'sort_last'
    test(sort_last([(1, 3), (3, 2), (2, 1)]),
         [(2, 1), (3, 2), (1, 3)])
    test(sort_last([(2, 3), (1, 2), (3, 1)]),
         [(3, 1), (1, 2), (2, 3)])
    test(sort_last([(1, 7), (1, 3), (3, 4, 5), (2, 2)]),
         [(2, 2), (1, 3), (3, 4, 5), (1, 7)])
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python -tt
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    """Collapse runs of adjacent equal elements to a single element.

    e.g. [1, 2, 2, 3] -> [1, 2, 3], but [1, 2, 1] stays [1, 2, 1].

    Fix: the original used ``num not in array``, which removes *all*
    duplicates, not just adjacent ones ([1, 2, 1] wrongly became [1, 2]).
    """
    result = []
    for num in nums:
        # Append unless it repeats the element just emitted.
        if not result or result[-1] != num:
            result.append(num)
    return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two already-sorted lists into one sorted list.

    Runs in a single linear pass, as the exercise asks; the original
    sorted the concatenation, which is O(n log n).  On equal elements,
    list1's element is taken first (same order sorted() produced).
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list2[j] < list1[i]:
            merged.append(list2[j])
            j += 1
        else:
            merged.append(list1[i])
            i += 1
    # At most one of these is non-empty.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print a one-line OK/X report comparing *got* against *expected*."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # Python 2 print statement; repr() makes string/list differences visible.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Exercise remove_adjacent/linear_merge and report OK/X via test()."""
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python -tt
import sys
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def word_count_dict(filename):
    """Read *filename* and return a dict mapping lowercased word -> count.

    Words are whitespace-delimited tokens; no punctuation is stripped.

    Improvements: 'with' guarantees the file is closed even if reading
    raises (the original leaked the handle on error), and dict.get
    replaces the explicit membership test.
    """
    word_count = {}
    with open(filename, 'r') as my_file:
        for line in my_file:
            for word in line.split():
                word = word.lower()
                word_count[word] = word_count.get(word, 0) + 1
    return word_count
def print_words(filename):
    """Print each distinct word in the file with its count, sorted by word."""
    word_count = word_count_dict(filename)
    words = sorted(word_count.keys())
    for word in words:
        print word, word_count[word]
def get_count(word_count_tuple):
    """Sort key: extract the count from a (word, count) item tuple."""
    return word_count_tuple[1]
def print_top(filename):
    """Print the 20 most frequent words in the file, most frequent first."""
    word_count = word_count_dict(filename)
    items = sorted(word_count.items(), key=get_count, reverse=True)
    # Ties keep sorted()'s stable order; only the top 20 are shown.
    for item in items[:20]:
        print item[0], item[1]
def main():
    """Command-line entry: wordcount.py {--count | --topcount} file."""
    if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print 'unknown option: ' + option
        sys.exit(1)
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python -tt
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to *s* when len(s) >= 3, or 'ly' if it already ends in
    'ing'.  Shorter strings are returned unchanged."""
    if len(s) < 3:
        return s
    suffix = 'ly' if s.endswith('ing') else 'ing'
    return s + suffix
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' appears
    after 'not'; otherwise return *s* unchanged."""
    not_pos = s.find('not')
    bad_pos = s.find('bad')
    if -1 < not_pos < bad_pos:
        return s[:not_pos] + 'good' + s[bad_pos + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back.

    Each string is split in half; when the length is odd the extra
    character goes in the front half ('abcde' -> 'abc' + 'de').

    Fixes: the original had a stray 's' after the second 'if' header
    ("...== 1:s"), which makes the function a syntax error, and used '/'
    which yields a float under Python 3; '//' keeps integer halves on
    both Python 2 and 3.
    """
    # (len + 1) // 2 rounds the split point up, placing the odd char in front.
    mid_a = (len(a) + 1) // 2
    mid_b = (len(b) + 1) // 2
    return a[:mid_a] + b[:mid_b] + a[mid_a:] + b[mid_b:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print a one-line OK/X report comparing *got* against *expected*."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # Python 2 print statement; repr() makes string/list differences visible.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Exercise verbing/not_bad/front_back and report OK/X via test()."""
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print
    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print
    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')
main()
| Python |
#!/usr/bin/python -tt
# D. Given a list of numbers, return a list where
# all adjacent == elements have been reduced to a single element,
# so [1, 2, 2, 3] returns [1, 2, 3]. You may create a new list or
# modify the passed in list.
def remove_adjacent(nums):
    """Collapse runs of adjacent equal elements to a single element.

    e.g. [1, 2, 2, 3] -> [1, 2, 3], but [1, 2, 1] stays [1, 2, 1].

    Fix: the original used ``num not in array``, which removes *all*
    duplicates, not just adjacent ones ([1, 2, 1] wrongly became [1, 2]).
    """
    result = []
    for num in nums:
        # Append unless it repeats the element just emitted.
        if not result or result[-1] != num:
            result.append(num)
    return result
# E. Given two lists sorted in increasing order, create and return a merged
# list of all the elements in sorted order. You may modify the passed in lists.
# Ideally, the solution should work in "linear" time, making a single
# pass of both lists.
def linear_merge(list1, list2):
    """Merge two already-sorted lists into one sorted list.

    Runs in a single linear pass, as the exercise asks; the original
    sorted the concatenation, which is O(n log n).  On equal elements,
    list1's element is taken first (same order sorted() produced).
    """
    merged = []
    i = j = 0
    while i < len(list1) and j < len(list2):
        if list2[j] < list1[i]:
            merged.append(list2[j])
            j += 1
        else:
            merged.append(list1[i])
            i += 1
    # At most one of these is non-empty.
    merged.extend(list1[i:])
    merged.extend(list2[j:])
    return merged
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print a one-line OK/X report comparing *got* against *expected*."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # Python 2 print statement; repr() makes string/list differences visible.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# Calls the above functions with interesting inputs.
def main():
    """Exercise remove_adjacent/linear_merge and report OK/X via test()."""
    print 'remove_adjacent'
    test(remove_adjacent([1, 2, 2, 3]), [1, 2, 3])
    test(remove_adjacent([2, 2, 3, 3, 3]), [2, 3])
    test(remove_adjacent([]), [])
    print
    print 'linear_merge'
    test(linear_merge(['aa', 'xx', 'zz'], ['bb', 'cc']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'xx'], ['bb', 'cc', 'zz']),
         ['aa', 'bb', 'cc', 'xx', 'zz'])
    test(linear_merge(['aa', 'aa'], ['aa', 'bb', 'bb']),
         ['aa', 'aa', 'aa', 'bb', 'bb'])
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python -tt
import sys
# Define print_words(filename) and print_top(filename) functions.
# You could write a helper utility function that reads a file
# and builds and returns a word/count dict for it.
# Then print_words() and print_top() can just call the utility function.
def word_count_dict(filename):
    """Read *filename* and return a dict mapping lowercased word -> count.

    Words are whitespace-delimited tokens; no punctuation is stripped.

    Improvements: 'with' guarantees the file is closed even if reading
    raises (the original leaked the handle on error), and dict.get
    replaces the explicit membership test.
    """
    word_count = {}
    with open(filename, 'r') as my_file:
        for line in my_file:
            for word in line.split():
                word = word.lower()
                word_count[word] = word_count.get(word, 0) + 1
    return word_count
def print_words(filename):
    """Print each distinct word in the file with its count, sorted by word."""
    word_count = word_count_dict(filename)
    words = sorted(word_count.keys())
    for word in words:
        print word, word_count[word]
def get_count(word_count_tuple):
    """Sort key: extract the count from a (word, count) item tuple."""
    return word_count_tuple[1]
def print_top(filename):
    """Print the 20 most frequent words in the file, most frequent first."""
    word_count = word_count_dict(filename)
    items = sorted(word_count.items(), key=get_count, reverse=True)
    # Ties keep sorted()'s stable order; only the top 20 are shown.
    for item in items[:20]:
        print item[0], item[1]
def main():
    """Command-line entry: wordcount.py {--count | --topcount} file."""
    if len(sys.argv) != 3:
        print 'usage: ./wordcount.py {--count | --topcount} file'
        sys.exit(1)
    option = sys.argv[1]
    filename = sys.argv[2]
    if option == '--count':
        print_words(filename)
    elif option == '--topcount':
        print_top(filename)
    else:
        print 'unknown option: ' + option
        sys.exit(1)
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/python -tt
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
    """Append 'ing' to *s* when len(s) >= 3, or 'ly' if it already ends in
    'ing'.  Shorter strings are returned unchanged."""
    if len(s) < 3:
        return s
    suffix = 'ly' if s.endswith('ing') else 'ing'
    return s + suffix
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
    """Replace the first 'not'...'bad' span with 'good' when 'bad' appears
    after 'not'; otherwise return *s* unchanged."""
    not_pos = s.find('not')
    bad_pos = s.find('bad')
    if -1 < not_pos < bad_pos:
        return s[:not_pos] + 'good' + s[bad_pos + 3:]
    return s
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
    """Return a-front + b-front + a-back + b-back.

    Each string is split in half; when the length is odd the extra
    character goes in the front half ('abcde' -> 'abc' + 'de').

    Fixes: the original had a stray 's' after the second 'if' header
    ("...== 1:s"), which makes the function a syntax error, and used '/'
    which yields a float under Python 3; '//' keeps integer halves on
    both Python 2 and 3.
    """
    # (len + 1) // 2 rounds the split point up, placing the odd char in front.
    mid_a = (len(a) + 1) // 2
    mid_b = (len(b) + 1) // 2
    return a[:mid_a] + b[:mid_b] + a[mid_a:] + b[mid_b:]
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
    """Print a one-line OK/X report comparing *got* against *expected*."""
    if got == expected:
        prefix = ' OK '
    else:
        prefix = ' X '
    # Python 2 print statement; repr() makes string/list differences visible.
    print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
    """Exercise verbing/not_bad/front_back and report OK/X via test()."""
    print 'verbing'
    test(verbing('hail'), 'hailing')
    test(verbing('swiming'), 'swimingly')
    test(verbing('do'), 'do')
    print
    print 'not_bad'
    test(not_bad('This movie is not so bad'), 'This movie is good')
    test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
    test(not_bad('This tea is not hot'), 'This tea is not hot')
    test(not_bad("It's bad yet not"), "It's bad yet not")
    print
    print 'front_back'
    test(front_back('abcd', 'xy'), 'abxcdy')
    test(front_back('abcde', 'xyz'), 'abcxydez')
    test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| Python |
#!/usr/bin/env python
# -*- encoding:utf8 -*-
# protoc-gen-erl
# Google's Protocol Buffers project, ported to lua.
# https://code.google.com/p/protoc-gen-lua/
#
# Copyright (c) 2010 , 林卓毅 (Zhuoyi Lin) netsnail@gmail.com
# All rights reserved.
#
# Use, modification and distribution are subject to the "New BSD License"
# as listed at <url: http://www.opensource.org/licenses/bsd-license.php >.
import sys
import os.path as path
from cStringIO import StringIO
import plugin_pb2
import google.protobuf.descriptor_pb2 as descriptor_pb2
_packages = {}
# Output filename -> generated Lua source; filled by code_gen_file().
_files = {}
_message = {}
# Shorthand for the FieldDescriptorProto class (TYPE_* / LABEL_* constants).
FDP = plugin_pb2.descriptor_pb2.FieldDescriptorProto
# The protoc plugin protocol exchanges raw serialized protobufs on
# stdin/stdout, so on Windows both streams must be switched to binary mode.
if sys.platform == "win32":
    import msvcrt, os
    msvcrt.setmode(sys.stdin.fileno(), os.O_BINARY)
    msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
class CppType:
    """Constants mirroring protobuf's C++ FieldDescriptor::CppType values."""
    CPPTYPE_INT32 = 1
    CPPTYPE_INT64 = 2
    CPPTYPE_UINT32 = 3
    CPPTYPE_UINT64 = 4
    CPPTYPE_DOUBLE = 5
    CPPTYPE_FLOAT = 6
    CPPTYPE_BOOL = 7
    CPPTYPE_ENUM = 8
    CPPTYPE_STRING = 9
    CPPTYPE_MESSAGE = 10
# Maps a wire/field type (FDP.TYPE_*) to the C++ storage category
# (CppType.CPPTYPE_*) written into the generated descriptor's .cpp_type.
CPP_TYPE ={
    FDP.TYPE_DOUBLE : CppType.CPPTYPE_DOUBLE,
    FDP.TYPE_FLOAT : CppType.CPPTYPE_FLOAT,
    FDP.TYPE_INT64 : CppType.CPPTYPE_INT64,
    FDP.TYPE_UINT64 : CppType.CPPTYPE_UINT64,
    FDP.TYPE_INT32 : CppType.CPPTYPE_INT32,
    FDP.TYPE_FIXED64 : CppType.CPPTYPE_UINT64,
    FDP.TYPE_FIXED32 : CppType.CPPTYPE_UINT32,
    FDP.TYPE_BOOL : CppType.CPPTYPE_BOOL,
    FDP.TYPE_STRING : CppType.CPPTYPE_STRING,
    FDP.TYPE_MESSAGE : CppType.CPPTYPE_MESSAGE,
    FDP.TYPE_BYTES : CppType.CPPTYPE_STRING,
    FDP.TYPE_UINT32 : CppType.CPPTYPE_UINT32,
    FDP.TYPE_ENUM : CppType.CPPTYPE_ENUM,
    FDP.TYPE_SFIXED32 : CppType.CPPTYPE_INT32,
    FDP.TYPE_SFIXED64 : CppType.CPPTYPE_INT64,
    FDP.TYPE_SINT32 : CppType.CPPTYPE_INT32,
    FDP.TYPE_SINT64 : CppType.CPPTYPE_INT64
}
def printerr(*args):
    """Write the space-joined *args* to stderr, newline-terminated, then flush."""
    sys.stderr.write(" ".join(args) + "\n")
    sys.stderr.flush()
class TreeNode(object):
    """Node in the tree of package/message names built during generation.

    Each node records which .proto file (sans extension) declared it and
    the package it belongs to; children are kept in insertion order.
    """
    def __init__(self, name, parent=None, filename=None, package=None):
        super(TreeNode, self).__init__()
        self.child = []            # ordered list of child TreeNodes
        self.parent = parent
        self.filename = filename   # declaring .proto file, no extension
        self.package = package     # package name components — set by Env; TODO confirm
        if parent:
            # Attach to the parent immediately on construction.
            self.parent.add_child(self)
        self.name = name
    def add_child(self, child):
        self.child.append(child)
    def find_child(self, child_names):
        """Follow the path *child_names* down the tree; raise when a hop is missing."""
        if child_names:
            for i in self.child:
                if i.name == child_names[0]:
                    return i.find_child(child_names[1:])
            # No child matched (Python 2's StandardError; callers catch broadly).
            raise StandardError
        else:
            return self
    def get_child(self, child_name):
        """Return the direct child named *child_name*, or None."""
        for i in self.child:
            if i.name == child_name:
                return i
        return None
    def get_path(self, end = None):
        """Dotted path from just below *end* (exclusive) down to this node."""
        pos = self
        out = []
        while pos and pos != end:
            out.append(pos.name)
            pos = pos.parent
        out.reverse()
        return '.'.join(out)
    def get_global_name(self):
        # Full path from the (nameless) root.
        return self.get_path()
    def get_local_name(self):
        """Dotted path relative to this node's innermost package component."""
        pos = self
        while pos.parent:
            pos = pos.parent
            if self.package and pos.name == self.package[-1]:
                # Stop ascending at the package root.
                break
        return self.get_path(pos)
    def __str__(self):
        return self.to_string(0)
    def __repr__(self):
        return str(self)
    def to_string(self, indent = 0):
        # Debug dump: nested "<TreeNode name(...)>" text, 4 spaces per level.
        return ' '*indent + '<TreeNode ' + self.name + '(\n' + \
            ','.join([i.to_string(indent + 4) for i in self.child]) + \
            ' '*indent +')>\n'
class Env(object):
    """Mutable state threaded through code generation for one request.

    Tracks the current file/package scope via a TreeNode tree and
    accumulates the output fragments (descriptor declarations, member
    assignments, message registrations) for the file being generated.
    """
    # Per-file state; the list fields are reset by _init_field().
    filename = None
    package = None
    extend = None
    descriptor = None
    message = None
    context = None
    register = None
    def __init__(self):
        self.message_tree = TreeNode('')
        self.scope = self.message_tree   # current position in the name tree
    def get_global_name(self):
        """Fully-qualified dotted name of the current scope."""
        return self.scope.get_global_name()
    def get_local_name(self):
        """Dotted name of the current scope relative to its package."""
        return self.scope.get_local_name()
    def get_ref_name(self, type_name):
        """Resolve a type reference into the name used in generated Lua."""
        try:
            node = self.lookup_name(type_name)
        except:
            # If the child can't be found, it must be declared in this same
            # file: strip the leading '.<package>.' prefix.
            return type_name[len('.'.join(self.package)) + 2:]
        if node.filename != self.filename:
            # Cross-file reference: qualify with the other generated module.
            return node.filename + '_pb.' + node.get_local_name()
        return node.get_local_name()
    def lookup_name(self, name):
        # A leading '.' marks a fully-qualified name (absolute lookup).
        names = name.split('.')
        if names[0] == '':
            return self.message_tree.find_child(names[1:])
        else:
            return self.scope.parent.find_child(names)
    def enter_package(self, package):
        """Descend to (creating as needed) the node for a dotted package name."""
        if not package:
            return self.message_tree
        names = package.split('.')
        pos = self.message_tree
        for i, name in enumerate(names):
            new_pos = pos.get_child(name)
            if new_pos:
                pos = new_pos
            else:
                # Remaining components don't exist yet; build them.
                return self._build_nodes(pos, names[i:])
        return pos
    def enter_file(self, filename, package):
        self.filename = filename
        self.package = package.split('.')
        self._init_field()
        self.scope = self.enter_package(package)
    def exit_file(self):
        self._init_field()
        self.filename = None
        self.package = []
        self.scope = self.scope.parent
    def enter(self, message_name):
        """Push a new message/enum scope under the current one."""
        self.scope = TreeNode(message_name, self.scope, self.filename,
                              self.package)
    def exit(self):
        self.scope = self.scope.parent
    def _init_field(self):
        # Fresh accumulators for one file's generated output.
        self.descriptor = []
        self.context = []
        self.message = []
        self.register = []
    def _build_nodes(self, node, names):
        # Create a chain of nodes for the remaining name components.
        parent = node
        for i in names:
            parent = TreeNode(i, parent, self.filename, self.package)
        return parent
class Writer(object):
    """Indentation-aware string buffer used to emit generated Lua code.

    Entering the instance as a context manager pushes one indent level;
    exiting pops it.  Calling the instance writes the current indent, the
    optional fixed prefix, then the data.

    Fix: the indent string added by __enter__ and the slice removed by
    __exit__ ([:-4]) must agree; both now derive from a single constant
    so push and pop can't drift apart.
    """

    # One indentation level; __exit__ strips exactly this many characters.
    INDENT = '    '

    def __init__(self, prefix=None):
        self.io = StringIO()
        self.__indent = ''
        self.__prefix = prefix

    def getvalue(self):
        """Return everything written so far."""
        return self.io.getvalue()

    def __enter__(self):
        self.__indent += self.INDENT
        return self

    def __exit__(self, type, value, trackback):
        # Pop one level; exceptions are not suppressed (returns None).
        self.__indent = self.__indent[:-len(self.INDENT)]

    def __call__(self, data):
        self.io.write(self.__indent)
        if self.__prefix:
            self.io.write(self.__prefix)
        self.io.write(data)
# Lua literal used as the implicit default for a field that declares no
# explicit default in the .proto.
# NOTE(review): the enum default '1' assumes the first valid enum number is
# 1 — confirm against the Lua runtime's expectations.
DEFAULT_VALUE = {
    FDP.TYPE_DOUBLE : '0.0',
    FDP.TYPE_FLOAT : '0.0',
    FDP.TYPE_INT64 : '0',
    FDP.TYPE_UINT64 : '0',
    FDP.TYPE_INT32 : '0',
    FDP.TYPE_FIXED64 : '0',
    FDP.TYPE_FIXED32 : '0',
    FDP.TYPE_BOOL : 'false',
    FDP.TYPE_STRING : '""',
    FDP.TYPE_MESSAGE : 'nil',
    FDP.TYPE_BYTES : '""',
    FDP.TYPE_UINT32 : '0',
    FDP.TYPE_ENUM : '1',
    FDP.TYPE_SFIXED32 : '0',
    FDP.TYPE_SFIXED64 : '0',
    FDP.TYPE_SINT32 : '0',
    FDP.TYPE_SINT64 : '0',
}
def code_gen_enum_item(index, enum_value, env):
    """Emit Lua EnumValueDescriptor code for one enum value.

    Returns the Lua variable name of the generated descriptor object.
    """
    local_name = env.get_local_name() + '.' + enum_value.name
    obj_name = local_name.upper().replace('.', '_') + '_ENUM'
    env.descriptor.append(
        "local %s = protobuf.EnumValueDescriptor();\n" % obj_name)
    body = Writer(obj_name)
    body('.name = "%s"\n' % enum_value.name)
    body('.index = %d\n' % index)
    body('.number = %d\n' % enum_value.number)
    env.context.append(body.getvalue())
    return obj_name
def code_gen_enum(enum_desc, env):
    """Emit Lua EnumDescriptor code for an enum; return its Lua object name."""
    env.enter(enum_desc.name)
    full_name = env.get_local_name()
    obj_name = full_name.upper().replace('.', '_')
    env.descriptor.append(
        "local %s = protobuf.EnumDescriptor();\n"% obj_name
    )
    context = Writer(obj_name)
    context('.name = "%s"\n' % enum_desc.name)
    context('.full_name = "%s"\n' % env.get_global_name())
    values = []
    # Generate one EnumValueDescriptor per value, then link them all.
    for i, enum_value in enumerate(enum_desc.value):
        values.append(code_gen_enum_item(i, enum_value, env))
    context('.values = {%s}\n' % ','.join(values))
    env.context.append(context.getvalue())
    env.exit()
    return obj_name
def code_gen_field(index, field_desc, env):
    """Emit Lua FieldDescriptor code for one field; return its Lua object name."""
    full_name = env.get_local_name() + '.' + field_desc.name
    obj_name = full_name.upper().replace('.', '_') + '_FIELD'
    env.descriptor.append(
        "local %s = protobuf.FieldDescriptor();\n"% obj_name
    )
    context = Writer(obj_name)
    context('.name = "%s"\n' % field_desc.name)
    context('.full_name = "%s"\n' % (
        env.get_global_name() + '.' + field_desc.name))
    context('.number = %d\n' % field_desc.number)
    context('.index = %d\n' % index)
    context('.label = %d\n' % field_desc.label)
    if field_desc.HasField("default_value"):
        # Explicit default declared in the .proto; strings get quoted.
        context('.has_default_value = true\n')
        value = field_desc.default_value
        if field_desc.type == FDP.TYPE_STRING:
            context('.default_value = "%s"\n'%value)
        else:
            context('.default_value = %s\n'%value)
    else:
        context('.has_default_value = false\n')
    # NOTE(review): this second '.default_value' assignment is emitted
    # unconditionally, so in the generated Lua it overwrites any explicit
    # default written above — confirm this is intended.
    if field_desc.label == FDP.LABEL_REPEATED:
        default_value = "{}"
    elif field_desc.HasField('type_name'):
        default_value = "nil"
    else:
        default_value = DEFAULT_VALUE[field_desc.type]
    context('.default_value = %s\n' % default_value)
    if field_desc.HasField('type_name'):
        # Message/enum typed field: link to the referenced descriptor object.
        type_name = env.get_ref_name(field_desc.type_name).upper().replace('.', '_')
        if field_desc.type == FDP.TYPE_MESSAGE:
            context('.message_type = %s\n' % type_name)
        else:
            context('.enum_type = %s\n' % type_name)
    if field_desc.HasField('extendee'):
        # Extension field: register it on the message it extends.
        type_name = env.get_ref_name(field_desc.extendee)
        env.register.append(
            "%s.RegisterExtension(%s)\n" % (type_name, obj_name)
        )
    context('.type = %d\n' % field_desc.type)
    context('.cpp_type = %d\n\n' % CPP_TYPE[field_desc.type])
    env.context.append(context.getvalue())
    return obj_name
def code_gen_message(message_descriptor, env, containing_type = None):
    """Emit Lua Descriptor code for a message, recursing into nested types.

    Returns the Lua variable name of the generated descriptor object.
    """
    env.enter(message_descriptor.name)
    full_name = env.get_local_name()
    obj_name = full_name.upper().replace('.', '_')
    env.descriptor.append(
        "local %s = protobuf.Descriptor();\n"% obj_name
    )
    context = Writer(obj_name)
    context('.name = "%s"\n' % message_descriptor.name)
    context('.full_name = "%s"\n' % env.get_global_name())
    # Nested messages are generated first so their descriptor objects
    # exist before this message references them.
    nested_types = []
    for msg_desc in message_descriptor.nested_type:
        msg_name = code_gen_message(msg_desc, env, obj_name)
        nested_types.append(msg_name)
    context('.nested_types = {%s}\n' % ', '.join(nested_types))
    enums = []
    for enum_desc in message_descriptor.enum_type:
        enums.append(code_gen_enum(enum_desc, env))
    context('.enum_types = {%s}\n' % ', '.join(enums))
    fields = []
    for i, field_desc in enumerate(message_descriptor.field):
        fields.append(code_gen_field(i, field_desc, env))
    context('.fields = {%s}\n' % ', '.join(fields))
    # A message with extension ranges accepts extensions.
    if len(message_descriptor.extension_range) > 0:
        context('.is_extendable = true\n')
    else:
        context('.is_extendable = false\n')
    extensions = []
    for i, field_desc in enumerate(message_descriptor.extension):
        extensions.append(code_gen_field(i, field_desc, env))
    context('.extensions = {%s}\n' % ', '.join(extensions))
    if containing_type:
        context('.containing_type = %s\n' % containing_type)
    # Register the runtime Message constructor under the local name.
    env.message.append('%s = protobuf.Message(%s)\n' % (full_name,
                                                        obj_name))
    env.context.append(context.getvalue())
    env.exit()
    return obj_name
def write_header(writer):
    """Emit the do-not-edit banner at the top of a generated Lua file."""
    writer('-- Generated By protoc-gen-lua Do not Edit\n')
def code_gen_file(proto_file, env, is_gen):
    """Walk one FileDescriptorProto; when *is_gen*, emit its _pb.lua module.

    Generated source is stored in the module-level _files dict keyed by
    output filename; nothing is written to disk here.  Dependency files
    are walked too (to populate the name tree) but produce no output.
    """
    filename = path.splitext(proto_file.name)[0]
    env.enter_file(filename, proto_file.package)
    includes = []
    for f in proto_file.dependency:
        inc_file = path.splitext(f)[0]
        includes.append(inc_file)
    # for field_desc in proto_file.extension:
    #     code_gen_extensions(field_desc, field_desc.name, env)
    for enum_desc in proto_file.enum_type:
        code_gen_enum(enum_desc, env)
        # Top-level enum values are also exported as module constants.
        for enum_value in enum_desc.value:
            env.message.append('%s = %d\n' % (enum_value.name,
                                              enum_value.number))
    for msg_desc in proto_file.message_type:
        code_gen_message(msg_desc, env)
    if is_gen:
        lua = Writer()
        write_header(lua)
        lua('local protobuf = require "protobuf"\n')
        for i in includes:
            lua('local %s_pb = require("%s_pb")\n' % (i, i))
        lua("module('%s_pb')\n" % env.filename)
        lua('\n\n')
        # NOTE: Python 2 map() is eager; under Python 3 these map() calls
        # would be lazy and write nothing (they'd need list() or a loop).
        map(lua, env.descriptor)
        lua('\n')
        map(lua, env.context)
        lua('\n')
        env.message.sort()
        map(lua, env.message)
        lua('\n')
        map(lua, env.register)
        _files[env.filename+ '_pb.lua'] = lua.getvalue()
    env.exit_file()
def main():
    """Plugin entry: CodeGeneratorRequest on stdin -> response on stdout."""
    plugin_require_bin = sys.stdin.read()
    code_gen_req = plugin_pb2.CodeGeneratorRequest()
    code_gen_req.ParseFromString(plugin_require_bin)
    env = Env()
    for proto_file in code_gen_req.proto_file:
        # Dependencies are walked too, but only requested files emit output.
        code_gen_file(proto_file, env,
                      proto_file.name in code_gen_req.file_to_generate)
    code_generated = plugin_pb2.CodeGeneratorResponse()
    for k in _files:
        file_desc = code_generated.file.add()
        file_desc.name = k
        file_desc.content = _files[k]
    sys.stdout.write(code_generated.SerializeToString())
if __name__ == "__main__":
main()
| Python |
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
    '''
    Runs an external process and returns its stdout as a list of lines.

    @param args: the argument list to run (no shell interpretation)
    '''
    process = subprocess.Popen(args,
                               stdout=subprocess.PIPE,
                               universal_newlines=True,
                               shell=False)
    stdout_data, _ = process.communicate()
    return stdout_data.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise ValueError: if hg annotate output can't be parsed
    '''
    # Take output of hg annotate to get the revision of each line.
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Map line number -> revision hash (index 0 is a dummy: lines are 1-based).
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # Fix: raising a plain string is a TypeError on Python >= 2.6;
            # raise a real exception with the same message.
            raise ValueError('Unexpected line of output from hg: %s' % line)
        line_revs.append(rev_match.group('hash'))
    # 'parsed_str' instead of the original 'str', which shadowed the builtin.
    for parsed_str in parsed_file.itervalues():
        # The inclusive line span this string occupies in the file.
        start_line = parsed_str['startLine']
        end_line = parsed_str['endLine']
        revs = [line_revs[n] for n in range(start_line, end_line + 1)]
        # Merge with any revisions that were already there
        # (for explicit revision specification).
        if 'revs' in parsed_str:
            revs += parsed_str['revs']
        parsed_str['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision supersedes another.

    This essentially means that the older revision is an ancestor of the
    newer one.  This also returns True if the two revisions are the same.

    @param filename: the file whose history to consult
    @param rev1: the revision that may be superseding the other
    @param rev2: the revision that may be superseded
    @return: True if rev1 supersedes rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # 'hg log -r ancestors(rev1)' prints the short node hash of every
    # ancestor of rev1 touching this file, one per line; rev2 is superseded
    # exactly when it appears in that list.
    args = ['hg', 'log', '-r', 'ancestors(%s)' % rev1, '--template', '{node|short}\n', filename]
    output_lines = _GetOutputLines(args)
    return rev2 in output_lines
def NewestRevision(filename, rev1, rev2):
    '''
    Returns which of two revisions is closest to the head of the repository.

    If neither is an ancestor of the other, rev2 is returned (either is
    acceptable per the contract).

    @param filename: the file whose history to consult
    @param rev1: the first revision
    @param rev2: the second revision
    '''
    return rev1 if DoesRevisionSuperceed(filename, rev1, rev2) else rev2
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    """Print command-line help and exit with status 1 (never returns)."""
    print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
    print 'Commands are:'
    print ' cleanup'
    print ' translate'
    print ' validate'
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or outdated strings
    from the files for the given languages.

    @param languages: the languages to translate

    Fixes: Validator's missing_in_lang()/outdated_in_lang() dicts only
    contain keys for languages that actually have problems, so the
    original's missing[lang] raised KeyError for a fully-translated
    language; it also added a frozenset to a list (TypeError).  Both are
    handled by defaulting to empty and converting to lists.
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # Both missing and outdated strings need (re)translation.
        untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
        if len(untranslated) == 0:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the total number of errors found (0 when all files are OK)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if (validator.valid()):
        print 'All files OK'
    else:
        # Each problem dict only has entries for languages with issues.
        for lang, missing in validator.missing_in_master().iteritems():
            print 'Missing in master, present in %s: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, missing in validator.missing_in_lang().iteritems():
            print 'Missing in %s, present in master: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, outdated in validator.outdated_in_lang().iteritems():
            print 'Outdated in %s: %s:' % (lang, str(outdated))
            error_count = error_count + len(outdated)
    return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactive translator for one language.

    Currently a stub (TODO: implement) — Translate() only prints the
    names of the strings that would need translation.
    '''
    def __init__(self, language):
        '''
        Constructor

        @param language: the language code this translator works on
        '''
        self._language = language
    def Translate(self, string_names):
        # Stub behaviour: list what would be translated.
        print string_names
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    """Compares each language's strings.xml against the master ('en') file.

    On construction every language file is parsed and annotated with the
    Mercurial revisions that touched each string; Validate() then computes
    which keys are missing on either side and which translations are
    outdated relative to the master.
    """
    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}     # language code -> parsed strings dict
        self._master = None  # parsed 'en' strings dict
        self._language_paths = languages
        parser = StringsParser()
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            # Attach the set of revisions that touched each string's lines.
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            if lang == 'en':
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()
    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()
    def valid(self):
        # True only if the last Validate() found no problems of any kind.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)
    def missing_in_master(self):
        return self._missing_in_master
    def missing_in_lang(self):
        return self._missing_in_lang
    def outdated_in_lang(self):
        return self._outdated_in_lang
    def _Reset(self):
        # These are maps from language to string name list; a language only
        # appears as a key when it actually has problems.
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}
    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang
    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation
        to the master keys.
        '''
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files
                # for this string.
                # NOTE(review): a key present in the language file but
                # missing from master raises KeyError here — confirm
                # _ValidateMissingKeys is expected to run first / callers
                # tolerate this.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check
                # that), and as such there is always one revision which
                # supersedes all others.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version.
                # NOTE(review): DoesRevisionSuperceed returns True for equal
                # revisions, so an up-to-date key is also marked outdated —
                # confirm whether this is intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
MYTRACKS_RES_DIR = 'MyTracks/res'
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upward from the current working directory until one containing
    MYTRACKS_RES_DIR is found.

    @raise RuntimeError: if no ancestor directory contains the project

    Fixes: raising a plain string is a TypeError on Python >= 2.6; and the
    original compared against '/' literally, which loops forever on
    Windows drive roots — stopping when os.path.split makes no progress
    handles every platform.
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        parent = os.path.split(path)[0]
        if parent == path:
            # Reached the filesystem root without finding the project.
            raise RuntimeError('Not in My Tracks project')
        # Go up one level
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    The master 'values' directory is reported under the key 'en'.

    @raise RuntimeError: if no language directories or no master directory
        is found

    Fix: the original raised plain strings, which is a TypeError on
    Python >= 2.6; real exceptions carry the same messages.
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    if len(language_dirs) == 0:
        raise RuntimeError('No languages found!')
    if not os.path.isdir(master_dir):
        raise RuntimeError('Couldn\'t find master file')
    # The 'values-xx' suffix is the language key (e.g. 'values-pt' -> 'pt').
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', dir)[0], dir)
                       for dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single
    file at a time, only.
    '''

    # Recognizes a KEEP_PARENT annotation inside an XML comment, with an
    # optional 12-hex-digit revision identifier.
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)

    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and
        explicit revisions.

        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
        <!-- KEEP_PARENT name="bla" -->
        <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting from the
        master file (and the optional revision says that this decision is
        compatible with the master file up to that revision).

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers.
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        # Open in binary mode: expat's ParseFile reads raw bytes and performs
        # the XML decoding itself (text mode breaks this under Python 3).
        file_obj = open(file, 'rb')
        try:
            self._xml_parser.ParseFile(file_obj)
        finally:
            # Close the file even when parsing raises (the original leaked it).
            file_obj.close()
        return self._all_strings

    def _Reset(self):
        # Per-parse state for the <string> element currently being read.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        # Only named <string> elements are of interest.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine': self._xml_parser.CurrentLineNumber,
        }
        # An explicit rev attribute seeds the revision list for this string.
        if 'rev' in attrs:
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''

    def _EndElementHandler(self, name):
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None

    def _CharacterDataHandler(self, data):
        # Character data may arrive in several chunks; accumulate it.
        if not self._currentString:
            return
        self._currentStringValue += data

    def _CommentHandler(self, data):
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        # A KEEP_PARENT annotation acts as a placeholder entry for the key.
        name = keep_parent_match.group('name')
        self._all_strings[name] = {
            'keepParent': True,
            'startLine': self._xml_parser.CurrentLineNumber,
            'endLine': self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
#!/usr/bin/python
import os
def main():
    """Convert each known PRX module into a C header via the bin2c tool."""
    prx_files = [
        "ISODrivers/Galaxy/galaxy.prx",
        "ISODrivers/March33/march33.prx",
        "ISODrivers/March33/march33_620.prx",
        "ISODrivers/Inferno/inferno.prx",
        "Popcorn/popcorn.prx",
        "Satelite/satelite.prx",
        "Stargate/stargate.prx",
        "SystemControl/systemctrl.prx",
        "contrib/usbdevice.prx",
        "Vshctrl/vshctrl.prx",
        "Recovery/recovery.prx",
    ]
    for prx in prx_files:
        source = "../" + prx
        # Header file and symbol name are the bare file name, no extension.
        symbol = os.path.splitext(os.path.split(prx)[-1])[0]
        status = os.system("bin2c %s %s.h %s" % (source, symbol, symbol))
        assert(status == 0)
# Script entry point: regenerate all embedded PRX headers.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
class FakeTime:
    """Stand-in for the time module that always reports the same instant,
    making gzip output byte-for-byte reproducible."""

    def time(self):
        # Fixed timestamp embedded into the gzip header.
        return 1225856967.109
import os, gzip, StringIO
# Patch gzip's clock so compressed output is deterministic across runs.
gzip.time = FakeTime()
def create_gzip(input, output):
    """Gzip-compress the file named by `input` into the file named by `output`."""
    source = open(input, 'rb')
    buffer = StringIO.StringIO()
    # Compress into the in-memory buffer first, then dump it to disk.
    compressor = gzip.GzipFile(fileobj=buffer, mode='wb')
    compressor.writelines(source)
    compressor.close()
    source.close()
    dest = open(output, 'wb')
    buffer.seek(0)
    dest.writelines(buffer)
    dest.close()
    buffer.close()
def cleanup():
    """Best-effort removal of the intermediate .gz files."""
    for name in ("installer.prx.gz", "Rebootex.prx.gz"):
        try:
            os.remove(name)
        except OSError:
            # A missing file is fine; there is nothing to clean.
            pass
def main():
    """Compress the installer/rebootex PRX files, embed them as C headers,
    then remove the intermediate archives."""
    create_gzip("../../Installer/installer.prx", "installer.prx.gz")
    create_gzip("../../Rebootex/Rebootex.prx", "Rebootex.prx.gz")
    for command in ("bin2c installer.prx.gz installer.h installer",
                    "bin2c Rebootex.prx.gz Rebootex_prx.h Rebootex_prx"):
        os.system(command)
    cleanup()
# Script entry point: rebuild the embedded installer/rebootex headers.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
from hashlib import *
import sys, struct
def sha512(psid):
    """Iterate SHA-1 512 times over a 16-byte PSID.

    Returns b'' when the input is not exactly 16 bytes.
    (Historical name: this is not the SHA-512 algorithm -- it chains SHA-1.)
    """
    if len(psid) != 16:
        return "".encode()
    digest = psid
    for _ in range(512):
        digest = sha1(digest).digest()
    return digest
def get_psid(str):
    """Decode a 32-character hex string into 16 raw bytes.

    Returns b'' when the input is not exactly 32 characters long.
    """
    if len(str) != 32:
        return "".encode()
    raw = "".encode()
    for pos in range(0, len(str), 2):
        raw += struct.pack('B', int(str[pos:pos + 2], 16))
    return raw
def main():
    """Command-line entry: print the 512-round SHA-1 hash of a PSID as a
    C array initializer.

    Usage: sha512.py <32-hex-char PSID>
    """
    if len(sys.argv) < 2:
        print ("Usage: sha512.py psid")
        exit(0)
    # Decode the hex PSID and hash it; both helpers return b'' on bad input.
    psid = get_psid(sys.argv[1])
    xhash = sha512(psid)
    if len(xhash) == 0:
        print ("wrong PSID")
        exit(0)
    # NOTE(review): the trailing-comma prints below are Python 2 idioms
    # (suppress the newline); under Python 3 they print a newline and build a
    # throwaway tuple. struct.unpack('B', xhash[i]) likewise assumes xhash[i]
    # is a one-byte string (Python 2) -- confirm the target interpreter
    # before modifying.
    print ("{\n\t"),
    for i in range(len(xhash)):
        # Start a new indented line after every 8 bytes.
        if i != 0 and i % 8 == 0:
            print ("\n\t"),
        print ("0x%02X, "%(struct.unpack('B', xhash[i])[0])),
    print ("\n},")
# Script entry point.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
import sys, hashlib
def toNID(name):
    """Return the PSP NID of a symbol name: the first 4 bytes of its SHA-1
    digest, byte-swapped to little-endian and formatted as an uppercase hex
    literal."""
    digest = hashlib.sha1(name.encode()).hexdigest().upper()
    swapped = [digest[pos:pos + 2] for pos in (6, 4, 2, 0)]
    return "0x" + "".join(swapped)
# Self-check against a known NID, then print the NID of each symbol name
# given on the command line.
if __name__ == "__main__":
    assert(toNID("sceKernelCpuSuspendIntr") == "0x092968F4")
    for name in sys.argv[1:]:
        print ("%s: %s"%(name, toNID(name)))
| Python |
#!/usr/bin/python
"""
pspbtcnf_editor: An script that add modules from pspbtcnf
"""
import sys, os, re
from getopt import *
from struct import *
# Magic number expected at the start of a valid pspbtcnf.bin.
BTCNF_MAGIC=0x0F803001
# Global verbosity flag, set from the -v option in main().
verbose = False
def print_usage():
    """Print the command-line synopsis using the bare program name."""
    prog = (os.path.split(sys.argv[0]))[-1]
    print ("%s: pspbtcnf.bin [-o output.bin] [-a add_module_name:before_module_name:flag]" % prog)
def replace_binary(data, offset, newdata):
    """Return data with newdata spliced in at offset; total length must not change."""
    patched = data[:offset] + newdata + data[offset + len(newdata):]
    # The splice must never grow or shrink the buffer.
    assert(len(data) == len(patched))
    return patched
def dump_binary(data, offset, size):
    """Return exactly `size` bytes of data starting at offset."""
    chunk = data[offset:offset + size]
    # Guard against reading past the end of the buffer.
    assert(len(chunk) == size)
    return chunk
def dump_binary_str(data, offset):
    """Return the NUL-terminated string that starts at `offset` in `data`.

    The previous byte-by-byte loop packed each byte with the signed 'b'
    struct format, which raises struct.error for any byte >= 0x80; slicing
    up to the terminator handles all byte values and also avoids the
    quadratic bytes-concatenation.

    @raise ValueError: if no NUL terminator follows offset
    """
    end = data.index(0, offset)  # position of the terminating NUL byte
    return data[offset:end].decode()
def add_prx_to_bootconf(srcfn, before_modname, modname, modflag):
    """Return new bootconf data with module `modname` inserted immediately
    before `before_modname`, carrying the low 16 bits of `modflag`.

    The new module's path string is appended to the name table, a 32-byte
    module record is cloned from the insertion point and patched, and the
    header offsets plus per-mode module counts are fixed up.

    @raise Exception: if the file is malformed or before_modname is absent
    """
    fn=open(srcfn, "rb")
    bootconf = fn.read()
    fn.close()
    if len(bootconf) < 64:
        raise Exception("Bad bootconf")
    # Header (first 64 bytes): magic, devkit version, then offset/count pairs
    # for the mode table, the module table and the module-name table.
    signature, devkit, modestart, nmodes, modulestart, nmodules, modnamestart, modnameend = unpack('LL8xLL8xLL8xLL8x', bootconf[:64])
    if verbose:
        print ("Devkit: 0x%08X"%(devkit))
        print ("modestart: 0x%08X"%(modestart))
        print ("nmodes: %d"%(nmodes))
        print ("modulestart: 0x%08X"%(modulestart))
        print ("nmodules: 0x%08X"%(nmodules))
        print ("modnamestart: 0x%08X"%(modnamestart))
        print ("modnameend: 0x%08X"%(modnameend))
    if signature != BTCNF_MAGIC or nmodules <= 0 or nmodes <= 0:
        raise Exception("Bad bootconf")
    # Append the new module path, NUL-terminated, to the end of the file
    # (the name table is the trailing region).
    bootconf = bootconf + modname.encode() + b'\0'
    modnameend += len(modname) + 1
    # Locate the 32-byte record of the module we insert in front of.
    i=0
    while i < nmodules:
        module_path, module_flags = unpack('L4xL4x16x', bootconf[modulestart+i*32:modulestart+(i+1)*32])
        module_name = dump_binary_str(bootconf, modnamestart+module_path)
        if verbose:
            print ("[%02d]: Module path: %s flag: 0x%08X"%(i, module_name, module_flags))
        if before_modname == module_name:
            break
        i+=1
    if i >= nmodules:
        raise Exception("module %s not found"%(before_modname))
    # Build the new record: its path field points at the string just appended
    # (relative to the name-table start); the flag word gets the 0x8001 marker
    # in its high half plus the caller's low 16 bits.
    module_path = modnameend - len(modname) - 1 - modnamestart
    module_flag = 0x80010000 | (modflag & 0xFFFF)
    newmod = dump_binary(bootconf, modulestart+i*32, 32)
    newmod = replace_binary(newmod, 0, pack('L', module_path))
    newmod = replace_binary(newmod, 8, pack('L', module_flag))
    # Splice the cloned-and-patched record in ahead of position i.
    bootconf = bootconf[0:modulestart+i*32] + newmod + bootconf[modulestart+i*32:]
    nmodules+=1
    bootconf = replace_binary(bootconf, 0x24, pack('L', nmodules))
    # The inserted 32 bytes shift the name table; fix both header offsets.
    modnamestart += 32
    bootconf = replace_binary(bootconf, 0x30, pack('L', modnamestart))
    modnameend += 32
    bootconf = replace_binary(bootconf, 0x34, pack('L', modnameend))
    # Every mode's module count (first u16 of each 32-byte mode entry)
    # increases by one.
    i = 0
    while i < nmodes:
        num = unpack('H', bootconf[modestart+i*32:modestart+i*32+2])[0]
        num += 1
        bootconf = replace_binary(bootconf, modestart + i * 32, pack('H', num))
        i += 1
    return bootconf
def write_file(output_fn, data):
    """Write `data` to `output_fn`, truncating any existing file."""
    out = open(output_fn, "wb")
    out.write(data)
    out.close()
def main():
    """Parse command-line options and optionally insert a module into a
    pspbtcnf.bin, writing the result to the -o target ('-' means discard)."""
    global verbose
    try:
        optlist, args = gnu_getopt(sys.argv, "a:o:vh")
    except GetoptError as err:
        print(err)
        print_usage()
        sys.exit(1)
    # default configure
    verbose = False
    dst_filename = "-"
    add_module = ""
    for o, a in optlist:
        if o == "-v":
            verbose = True
        elif o == "-h":
            print_usage()
            sys.exit()
        elif o == "-o":
            dst_filename = a
        elif o == "-a":
            add_module = a
        else:
            assert False, "unhandled option"
    if verbose:
        print (optlist, args)
    # gnu_getopt received the full argv, so args[0] is the program name and
    # args[1] is the input file.
    if len(args) < 2:
        print ("Missing input pspbtcnf.bin")
        sys.exit(1)
    src_filename = args[1]
    if verbose:
        print ("src_filename: " + src_filename)
        print ("dst_filename: " + dst_filename)
    # check add_module: expected form is name:before_name:hexflag
    if add_module != "":
        # NOTE(review): re.split's third positional argument is maxsplit, not
        # flags -- passing re.I (== 2) caps the split at 3 parts. It happens
        # to work for well-formed input, but a 4-part string would pass the
        # length check below with the tail fused into the flag field. Confirm
        # before changing.
        t = (re.split(":", add_module, re.I))
        if len(t) != 3:
            print ("Bad add_module input")
            sys.exit(1)
        add_module, before_module, add_module_flag = (re.split(":", add_module, re.I))
        if verbose:
            print ("add_module: " + add_module)
            print ("before_module: " + before_module)
            print ("add_module_flag: " + add_module_flag)
    if add_module != "":
        result = add_prx_to_bootconf(src_filename, before_module, add_module, int(add_module_flag, 16))
        if dst_filename == "-":
            # print("Bootconf result:")
            # print(result)
            pass
        else:
            write_file(dst_filename, result)
# Script entry point.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/python
class FakeTime:
    """Replacement clock for the gzip module: always the same instant, so
    every run produces identical compressed output."""

    def time(self):
        return 1225856967.109  # constant timestamp for the gzip header
import sys, os, struct, gzip, hashlib, StringIO
# Patch gzip's clock so compressed output is reproducible.
gzip.time = FakeTime()
def binary_replace(data, newdata, offset):
    """Return data with newdata overwriting the bytes at offset (no length check)."""
    prefix = data[0:offset]
    suffix = data[offset + len(newdata):]
    return prefix + newdata + suffix
def prx_compress(output, hdr, input, mod_name="", mod_attr=0xFFFFFFFF):
    """Gzip-compress an ELF/PRX file and wrap it with a 0x150-byte PSP header.

    @param output: path of the compressed PRX to write
    @param hdr: path of the 0x150-byte header template to patch
    @param input: path of the uncompressed ELF/PRX
    @param mod_name: if non-empty, replaces the 28-byte module name field
    @param mod_attr: if not the 0xFFFFFFFF sentinel, replaces the attribute field
    @return: 0 on success, -1 if the input is not an ELF/PRX
    """
    a=open(hdr, "rb")
    fileheader = a.read();
    a.close()
    a=open(input, "rb")
    elf = a.read(4);
    a.close()
    # Only ELF images (magic 0x7F 'E' 'L' 'F') can be wrapped.
    if (elf != '\x7fELF'.encode()):
        print ("not a ELF/PRX file!")
        return -1
    uncompsize = os.stat(input).st_size
    # Compress through an in-memory buffer; gzip.time is patched module-wide
    # (FakeTime) so the gzip header timestamp is reproducible.
    # NOTE(review): StringIO is the Python 2 module; this script is Py2-only.
    f_in=open(input, 'rb')
    temp=StringIO.StringIO()
    f=gzip.GzipFile(fileobj=temp, mode='wb')
    f.writelines(f_in)
    f.close()
    f_in.close()
    prx=temp.getvalue()
    temp.close()
    digest=hashlib.md5(prx).digest()
    filesize = len(fileheader) + len(prx)
    if mod_name != "":
        # The name field is exactly 28 bytes: NUL-pad or truncate.
        if len(mod_name) < 28:
            mod_name += "\x00" * (28-len(mod_name))
        else:
            mod_name = mod_name[0:28]
        fileheader = binary_replace(fileheader, mod_name.encode(), 0xA)
    if mod_attr != 0xFFFFFFFF:
        fileheader = binary_replace(fileheader, struct.pack('H', mod_attr), 0x4)
    # Patch sizes and the MD5 digest at their fixed header offsets.
    fileheader = binary_replace(fileheader, struct.pack('L', uncompsize), 0x28)
    fileheader = binary_replace(fileheader, struct.pack('L', filesize), 0x2c)
    fileheader = binary_replace(fileheader, struct.pack('L', len(prx)), 0xb0)
    fileheader = binary_replace(fileheader, digest, 0x140)
    a=open(output, "wb")
    assert(len(fileheader) == 0x150)
    a.write(fileheader)
    a.write(prx)
    a.close()
    # Best-effort removal of a possible stale temp file.
    try:
        os.remove("tmp.gz")
    except OSError:
        pass
    return 0
def main():
    """CLI entry: compress a PRX, optionally overriding module name/attributes."""
    argc = len(sys.argv)
    if argc < 4:
        print ("Usage: %s outfile prxhdr infile [modname] [modattr]\n"%(sys.argv[0]))
        exit(-1)
    if argc < 5:
        prx_compress(sys.argv[1], sys.argv[2], sys.argv[3])
    elif argc < 6:
        prx_compress(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4])
    else:
        prx_compress(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4], int(sys.argv[5], 16))
# Script entry point.
if __name__ == "__main__":
    main()
| Python |
#!/usr/bin/env python
# CGI script: emits JavaScript defining `timeskew`, the difference between
# the client's clock and this server's clock in milliseconds.
import time
t = time.time()
u = time.gmtime(t)
# HTTP-style date for the Date/Expires headers (%e and %T are glibc strftime
# extensions -- not portable to every platform).
s = time.strftime('%a, %e %b %Y %T GMT', u)
# NOTE(review): Python 2 print statements; convert to print() for Python 3.
print 'Content-Type: text/javascript'
print 'Cache-Control: no-cache'
print 'Date: ' + s
print 'Expires: ' + s
print ''
print 'var timeskew = new Date().getTime() - ' + str(t*1000) + ';'
| Python |
#!/bin/env python
import xml.dom.minidom as dom
import sys
import struct
# Number of weapon records (the conversion loops run WEAP_NUM + 1 times).
WEAP_NUM = 780
# Packed little-endian layout of one weapon record in tyrian.hdt.
struct_fmt = "<H BBHBBBB 8B8B8b8b8b8b8H bbBBBB"
def pack_weapon(dict):
    """Serialize one weapon dict into its packed binary record (struct_fmt)."""
    values = [dict['drain'], dict['shotRepeat'], dict['multi'], dict['weapAni'],
              dict['max'], dict['tx'], dict['ty'], dict['aim']]
    patterns = dict['patterns']
    # Each per-pattern field is written as a run of 8 consecutive values.
    for field in ('attack', 'del', 'sx', 'sy', 'bx', 'by', 'sg'):
        for slot in range(8):
            values.append(patterns[slot][field])
    values.extend([dict['acceleration'], dict['accelerationx'],
                   dict['circleSize'], dict['sound'],
                   dict['trail'], dict['shipBlastFilter']])
    return struct.pack(struct_fmt, *values)
def unpack_weapon(str):
    """Deserialize a packed binary weapon record (struct_fmt) into a dict."""
    fields = struct.unpack(struct_fmt, str)
    head = ('drain', 'shotRepeat', 'multi', 'weapAni', 'max', 'tx', 'ty', 'aim')
    result = {}
    for name, value in zip(head, fields[:8]):
        result[name] = value
    # The 7 per-pattern fields are stored as runs of 8 consecutive values.
    patterns = [{} for _ in range(8)]
    pos = 8
    for field in ('attack', 'del', 'sx', 'sy', 'bx', 'by', 'sg'):
        for slot in range(8):
            patterns[slot][field] = fields[pos]
            pos += 1
    result['patterns'] = patterns
    tail = ('acceleration', 'accelerationx', 'circleSize',
            'sound', 'trail', 'shipBlastFilter')
    for name, value in zip(tail, fields[pos:]):
        result[name] = value
    return result
def DOMToDict(doc, weap_node):
    """Convert a <weapon> DOM element into a weapon dict.

    Scalar child elements contribute their integer `value` attribute; the
    <patterns> child fills a list of 8 per-pattern dicts from the attributes
    of its <entry> children.

    Fixes: the original reused the loop variable `i` at three nesting levels
    (outer element loop, attribute-index comprehension and attribute loop),
    which only worked by accident; it also used Python-2-only `xrange`.

    @param doc: unused, kept for signature compatibility with callers
    """
    result = {}
    for child in weap_node.childNodes:
        if child.nodeType != child.ELEMENT_NODE:
            continue
        if child.hasAttribute("value"):
            result[child.tagName] = int(child.getAttribute("value"))
        elif child.tagName == "patterns":
            result['patterns'] = [{} for _ in range(8)]
            index = 0
            for entry in child.childNodes:
                if entry.nodeType != entry.ELEMENT_NODE:
                    continue
                for pos in range(entry.attributes.length):
                    attr = entry.attributes.item(pos)
                    result['patterns'][index][attr.name] = int(attr.nodeValue)
                index += 1
    return result
def dictToDOM(doc, root, dict, index=None):
    """Append a <weapon> element representing `dict` to `root`.

    Scalar entries become child elements with a `value` attribute; a list
    value (the pattern list) becomes a sequence of <entry> children whose
    attributes are the per-pattern fields. Keys and attribute names are
    emitted in sorted order for stable output.

    Fixes: the inner attribute loop of the original rebound both `keys` and
    the loop variable `i` of the outer loop, silently shadowing iteration
    state; it also used Python-2-only in-place .sort() on dict.keys().

    @param index: optional numeric index emitted as a 4-digit hex attribute
    """
    entry = doc.createElement("weapon")
    if index is not None:
        entry.setAttribute("index", "%04X" % (index,))
    for key in sorted(dict.keys()):
        node = doc.createElement(key)
        if isinstance(dict[key], list):
            for pattern in dict[key]:
                child = doc.createElement("entry")
                for attr_name in sorted(pattern.keys()):
                    child.setAttribute(attr_name, str(pattern[attr_name]))
                node.appendChild(child)
        else:
            node.setAttribute("value", str(dict[key]))
        entry.appendChild(node)
    root.appendChild(entry)
def toXML(hdt, output):
doc = dom.getDOMImplementation().createDocument(None, "TyrianHDT", None)
try:
f = file(hdt, "rb")
except IOError:
print "%s couldn't be opened for reading." % (hdt,)
sys.exit(1)
try:
outf = file(output, "w")
except IOError:
print "%s couldn't be opened for writing." % (outf,)
sys.exit(1)
f.seek(struct.unpack("<i", f.read(4))[0])
f.read(7*2)
sys.stdout.write("Converting weapons")
index = 0
for i in xrange(WEAP_NUM+1):
tmp = f.read(struct.calcsize(struct_fmt))
shot = unpack_weapon(tmp)
dictToDOM(doc, doc.documentElement, shot, index)
index += 1
sys.stdout.write(".")
sys.stdout.flush()
sys.stdout.write("Done!\n")
sys.stdout.write("Writing XML...")
sys.stdout.flush()
doc.writexml(outf, addindent="\t", newl="\n")
sys.stdout.write("Done!\n")
def toHDT(input, hdt):
    """Read weapon definitions from an XML file and patch them back into a
    tyrian.hdt file in place (r+b)."""
    try:
        f = file(input, "r")
    except IOError:
        print "%s couldn't be opened for reading." % (input,)
        sys.exit(1)
    try:
        outf = file(hdt, "r+b")
    except IOError:
        print "%s couldn't be opened for writing." % (hdt,)
        sys.exit(1)
    # Seek to the weapons section (offset stored in the first 4 bytes) and
    # skip its 14-byte preamble.
    outf.seek(struct.unpack("<i", outf.read(4))[0])
    outf.read(7*2)
    sys.stdout.write("Reading XML...")
    sys.stdout.flush()
    doc = dom.parse(f)
    sys.stdout.write("Done!\n")
    sys.stdout.write("Writing weapons")
    # Re-pack every <weapon> element in document order over the old records.
    for i in doc.documentElement.childNodes:
        if i.nodeType != i.ELEMENT_NODE:
            continue
        shot = DOMToDict(doc, i)
        str = pack_weapon(shot)
        outf.write(str)
        sys.stdout.write(".")
        sys.stdout.flush()
    sys.stdout.write("Done!\n")
def printHelp():
    """Print usage for both conversion directions and exit non-zero."""
    print "Usage: weapons.py toxml path/to/tyrian.hdt output.xml"
    print " weapons.py tohdt input.xml path/to/tyrian.hdt"
    sys.exit(1)
##############################
# Entry point: dispatch on the conversion mode given as the first argument.
if __name__ == "__main__":
    # Expect exactly: <mode> <input> <output>.
    if len(sys.argv) != 4:
        printHelp()
    if sys.argv[1] == "toxml":
        toXML(sys.argv[2], sys.argv[3])
    elif sys.argv[1] == "tohdt":
        toHDT(sys.argv[2], sys.argv[3])
    else:
        printHelp()
| Python |
# -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.1.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
WEBSITE = 'http://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
# Content types accepted for upload; also used to decide whether a
# thumbnail serving URL should be generated.
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
    # Deferred task: delete the uploaded blobs once EXPIRATION_TIME passes.
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    """Handles CORS-enabled file uploads into the App Engine blobstore.

    Uploaded blobs are scheduled for automatic deletion after
    EXPIRATION_TIME seconds via a deferred cleanup task.
    """

    def initialize(self, request, response):
        """Attach permissive CORS headers to every response."""
        super(UploadHandler, self).initialize(request, response)
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
        self.response.headers[
            'Access-Control-Allow-Headers'
        ] = 'Content-Type, Content-Range, Content-Disposition'

    def validate(self, file):
        """Check size and content type; on failure set file['error'] and
        return False."""
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False

    def get_file_size(self, file):
        """Return the size of an open file object, leaving its position at
        the beginning."""
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size

    def write_blob(self, data, info):
        """Store data in the blobstore and return its blob key."""
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)

    def handle_upload(self):
        """Validate and store every posted file.

        Returns a list of per-file result dicts containing the name, type,
        size and either an error message or the url/deleteUrl (plus a
        thumbnailUrl for images).
        """
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            # Plain form fields arrive as unicode; only file parts matter.
            if type(fieldStorage) is unicode:
                continue
            result = {}
            # Strip any Windows-style path prefix from the client filename.
            result['name'] = re.sub(
                r'^.*\\',
                '',
                fieldStorage.filename
            )
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['deleteType'] = 'DELETE'
                result['deleteUrl'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url.startswith(
                                'https'
                            )
                        )
                        result['thumbnailUrl'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    # Was a bare `except:`, which also swallowed SystemExit
                    # and KeyboardInterrupt; keep the best-effort fallback
                    # below but only for real errors.
                    except Exception:  # Could not get an image serving url
                        pass
                if 'url' not in result:
                    # Fall back to our own download handler URL.
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # Schedule automatic deletion of everything just uploaded.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results

    def options(self):
        # CORS preflight: the headers were already set in initialize().
        pass

    def head(self):
        pass

    def get(self):
        self.redirect(WEBSITE)

    def post(self):
        """Handle an upload (or a tunneled DELETE) and reply with JSON."""
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',', ':'))
        # Optional client-provided redirect template with the JSON inlined.
        redirect = self.request.get('redirect')
        if redirect:
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)

    def delete(self):
        """Delete the blob named by the `key` query parameter."""
        blobstore.delete(self.request.get('key') or '')
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serves a stored blob back to the client as a forced download."""

    def get(self, key, filename):
        if not blobstore.get(key):
            self.error(404)
        else:
            # Prevent browsers from MIME-sniffing the content-type:
            self.response.headers['X-Content-Type-Options'] = 'nosniff'
            # Cache for the expiration time:
            self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
            # Send the file forcing a download dialog:
            self.send_blob(key, save_as=filename, content_type='application/octet-stream')
# WSGI application: the root path handles uploads; /<key>/<filename> serves
# stored blobs back as downloads.
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
| Python |
import socket

# UDP port to listen on.
port = 54321
#host = "137.138.196.188"
host="0.0.0.0"
UDPsock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# Accept UDP datagrams on the port from any sender.
UDPsock.bind((host, port))
#UDPsock.connect(('137.138.196.188',port))
# NOTE(review): Python 2 print statements throughout this script.
print "Ecoute sur port", port
# Receive datagrams forever; an empty payload means the client quit.
while 1:
    data, addr = UDPsock.recvfrom(128)
    # print "Recu:", data , "de", addr
    # print "Recu:", data
    if not data:
        print "Client has exited!"
        break
    else:
        print "\nReceived message '", data,"' from ", addr
| Python |
import socket,time

# Destination host/port for the UDP commands.
port = 54321
#host = "localhost"
host = "192.168.0.9"
#host = "137.138.196.188"
#host="128.141.140.144"
UDPsock=socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#s.bind(("", port))
# Read commands from stdin forever and send each one as a UDP datagram.
# NOTE(review): Python 2 only (raw_input, print statements).
while ( 1 ):
    commande = raw_input("Commande? ")
    UDPsock.sendto(commande,(host, port))
    print "envoie commande[",commande,"]"
'''
Module which brings history information about files from Mercurial.
@author: Rodrigo Damazio
'''
import re
import subprocess
REVISION_REGEX = re.compile(r'(?P<hash>[0-9a-f]{12}):.*')
def _GetOutputLines(args):
'''
Runs an external process and returns its output as a list of lines.
@param args: the arguments to run
'''
process = subprocess.Popen(args,
stdout=subprocess.PIPE,
universal_newlines = True,
shell = False)
output = process.communicate()[0]
return output.splitlines()
def FillMercurialRevisions(filename, parsed_file):
    '''
    Fills the revs attribute of all strings in the given parsed file with
    a list of revisions that touched the lines corresponding to that string.

    @param filename: the name of the file to get history for
    @param parsed_file: the parsed file to modify
    @raise Exception: if a line of hg annotate output cannot be parsed
    '''
    # Take output of hg annotate to get revision of each line.
    output_lines = _GetOutputLines(['hg', 'annotate', '-c', filename])
    # Create a map of line -> revision (key is list index, line 0 doesn't exist).
    line_revs = ['dummy']
    for line in output_lines:
        rev_match = REVISION_REGEX.match(line)
        if not rev_match:
            # Raising a string is invalid (TypeError on modern Pythons); use
            # a real exception so the message actually reaches the caller.
            raise Exception('Unexpected line of output from hg: %s' % line)
        rev_hash = rev_match.group('hash')
        line_revs.append(rev_hash)
    for str in parsed_file.itervalues():
        # Get the lines that correspond to each string.
        start_line = str['startLine']
        end_line = str['endLine']
        # Get the revisions that touched those lines.
        revs = []
        for line_number in range(start_line, end_line + 1):
            revs.append(line_revs[line_number])
        # Merge with any revisions that were already there
        # (for explicit revision specification).
        if 'revs' in str:
            revs += str['revs']
        # Assign the revisions to the string.
        str['revs'] = frozenset(revs)
def DoesRevisionSuperceed(filename, rev1, rev2):
    '''
    Tells whether a revision superceeds another.

    This essentially means that the older revision is an ancestor of the
    newer one; it also holds when the two revisions are identical.

    @param rev1: the revision that may be superceeding the other
    @param rev2: the revision that may be superceeded
    @return: True if rev1 superceeds rev2 or they're the same
    '''
    if rev1 == rev2:
        return True
    # Ask hg for every ancestor of rev1 (one short node per line) and check
    # whether rev2 appears among them.
    # TODO: Add filename
    ancestor_query = 'ancestors(%s)' % rev1
    hg_args = ['hg', 'log', '-r', ancestor_query,
               '--template', '{node|short}\n', filename]
    ancestors = _GetOutputLines(hg_args)
    return rev2 in ancestors
def NewestRevision(filename, rev1, rev2):
    '''
    Returns which of two revisions is closest to the head of the repository.

    If none of them is the ancestor of the other, then we return either one.

    @param rev1: the first revision
    @param rev2: the second revision
    '''
    winner = rev2
    if DoesRevisionSuperceed(filename, rev1, rev2):
        winner = rev1
    return winner
#!/usr/bin/python
'''
Entry point for My Tracks i18n tool.
@author: Rodrigo Damazio
'''
import mytracks.files
import mytracks.translate
import mytracks.validate
import sys
def Usage():
    """Print command usage and exit with an error status."""
    print 'Usage: %s <command> [<language> ...]\n' % sys.argv[0]
    print 'Commands are:'
    print ' cleanup'
    print ' translate'
    print ' validate'
    sys.exit(1)
def Translate(languages):
    '''
    Asks the user to interactively translate any missing or oudated strings
    from the files for the given languages.

    @param languages: the languages to translate
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    missing = validator.missing_in_lang()
    outdated = validator.outdated_in_lang()
    for lang in languages:
        # The validator only records languages that actually have problems,
        # so index with .get() -- the previous missing[lang] raised KeyError
        # for clean languages. Normalize to lists as well: the missing
        # entries are frozensets, which cannot be concatenated with the
        # outdated list directly.
        untranslated = list(missing.get(lang, [])) + list(outdated.get(lang, []))
        if len(untranslated) == 0:
            continue
        translator = mytracks.translate.Translator(lang)
        translator.Translate(untranslated)
def Validate(languages):
    '''
    Computes and displays errors in the string files for the given languages.

    @param languages: the languages to compute for
    @return: the total number of problems found (0 when everything is OK)
    '''
    validator = mytracks.validate.Validator(languages)
    validator.Validate()
    error_count = 0
    if (validator.valid()):
        print 'All files OK'
    else:
        # Each validator report maps language -> collection of string names.
        for lang, missing in validator.missing_in_master().iteritems():
            print 'Missing in master, present in %s: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, missing in validator.missing_in_lang().iteritems():
            print 'Missing in %s, present in master: %s:' % (lang, str(missing))
            error_count = error_count + len(missing)
        for lang, outdated in validator.outdated_in_lang().iteritems():
            print 'Outdated in %s: %s:' % (lang, str(outdated))
            error_count = error_count + len(outdated)
    return error_count
if __name__ == '__main__':
argv = sys.argv
argc = len(argv)
if argc < 2:
Usage()
languages = mytracks.files.GetAllLanguageFiles()
if argc == 3:
langs = set(argv[2:])
if not langs.issubset(languages):
raise 'Language(s) not found'
# Filter just to the languages specified
languages = dict((lang, lang_file)
for lang, lang_file in languages.iteritems()
if lang in langs or lang == 'en' )
cmd = argv[1]
if cmd == 'translate':
Translate(languages)
elif cmd == 'validate':
error_count = Validate(languages)
else:
Usage()
error_count = 0
print '%d errors found.' % error_count
| Python |
'''
Module which prompts the user for translations and saves them.
TODO: implement
@author: Rodrigo Damazio
'''
class Translator(object):
    '''
    Interactive translator for a single language.

    Stub implementation: it only reports the names of the strings that need
    translation.
    '''

    def __init__(self, language):
        '''
        Constructor

        @param language: the language code this translator handles
        '''
        self._language = language

    def Translate(self, string_names):
        # TODO: implement -- for now just report what needs translating.
        print string_names
'''
Module which compares languague files to the master file and detects
issues.
@author: Rodrigo Damazio
'''
import os
from mytracks.parser import StringsParser
import mytracks.history
class Validator(object):
    '''
    Compares each language's strings.xml against the master ('en') file and
    records missing and outdated keys.
    '''

    def __init__(self, languages):
        '''
        Builds a strings file validator.

        Parses strings.xml for every language up front and annotates each
        string with the Mercurial revisions that touched it.

        Params:
        @param languages: a dictionary mapping each language to its corresponding directory
        '''
        self._langs = {}
        self._master = None
        self._language_paths = languages
        parser = StringsParser()
        for lang, lang_dir in languages.iteritems():
            filename = os.path.join(lang_dir, 'strings.xml')
            parsed_file = parser.Parse(filename)
            mytracks.history.FillMercurialRevisions(filename, parsed_file)
            # 'en' is the master everything else is compared against.
            if lang == 'en':
                self._master = parsed_file
            else:
                self._langs[lang] = parsed_file
        self._Reset()

    def Validate(self):
        '''
        Computes whether all the data in the files for the given languages is valid.

        Results are exposed via valid(), missing_in_master(),
        missing_in_lang() and outdated_in_lang().
        '''
        self._Reset()
        self._ValidateMissingKeys()
        self._ValidateOutdatedKeys()

    def valid(self):
        # True when the last Validate() run found no problems of any kind.
        return (len(self._missing_in_master) == 0 and
                len(self._missing_in_lang) == 0 and
                len(self._outdated_in_lang) == 0)

    def missing_in_master(self):
        # Map of language -> keys present in that language but not in master.
        return self._missing_in_master

    def missing_in_lang(self):
        # Map of language -> keys present in master but not in that language.
        return self._missing_in_lang

    def outdated_in_lang(self):
        # Map of language -> keys whose master entry changed more recently.
        return self._outdated_in_lang

    def _Reset(self):
        # These are maps from language to string name list
        self._missing_in_master = {}
        self._missing_in_lang = {}
        self._outdated_in_lang = {}

    def _ValidateMissingKeys(self):
        '''
        Computes whether there are missing keys on either side.
        '''
        master_keys = frozenset(self._master.iterkeys())
        for lang, file in self._langs.iteritems():
            keys = frozenset(file.iterkeys())
            missing_in_master = keys - master_keys
            missing_in_lang = master_keys - keys
            # Only languages that actually have problems are recorded.
            if len(missing_in_master) > 0:
                self._missing_in_master[lang] = missing_in_master
            if len(missing_in_lang) > 0:
                self._missing_in_lang[lang] = missing_in_lang

    def _ValidateOutdatedKeys(self):
        '''
        Computes whether any of the language keys are outdated with relation
        to the master keys.
        '''
        # NOTE(review): self._master[key] raises KeyError for keys missing
        # from the master file (the missing_in_master case) -- confirm this
        # is only run on repositories where that cannot happen.
        for lang, file in self._langs.iteritems():
            outdated = []
            for key, str in file.iteritems():
                # Get all revisions that touched master and language files for this
                # string.
                master_str = self._master[key]
                master_revs = master_str['revs']
                lang_revs = str['revs']
                if not master_revs or not lang_revs:
                    print 'WARNING: No revision for %s in %s' % (key, lang)
                    continue
                master_file = os.path.join(self._language_paths['en'], 'strings.xml')
                lang_file = os.path.join(self._language_paths[lang], 'strings.xml')
                # Assume that the repository has a single head (TODO: check that),
                # and as such there is always one revision which superceeds all others.
                # NOTE(review): every NewestRevision comparison shells out to
                # hg, so this is O(revisions) subprocess calls per string.
                master_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(master_file, r1, r2),
                    master_revs)
                lang_rev = reduce(
                    lambda r1, r2: mytracks.history.NewestRevision(lang_file, r1, r2),
                    lang_revs)
                # If the master version is newer than the lang version
                # NOTE(review): DoesRevisionSuperceed also returns True when
                # both revisions are identical, so strings last touched by
                # the same revision in both files are flagged -- confirm
                # this is intended.
                if mytracks.history.DoesRevisionSuperceed(lang_file, master_rev, lang_rev):
                    outdated.append(key)
            if len(outdated) > 0:
                self._outdated_in_lang[lang] = outdated
| Python |
'''
Module for dealing with resource files (but not their contents).
@author: Rodrigo Damazio
'''
import os.path
from glob import glob
import re
# Path of the Android resources directory, relative to the project root.
MYTRACKS_RES_DIR = 'MyTracks/res'
# Master (English) values directory name, and the glob matching every other
# per-language values directory.
ANDROID_MASTER_VALUES = 'values'
ANDROID_VALUES_MASK = 'values-*'
def GetMyTracksDir():
    '''
    Returns the directory in which the MyTracks directory is located.

    Walks upward from the current working directory until a directory
    containing MYTRACKS_RES_DIR is found.

    @raise Exception: if the filesystem root is reached without finding it
    '''
    path = os.getcwd()
    while not os.path.isdir(os.path.join(path, MYTRACKS_RES_DIR)):
        parent = os.path.split(path)[0]
        # Stop once we can no longer go up (the original compared against '/',
        # which never terminates on Windows, and raised a string, which is not
        # a valid exception type).
        if parent == path:
            raise Exception('Not in My Tracks project')
        # Go up one level
        path = parent
    return path
def GetAllLanguageFiles():
    '''
    Returns a mapping from all found languages to their respective directories.

    The master ('en') language maps to the ANDROID_MASTER_VALUES directory;
    every other language code is extracted from its 'values-<code>' directory
    name.

    @raise Exception: if no language directories or no master directory exist
    '''
    mytracks_path = GetMyTracksDir()
    res_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_VALUES_MASK)
    language_dirs = glob(res_dir)
    master_dir = os.path.join(mytracks_path, MYTRACKS_RES_DIR, ANDROID_MASTER_VALUES)
    # Raising strings is invalid (TypeError); use real exceptions.
    if not language_dirs:
        raise Exception('No languages found!')
    if not os.path.isdir(master_dir):
        raise Exception('Couldn\'t find master file')
    # Extract the language code from each 'values-<code>' directory name.
    # (Renamed the loop variable: the original shadowed the builtin `dir`.)
    language_tuples = [(re.findall(r'.*values-([A-Za-z-]+)', lang_dir)[0], lang_dir)
                       for lang_dir in language_dirs]
    language_tuples.append(('en', master_dir))
    return dict(language_tuples)
| Python |
'''
Module which parses a string XML file.
@author: Rodrigo Damazio
'''
from xml.parsers.expat import ParserCreate
import re
#import xml.etree.ElementTree as ET
class StringsParser(object):
    '''
    Parser for string XML files.

    This object is not thread-safe and should be used for parsing a single
    file at a time, only.
    '''

    # Recognizes a KEEP_PARENT annotation inside an XML comment, with an
    # optional 12-hex-digit revision identifier.
    _KEEP_PARENT_REGEX = re.compile(r'\s*KEEP_PARENT\s+'
                                    r'name\s*=\s*[\'"]?(?P<name>[a-z0-9_]+)[\'"]?'
                                    r'(?:\s+rev=[\'"]?(?P<rev>[0-9a-f]{12})[\'"]?)?\s*',
                                    re.MULTILINE | re.DOTALL)

    def Parse(self, file):
        '''
        Parses the given file and returns a dictionary mapping keys to an object
        with attributes for that key, such as the value, start/end line and
        explicit revisions.

        In addition to the standard XML format of the strings file, this parser
        supports an annotation inside comments, in one of these formats:
        <!-- KEEP_PARENT name="bla" -->
        <!-- KEEP_PARENT name="bla" rev="123456789012" -->
        Such an annotation indicates that we're explicitly inheriting from the
        master file (and the optional revision says that this decision is
        compatible with the master file up to that revision).

        @param file: the name of the file to parse
        '''
        self._Reset()
        # Unfortunately expat is the only parser that will give us line numbers.
        self._xml_parser = ParserCreate()
        self._xml_parser.StartElementHandler = self._StartElementHandler
        self._xml_parser.EndElementHandler = self._EndElementHandler
        self._xml_parser.CharacterDataHandler = self._CharacterDataHandler
        self._xml_parser.CommentHandler = self._CommentHandler
        # Open in binary mode: expat's ParseFile reads raw bytes and performs
        # the XML decoding itself (text mode breaks this under Python 3).
        file_obj = open(file, 'rb')
        try:
            self._xml_parser.ParseFile(file_obj)
        finally:
            # Close the file even when parsing raises (the original leaked it).
            file_obj.close()
        return self._all_strings

    def _Reset(self):
        # Per-parse state for the <string> element currently being read.
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None
        self._all_strings = {}

    def _StartElementHandler(self, name, attrs):
        # Only named <string> elements are of interest.
        if name != 'string':
            return
        if 'name' not in attrs:
            return
        assert not self._currentString
        assert not self._currentStringName
        self._currentString = {
            'startLine': self._xml_parser.CurrentLineNumber,
        }
        # An explicit rev attribute seeds the revision list for this string.
        if 'rev' in attrs:
            self._currentString['revs'] = [attrs['rev']]
        self._currentStringName = attrs['name']
        self._currentStringValue = ''

    def _EndElementHandler(self, name):
        if name != 'string':
            return
        assert self._currentString
        assert self._currentStringName
        self._currentString['value'] = self._currentStringValue
        self._currentString['endLine'] = self._xml_parser.CurrentLineNumber
        self._all_strings[self._currentStringName] = self._currentString
        self._currentString = None
        self._currentStringName = None
        self._currentStringValue = None

    def _CharacterDataHandler(self, data):
        # Character data may arrive in several chunks; accumulate it.
        if not self._currentString:
            return
        self._currentStringValue += data

    def _CommentHandler(self, data):
        keep_parent_match = self._KEEP_PARENT_REGEX.match(data)
        if not keep_parent_match:
            return
        # A KEEP_PARENT annotation acts as a placeholder entry for the key.
        name = keep_parent_match.group('name')
        self._all_strings[name] = {
            'keepParent': True,
            'startLine': self._xml_parser.CurrentLineNumber,
            'endLine': self._xml_parser.CurrentLineNumber
        }
        rev = keep_parent_match.group('rev')
        if rev:
            self._all_strings[name]['revs'] = [rev]
#
# Secret Labs' Regular Expression Engine core module
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# 2010-01-16 mrab Python front-end re-written and extended
import string
import sys
import unicodedata
from collections import defaultdict
import _regex
# The public names exported by `from ... import *`.
__all__ = ["A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH",
    "F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "R",
    "REVERSE", "S", "DOTALL", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0",
    "V1", "VERSION1", "W", "WORD", "X", "VERBOSE", "error",
    "Scanner"]
# The regex exception.
class error(Exception):
    """Exception raised for an invalid regular expression.

    When both the pattern and the position are given, the position (and, for
    multi-line patterns, the line and column) is appended to the message.
    """
    def __init__(self, message, pattern=None, pos=None):
        # NOTE(review): `unicode` is the Python 2 text type, so this module
        # targets Python 2 (note the u'' literal).
        newline = u'\n' if isinstance(pattern, unicode) else '\n'
        self.msg = message
        self.pattern = pattern
        self.pos = pos
        if pattern is not None and pos is not None:
            # 1-based line number; column is the offset from the previous
            # newline (rfind returns -1 when there is none, making it 1-based).
            self.lineno = pattern.count(newline, 0, pos) + 1
            self.colno = pos - pattern.rfind(newline, 0, pos)
            message = "%s at position %d" % (message, pos)
            if newline in pattern:
                message += " (line %d, column %d)" % (self.lineno, self.colno)
        Exception.__init__(self, message)
# The exception for when a positional flag has been turned on in the old
# behaviour.
class _UnscopedFlagSet(Exception):
    "Raised internally to force a reparse with the new global flags applied."
    pass

# The exception for when parsing fails and we want to try something else.
class ParseError(Exception):
    "Raised when a parse attempt fails and an alternative should be tried."
    pass

# The exception for when there isn't a valid first set.
class _FirstSetError(Exception):
    "Raised when a valid firstset cannot be built."
    pass
# Flags. Each flag is a bitmask with both a one-letter and a long alias.
A = ASCII = 0x80          # Assume ASCII locale.
B = BESTMATCH = 0x1000    # Best fuzzy match.
D = DEBUG = 0x200         # Print parsed pattern.
E = ENHANCEMATCH = 0x8000 # Attempt to improve the fit after finding the first
                          # fuzzy match.
F = FULLCASE = 0x4000     # Unicode full case-folding.
I = IGNORECASE = 0x2      # Ignore case.
L = LOCALE = 0x4          # Assume current 8-bit locale.
M = MULTILINE = 0x8       # Make anchors look for newline.
R = REVERSE = 0x400       # Search backwards.
S = DOTALL = 0x10         # Make dot match newline.
U = UNICODE = 0x20        # Assume Unicode locale.
V0 = VERSION0 = 0x2000    # Old legacy behaviour.
V1 = VERSION1 = 0x100     # New enhanced behaviour.
W = WORD = 0x800          # Default Unicode word breaks.
X = VERBOSE = 0x40        # Ignore whitespace and comments.
T = TEMPLATE = 0x1        # Template (present because re module has it).
DEFAULT_VERSION = VERSION1
_ALL_VERSIONS = VERSION0 | VERSION1
_ALL_ENCODINGS = ASCII | LOCALE | UNICODE
# The default flags for the various versions.
DEFAULT_FLAGS = {VERSION0: 0, VERSION1: FULLCASE}
# The mask for the flags.
GLOBAL_FLAGS = (_ALL_ENCODINGS | _ALL_VERSIONS | BESTMATCH | DEBUG |
  ENHANCEMATCH | REVERSE)
SCOPED_FLAGS = FULLCASE | IGNORECASE | MULTILINE | DOTALL | WORD | VERBOSE
# Character classes used by the parser (ASCII only).
ALPHA = frozenset(string.ascii_letters)
DIGITS = frozenset(string.digits)
ALNUM = ALPHA | DIGITS
OCT_DIGITS = frozenset(string.octdigits)
HEX_DIGITS = frozenset(string.hexdigits)
# The empty string is included so end-of-pattern is treated as special.
SPECIAL_CHARS = frozenset("()|?*+{^$.[\\#") | frozenset([""])
NAMED_CHAR_PART = ALNUM | frozenset(" -")
PROPERTY_NAME_PART = ALNUM | frozenset(" &_-.")
SET_OPS = ("||", "~~", "&&", "--")
# The width of the code words inside the regex engine.
BYTES_PER_CODE = _regex.get_code_size()
BITS_PER_CODE = BYTES_PER_CODE * 8
# The repeat count which represents infinity.
UNLIMITED = (1 << BITS_PER_CODE) - 1
# The regular expression flags, keyed by their inline-flag letters.
REGEX_FLAGS = {"a": ASCII, "b": BESTMATCH, "e": ENHANCEMATCH, "f": FULLCASE,
  "i": IGNORECASE, "L": LOCALE, "m": MULTILINE, "r": REVERSE, "s": DOTALL, "u":
  UNICODE, "V0": VERSION0, "V1": VERSION1, "w": WORD, "x": VERBOSE}
# The case flags.
CASE_FLAGS = FULLCASE | IGNORECASE
NOCASE = 0
FULLIGNORECASE = FULLCASE | IGNORECASE
FULL_CASE_FOLDING = UNICODE | FULLIGNORECASE
# The number of digits in hexadecimal escapes.
HEX_ESCAPES = {"x": 2, "u": 4, "U": 8}
# A singleton which indicates a comment within a pattern.
COMMENT = object()
# A singleton which indicates an inline-flags element within a pattern.
FLAGS = object()
# The names of the opcodes. Each name is assigned a sequential integer code
# below (via enumerate), matching the opcode values in the _regex C engine.
OPCODES = """
FAILURE
SUCCESS
ANY
ANY_ALL
ANY_ALL_REV
ANY_REV
ANY_U
ANY_U_REV
ATOMIC
BOUNDARY
BRANCH
CALL_REF
CHARACTER
CHARACTER_IGN
CHARACTER_IGN_REV
CHARACTER_REV
DEFAULT_BOUNDARY
DEFAULT_END_OF_WORD
DEFAULT_START_OF_WORD
END
END_OF_LINE
END_OF_LINE_U
END_OF_STRING
END_OF_STRING_LINE
END_OF_STRING_LINE_U
END_OF_WORD
FUZZY
GRAPHEME_BOUNDARY
GREEDY_REPEAT
GROUP
GROUP_CALL
GROUP_EXISTS
LAZY_REPEAT
LOOKAROUND
NEXT
PROPERTY
PROPERTY_IGN
PROPERTY_IGN_REV
PROPERTY_REV
RANGE
RANGE_IGN
RANGE_IGN_REV
RANGE_REV
REF_GROUP
REF_GROUP_FLD
REF_GROUP_FLD_REV
REF_GROUP_IGN
REF_GROUP_IGN_REV
REF_GROUP_REV
SEARCH_ANCHOR
SET_DIFF
SET_DIFF_IGN
SET_DIFF_IGN_REV
SET_DIFF_REV
SET_INTER
SET_INTER_IGN
SET_INTER_IGN_REV
SET_INTER_REV
SET_SYM_DIFF
SET_SYM_DIFF_IGN
SET_SYM_DIFF_IGN_REV
SET_SYM_DIFF_REV
SET_UNION
SET_UNION_IGN
SET_UNION_IGN_REV
SET_UNION_REV
START_OF_LINE
START_OF_LINE_U
START_OF_STRING
START_OF_WORD
STRING
STRING_FLD
STRING_FLD_REV
STRING_IGN
STRING_IGN_REV
STRING_REV
STRING_SET
STRING_SET_FLD
STRING_SET_FLD_REV
STRING_SET_IGN
STRING_SET_IGN_REV
STRING_SET_REV
"""

# Define the opcodes in a namespace.
class Namespace(object):
    "An empty attribute holder for the opcode constants."
    pass

OP = Namespace()
# Assign each opcode name its index, eg. OP.FAILURE == 0, OP.SUCCESS == 1, ...
for i, op in enumerate(OPCODES.split()):
    setattr(OP, op, i)
def _shrink_cache(cache_dict, args_dict, locale_sensitive, max_length, divisor=5):
"""Make room in the given cache.
Args:
cache_dict: The cache dictionary to modify.
args_dict: The dictionary of named list args used by patterns.
max_length: Maximum # of entries in cache_dict before it is shrunk.
divisor: Cache will shrink to max_length - 1/divisor*max_length items.
"""
# Toss out a fraction of the entries at random to make room for new ones.
# A random algorithm was chosen as opposed to simply cache_dict.popitem()
# as popitem could penalize the same regular expression repeatedly based
# on its internal hash value. Being random should spread the cache miss
# love around.
cache_keys = tuple(cache_dict.keys())
overage = len(cache_keys) - max_length
if overage < 0:
# Cache is already within limits. Normally this should not happen
# but it could due to multithreading.
return
number_to_toss = max_length // divisor + overage
# The import is done here to avoid a circular dependency.
import random
if not hasattr(random, 'sample'):
# Do nothing while resolving the circular dependency:
# re->random->warnings->tokenize->string->re
return
for doomed_key in random.sample(cache_keys, number_to_toss):
try:
del cache_dict[doomed_key]
except KeyError:
# Ignore problems if the cache changed from another thread.
pass
# Rebuild the arguments and locale-sensitivity dictionaries.
args_dict.clear()
sensitivity_dict = {}
for pattern, pattern_type, flags, args, default_version, locale in cache_dict:
args_dict[pattern, pattern_type, flags, default_version, locale] = args
try:
sensitivity_dict[pattern_type, pattern] = locale_sensitive[pattern_type, pattern]
except KeyError:
pass
locale_sensitive.clear()
locale_sensitive.update(sensitivity_dict)
def _fold_case(info, string):
    "Folds the case of a string."
    flags = info.flags
    # If no encoding flag (ASCII/LOCALE/UNICODE) is set, use the guessed one.
    if (flags & _ALL_ENCODINGS) == 0:
        flags |= info.guess_encoding
    return _regex.fold_case(flags, string)
def is_cased(info, char):
    "Checks whether a character is cased."
    # A character is cased if it has more than one case variant.
    return len(_regex.get_all_cases(info.flags, char)) > 1
def _compile_firstset(info, fs):
    "Compiles the firstset for the pattern; returns [] if none can be built."
    # An empty firstset, or one containing None (unknown), is unusable.
    if not fs or None in fs:
        return []
    # If we ignore the case, for simplicity we won't build a firstset.
    members = set()
    for i in fs:
        if i.case_flags:
            if isinstance(i, Character):
                if is_cased(info, i.value):
                    return []
            elif isinstance(i, SetBase):
                return []
        members.add(i.with_flags(case_flags=NOCASE))
    # Build the firstset.
    fs = SetUnion(info, list(members), zerowidth=True)
    fs = fs.optimise(info, in_set=True)
    # Compile the firstset.
    return fs.compile(bool(info.flags & REVERSE))
def _flatten_code(code):
"Flattens the code from a list of tuples."
flat_code = []
for c in code:
flat_code.extend(c)
return flat_code
def make_character(info, value, in_set=False):
    "Makes a character literal from the codepoint `value`."
    if in_set:
        # A character set is built case-sensitively.
        return Character(value)
    # Outside a set the current scoped case flags apply.
    return Character(value, case_flags=info.flags & CASE_FLAGS)
def make_ref_group(info, name, position):
    "Makes a group reference, applying the current scoped case flags."
    return RefGroup(info, name, position, case_flags=info.flags & CASE_FLAGS)
def make_string_set(info, name):
    "Makes a string set, applying the current scoped case flags."
    return StringSet(info, name, case_flags=info.flags & CASE_FLAGS)
def make_property(info, prop, in_set):
    "Makes a property."
    # Inside a set the property is used as-is; otherwise it picks up the
    # current scoped case flags.
    return prop if in_set else prop.with_flags(case_flags=info.flags & CASE_FLAGS)
def _parse_pattern(source, info):
    "Parses a pattern, eg. 'a|b|c'."
    # A pattern is one or more '|'-separated branches.
    branches = [parse_sequence(source, info)]
    while source.match("|"):
        branches.append(parse_sequence(source, info))
    if len(branches) == 1:
        return branches[0]
    return Branch(branches)
def parse_sequence(source, info):
    "Parses a sequence, eg. 'abc'."
    sequence = []
    # True while the last element came from a quantifier or fuzzy constraint,
    # which must not itself be quantified again directly.
    applied = False
    while True:
        # Get literal characters followed by an element.
        characters, case_flags, element = parse_literal_and_element(source,
          info)
        if not element:
            # No element, just a literal. We've also reached the end of the
            # sequence.
            append_literal(characters, case_flags, sequence)
            break
        if element is COMMENT or element is FLAGS:
            append_literal(characters, case_flags, sequence)
        elif type(element) is tuple:
            # It looks like we've found a quantifier.
            ch, saved_pos = element
            counts = parse_quantifier(source, info, ch)
            if counts:
                # It _is_ a quantifier.
                apply_quantifier(source, info, counts, characters, case_flags,
                  ch, saved_pos, applied, sequence)
                applied = True
            else:
                # It's not a quantifier. Maybe it's a fuzzy constraint.
                constraints = parse_fuzzy(source, ch)
                if constraints:
                    # It _is_ a fuzzy constraint.
                    apply_constraint(source, info, constraints, characters,
                      case_flags, saved_pos, applied, sequence)
                    applied = True
                else:
                    # The element was just a literal.
                    characters.append(ord(ch))
                    append_literal(characters, case_flags, sequence)
                    applied = False
        else:
            # We have a literal followed by something else.
            append_literal(characters, case_flags, sequence)
            sequence.append(element)
            applied = False
    return make_sequence(sequence)
def apply_quantifier(source, info, counts, characters, case_flags, ch,
  saved_pos, applied, sequence):
    "Applies a parsed quantifier to the preceding character or sequence item."
    if characters:
        # The quantifier applies to the last character.
        append_literal(characters[ : -1], case_flags, sequence)
        element = Character(characters[-1], case_flags=case_flags)
    else:
        # The quantifier applies to the last item in the sequence.
        if applied or not sequence:
            raise error("nothing to repeat", source.string, saved_pos)
        element = sequence.pop()
    min_count, max_count = counts
    saved_pos = source.pos
    ch = source.get()
    if ch == "?":
        # The "?" suffix that means it's a lazy repeat.
        repeated = LazyRepeat
    elif ch == "+":
        # The "+" suffix that means it's a possessive repeat.
        repeated = PossessiveRepeat
    else:
        # No suffix means that it's a greedy repeat.
        source.pos = saved_pos
        repeated = GreedyRepeat
    # Ignore the quantifier if it applies to a zero-width item or the number of
    # repeats is fixed at 1.
    if not element.is_empty() and (min_count != 1 or max_count != 1):
        element = repeated(element, min_count, max_count)
    sequence.append(element)
def apply_constraint(source, info, constraints, characters, case_flags,
  saved_pos, applied, sequence):
    "Applies a fuzzy constraint to the preceding character or sequence item."
    if characters:
        # The constraint applies to the last character.
        append_literal(characters[ : -1], case_flags, sequence)
        element = Character(characters[-1], case_flags=case_flags)
        sequence.append(Fuzzy(element, constraints))
    else:
        # The constraint applies to the last item in the sequence.
        if applied or not sequence:
            raise error("nothing for fuzzy constraint", source.string,
              saved_pos)
        element = sequence.pop()
        # If a group is marked as fuzzy then put all of the fuzzy part in the
        # group.
        if isinstance(element, Group):
            element.subpattern = Fuzzy(element.subpattern, constraints)
            sequence.append(element)
        else:
            sequence.append(Fuzzy(element, constraints))
def append_literal(characters, case_flags, sequence):
    "Appends a Literal for the codepoints, if any, to the sequence."
    if characters:
        sequence.append(Literal(characters, case_flags=case_flags))
def PossessiveRepeat(element, min_count, max_count):
    "Builds a possessive repeat (a greedy repeat wrapped in an atomic group)."
    return Atomic(GreedyRepeat(element, min_count, max_count))
# Simple quantifiers and their (min, max) repeat counts; None means unbounded.
_QUANTIFIERS = {"?": (0, 1), "*": (0, None), "+": (1, None)}

def parse_quantifier(source, info, ch):
    "Parses a quantifier, returning (min, max) counts or None."
    q = _QUANTIFIERS.get(ch)
    if q:
        # It's a quantifier.
        return q
    if ch == "{":
        # Looks like a limited repeated element, eg. 'a{2,3}'.
        counts = parse_limited_quantifier(source)
        if counts:
            return counts
    return None
def is_above_limit(count):
    "Checks whether a count is above the maximum (UNLIMITED); None is not."
    return count is not None and count >= UNLIMITED
def parse_limited_quantifier(source):
    "Parses a limited quantifier, eg. the '2,3}' part of 'a{2,3}'."
    saved_pos = source.pos
    min_count = parse_count(source)
    if source.match(","):
        max_count = parse_count(source)
        # No minimum means 0 and no maximum means unlimited.
        min_count = int(min_count or 0)
        max_count = int(max_count) if max_count else None
        if max_count is not None and min_count > max_count:
            raise error("min repeat greater than max repeat", source.string,
              saved_pos)
    else:
        if not min_count:
            # Not a quantifier after all; back up.
            source.pos = saved_pos
            return None
        min_count = max_count = int(min_count)
    if is_above_limit(min_count) or is_above_limit(max_count):
        raise error("repeat count too big", source.string, saved_pos)
    if not source.match("}"):
        # No closing brace, so not a quantifier; back up.
        source.pos = saved_pos
        return None
    return min_count, max_count
def parse_fuzzy(source, ch):
    "Parses a fuzzy setting, eg. '{e<=2}', if present; returns None if not."
    if ch != "{":
        return None
    saved_pos = source.pos
    constraints = {}
    try:
        parse_fuzzy_item(source, constraints)
        while source.match(","):
            parse_fuzzy_item(source, constraints)
    except ParseError:
        # Not a fuzzy setting after all; back up.
        source.pos = saved_pos
        return None
    if not source.match("}"):
        raise error("expected }", source.string, source.pos)
    return constraints
def parse_fuzzy_item(source, constraints):
    "Parses a single fuzzy setting item (a cost constraint or cost equation)."
    saved_pos = source.pos
    try:
        parse_cost_constraint(source, constraints)
    except ParseError:
        # Not a cost constraint; try a cost equation instead.
        source.pos = saved_pos
        parse_cost_equation(source, constraints)
def parse_cost_constraint(source, constraints):
    "Parses a cost constraint and records it in `constraints`."
    saved_pos = source.pos
    ch = source.get()
    if ch in ALPHA:
        # Syntax: constraint [("<=" | "<") cost]
        constraint = parse_constraint(source, constraints, ch)
        max_inc = parse_fuzzy_compare(source)
        if max_inc is None:
            # No maximum cost.
            constraints[constraint] = 0, None
        else:
            # There's a maximum cost.
            cost_pos = source.pos
            max_cost = int(parse_count(source))
            # Inclusive or exclusive limit?
            if not max_inc:
                max_cost -= 1
            if max_cost < 0:
                raise error("bad fuzzy cost limit", source.string, cost_pos)
            constraints[constraint] = 0, max_cost
    elif ch in DIGITS:
        # Syntax: cost ("<=" | "<") constraint ("<=" | "<") cost
        source.pos = saved_pos
        try:
            # Minimum cost.
            min_cost = int(parse_count(source))
            min_inc = parse_fuzzy_compare(source)
            if min_inc is None:
                raise ParseError()
            constraint = parse_constraint(source, constraints, source.get())
            max_inc = parse_fuzzy_compare(source)
            if max_inc is None:
                raise ParseError()
            # Maximum cost.
            cost_pos = source.pos
            max_cost = int(parse_count(source))
            # Inclusive or exclusive limits?
            if not min_inc:
                min_cost += 1
            if not max_inc:
                max_cost -= 1
            if not 0 <= min_cost <= max_cost:
                raise error("bad fuzzy cost limit", source.string, cost_pos)
            constraints[constraint] = min_cost, max_cost
        except ValueError:
            raise ParseError()
    else:
        raise ParseError()
def parse_constraint(source, constraints, ch):
    "Parses a constraint letter; only 'd', 'e', 'i' and 's' are valid."
    if ch not in "deis":
        raise error("bad fuzzy constraint", source.string, source.pos)
    # Each constraint may appear at most once.
    if ch in constraints:
        raise error("repeated fuzzy constraint", source.string, source.pos)
    return ch
def parse_fuzzy_compare(source):
    """Parses a cost comparator.

    Returns True for an inclusive limit ("<="), False for an exclusive one
    ("<"), or None if no comparator is present.
    """
    # Try "<=" before "<" so the longer token wins.
    for token, inclusive in (("<=", True), ("<", False)):
        if source.match(token):
            return inclusive
    return None
def parse_cost_equation(source, constraints):
    "Parses a cost equation, eg. '2i+2d+1s<=4', into constraints['cost']."
    if "cost" in constraints:
        raise error("more than one cost equation", source.string, source.pos)
    cost = {}
    parse_cost_term(source, cost)
    while source.match("+"):
        parse_cost_term(source, cost)
    max_inc = parse_fuzzy_compare(source)
    if max_inc is None:
        raise error("missing fuzzy cost limit", source.string, source.pos)
    max_cost = int(parse_count(source))
    # An exclusive limit ("<") is one less than the inclusive one.
    if not max_inc:
        max_cost -= 1
    if max_cost < 0:
        raise error("bad fuzzy cost limit", source.string, source.pos)
    cost["max"] = max_cost
    constraints["cost"] = cost
def parse_cost_term(source, cost):
    "Parses a cost equation term, eg. '2i' (coefficient defaults to 1)."
    coeff = parse_count(source)
    ch = source.get()
    if ch not in "dis":
        raise ParseError()
    if ch in cost:
        raise error("repeated fuzzy cost", source.string, source.pos)
    cost[ch] = int(coeff or 1)
def parse_count(source):
    "Parses a quantifier's count, which can be empty (returns a digit string)."
    return source.get_while(DIGITS)
def parse_literal_and_element(source, info):
    """Parses a literal followed by an element. The element is FLAGS if it's an
    inline flag or None if it has reached the end of a sequence.

    Returns a (characters, case_flags, element) triple, where `characters` is
    a list of codepoints for the leading literal run.
    """
    characters = []
    case_flags = info.flags & CASE_FLAGS
    while True:
        saved_pos = source.pos
        ch = source.get()
        if ch in SPECIAL_CHARS:
            if ch in ")|":
                # The end of a sequence. At the end of the pattern ch is "".
                source.pos = saved_pos
                return characters, case_flags, None
            elif ch == "\\":
                # An escape sequence outside a set.
                element = parse_escape(source, info, False)
                return characters, case_flags, element
            elif ch == "(":
                # A parenthesised subpattern or a flag.
                element = parse_paren(source, info)
                if element and element is not COMMENT:
                    return characters, case_flags, element
            elif ch == ".":
                # Any character.
                if info.flags & DOTALL:
                    element = AnyAll()
                elif info.flags & WORD:
                    element = AnyU()
                else:
                    element = Any()
                return characters, case_flags, element
            elif ch == "[":
                # A character set.
                element = parse_set(source, info)
                return characters, case_flags, element
            elif ch == "^":
                # The start of a line or the string.
                if info.flags & MULTILINE:
                    if info.flags & WORD:
                        element = StartOfLineU()
                    else:
                        element = StartOfLine()
                else:
                    element = StartOfString()
                return characters, case_flags, element
            elif ch == "$":
                # The end of a line or the string.
                if info.flags & MULTILINE:
                    if info.flags & WORD:
                        element = EndOfLineU()
                    else:
                        element = EndOfLine()
                else:
                    if info.flags & WORD:
                        element = EndOfStringLineU()
                    else:
                        element = EndOfStringLine()
                return characters, case_flags, element
            elif ch in "?*+{":
                # Looks like a quantifier.
                return characters, case_flags, (ch, saved_pos)
            else:
                # A literal.
                characters.append(ord(ch))
        else:
            # A literal.
            characters.append(ord(ch))
def parse_paren(source, info):
    """Parses a parenthesised subpattern or a flag. Returns FLAGS if it's an
    inline flag.
    """
    saved_pos = source.pos
    ch = source.get()
    if ch == "?":
        # (?...
        saved_pos_2 = source.pos
        ch = source.get()
        if ch == "<":
            # (?<...
            saved_pos_3 = source.pos
            ch = source.get()
            if ch in ("=", "!"):
                # (?<=... or (?<!...: lookbehind.
                return parse_lookaround(source, info, True, ch == "=")
            # (?<...: a named capture group.
            source.pos = saved_pos_3
            name = parse_name(source)
            group = info.open_group(name)
            source.expect(">")
            saved_flags = info.flags
            try:
                subpattern = _parse_pattern(source, info)
                source.expect(")")
            finally:
                # Scoped flags must not leak out of the group.
                info.flags = saved_flags
                source.ignore_space = bool(info.flags & VERBOSE)
            info.close_group()
            return Group(info, group, subpattern)
        if ch in ("=", "!"):
            # (?=... or (?!...: lookahead.
            return parse_lookaround(source, info, False, ch == "=")
        if ch == "P":
            # (?P...: a Python extension.
            return parse_extension(source, info)
        if ch == "#":
            # (?#...: a comment.
            return parse_comment(source)
        if ch == "(":
            # (?(...: a conditional subpattern.
            return parse_conditional(source, info)
        if ch == ">":
            # (?>...: an atomic subpattern.
            return parse_atomic(source, info)
        if ch == "|":
            # (?|...: a common/reset groups branch.
            return parse_common(source, info)
        if ch == "R" or "0" <= ch <= "9":
            # (?R...: probably a call to a group.
            return parse_call_group(source, info, ch, saved_pos_2)
        if ch == "&":
            # (?&...: a call to a named group.
            return parse_call_named_group(source, info, saved_pos_2)
        # (?...: probably a flags subpattern.
        source.pos = saved_pos_2
        return parse_flags_subpattern(source, info)
    # (...: an unnamed capture group.
    source.pos = saved_pos
    group = info.open_group()
    saved_flags = info.flags
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    info.close_group()
    return Group(info, group, subpattern)
def parse_extension(source, info):
    "Parses a Python extension: (?P<name>...), (?P=name) or (?P>name)."
    saved_pos = source.pos
    ch = source.get()
    if ch == "<":
        # (?P<...: a named capture group.
        name = parse_name(source)
        group = info.open_group(name)
        source.expect(">")
        saved_flags = info.flags
        try:
            subpattern = _parse_pattern(source, info)
            source.expect(")")
        finally:
            # Scoped flags must not leak out of the group.
            info.flags = saved_flags
            source.ignore_space = bool(info.flags & VERBOSE)
        info.close_group()
        return Group(info, group, subpattern)
    if ch == "=":
        # (?P=...: a named group reference.
        name = parse_name(source, allow_numeric=True)
        source.expect(")")
        if info.is_open_group(name):
            raise error("cannot refer to an open group", source.string,
              saved_pos)
        return make_ref_group(info, name, saved_pos)
    if ch == ">" or ch == "&":
        # (?P>...: a call to a group.
        return parse_call_named_group(source, info, saved_pos)
    source.pos = saved_pos
    raise error("unknown extension", source.string, saved_pos)
def parse_comment(source):
    "Parses a comment, returning the COMMENT singleton."
    # Skip everything up to (but not including) the closing ')'.
    source.skip_while(set(")"), include=False)
    source.expect(")")
    return COMMENT
def parse_lookaround(source, info, behind, positive):
    "Parses a lookaround (lookahead or lookbehind, positive or negative)."
    saved_flags = info.flags
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # Scoped flags must not leak out of the lookaround.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    return LookAround(behind, positive, subpattern)
def parse_conditional(source, info):
    "Parses a conditional subpattern, eg. '(?(1)yes|no)'."
    saved_flags = info.flags
    saved_pos = source.pos
    try:
        # The condition is a group name or number.
        group = parse_name(source, True)
        source.expect(")")
        yes_branch = parse_sequence(source, info)
        if source.match("|"):
            no_branch = parse_sequence(source, info)
        else:
            no_branch = Sequence()
        source.expect(")")
    finally:
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    if yes_branch.is_empty() and no_branch.is_empty():
        return Sequence()
    return Conditional(info, group, yes_branch, no_branch, saved_pos)
def parse_atomic(source, info):
    "Parses an atomic subpattern, eg. '(?>...)'."
    saved_flags = info.flags
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # Scoped flags must not leak out of the subpattern.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    return Atomic(subpattern)
def parse_common(source, info):
    "Parses a common groups branch, eg. '(?|...)'."
    # Capture group numbers in different branches can reuse the group numbers.
    initial_group_count = info.group_count
    branches = [parse_sequence(source, info)]
    final_group_count = info.group_count
    while source.match("|"):
        # Each branch restarts numbering from the same point.
        info.group_count = initial_group_count
        branches.append(parse_sequence(source, info))
        final_group_count = max(final_group_count, info.group_count)
    info.group_count = final_group_count
    source.expect(")")
    if len(branches) == 1:
        return branches[0]
    return Branch(branches)
def parse_call_group(source, info, ch, pos):
    "Parses a call to a group, eg. '(?1)' or '(?R)'."
    if ch == "R":
        # "(?R)" calls group 0, ie. the whole pattern (recursion).
        group = "0"
    else:
        group = ch + source.get_while(DIGITS)
    source.expect(")")
    return CallGroup(info, group, pos)
def parse_call_named_group(source, info, pos):
    "Parses a call to a named group, eg. '(?&name)'."
    group = parse_name(source)
    source.expect(")")
    return CallGroup(info, group, pos)
def parse_flag_set(source):
    "Parses a set of inline flags, returning the combined bitmask."
    flags = 0
    try:
        while True:
            saved_pos = source.pos
            ch = source.get()
            if ch == "V":
                # Version flags are two characters: "V0" or "V1".
                ch += source.get()
            flags |= REGEX_FLAGS[ch]
    except KeyError:
        # Not a flag character: back up and stop.
        source.pos = saved_pos
    return flags
def parse_flags(source, info):
    "Parses flags being turned on/off, returning (flags_on, flags_off)."
    flags_on = parse_flag_set(source)
    if source.match("-"):
        flags_off = parse_flag_set(source)
        if not flags_off:
            raise error("bad inline flags: no flags after '-'", source.string,
              source.pos)
    else:
        flags_off = 0
    if flags_on & LOCALE:
        # Remember that this pattern has an inline locale flag.
        info.inline_locale = True
    return flags_on, flags_off
def parse_subpattern(source, info, flags_on, flags_off):
    "Parses a subpattern with scoped flags, eg. '(?i:...)'."
    saved_flags = info.flags
    info.flags = (info.flags | flags_on) & ~flags_off
    source.ignore_space = bool(info.flags & VERBOSE)
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # Restore the enclosing scope's flags.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    return subpattern
def parse_flags_subpattern(source, info):
    """Parses a flags subpattern. It could be inline flags or a subpattern
    possibly with local flags. If it's a subpattern, then that's returned;
    if it's an inline flags, then FLAGS is returned.
    """
    flags_on, flags_off = parse_flags(source, info)
    if flags_off & GLOBAL_FLAGS:
        raise error("bad inline flags: cannot turn off global flag",
          source.string, source.pos)
    if flags_on & flags_off:
        raise error("bad inline flags: flag turned on and off", source.string,
          source.pos)
    # Handle flags which are global in all regex behaviours.
    new_global_flags = (flags_on & ~info.global_flags) & GLOBAL_FLAGS
    if new_global_flags:
        info.global_flags |= new_global_flags
        # A global has been turned on, so reparse the pattern.
        raise _UnscopedFlagSet(info.global_flags)
    # Ensure that from now on we have only scoped flags.
    flags_on &= ~GLOBAL_FLAGS
    if source.match(":"):
        return parse_subpattern(source, info, flags_on, flags_off)
    if source.match(")"):
        parse_positional_flags(source, info, flags_on, flags_off)
        return FLAGS
    raise error("unknown extension", source.string, source.pos)
def parse_positional_flags(source, info, flags_on, flags_off):
    "Parses positional flags, eg. '(?i)' with no subpattern."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    if version == VERSION0:
        # Positional flags are global and can only be turned on.
        if flags_off:
            raise error("bad inline flags: cannot turn flags off",
              source.string, source.pos)
        new_global_flags = flags_on & ~info.global_flags
        if new_global_flags:
            info.global_flags |= new_global_flags
            # A global has been turned on, so reparse the pattern.
            raise _UnscopedFlagSet(info.global_flags)
    else:
        # In the new behaviour the flags apply from here to the scope's end.
        info.flags = (info.flags | flags_on) & ~flags_off
    source.ignore_space = bool(info.flags & VERBOSE)
def parse_name(source, allow_numeric=False, allow_group_0=False):
    "Parses a group name (an identifier, or a group number if allowed)."
    # Read up to the delimiter (')' or '>'), which is not consumed.
    name = source.get_while(set(")>"), include=False)
    if not name:
        raise error("bad group name", source.string, source.pos)
    if name.isdigit():
        min_group = 0 if allow_group_0 else 1
        if not allow_numeric or int(name) < min_group:
            raise error("bad group name", source.string, source.pos)
    else:
        if not is_identifier(name):
            raise error("bad group name", source.string, source.pos)
    return name
def is_identifier(name):
    "Checks whether a name is a valid (ASCII) identifier."
    if not name:
        return False
    first, rest = name[0], name[1:]
    # Must start with a letter or underscore...
    if first not in ALPHA and first != "_":
        return False
    # ...and continue with letters, digits or underscores.
    return all(ch in ALNUM or ch == "_" for ch in rest)
def is_octal(string):
    "Checks whether a string consists entirely of octal digits."
    return set(string) <= OCT_DIGITS
def is_decimal(string):
    "Checks whether a string consists entirely of decimal digits."
    return set(string) <= DIGITS
def is_hexadecimal(string):
    "Checks whether a string consists entirely of hexadecimal digits."
    return set(string) <= HEX_DIGITS
def parse_escape(source, info, in_set):
    "Parses an escape sequence (the backslash has already been consumed)."
    # Whitespace after a backslash is significant even in VERBOSE mode.
    saved_ignore = source.ignore_space
    source.ignore_space = False
    ch = source.get()
    source.ignore_space = saved_ignore
    if not ch:
        # A backslash at the end of the pattern.
        raise error("bad escape", source.string, source.pos)
    if ch in HEX_ESCAPES:
        # A hexadecimal escape sequence.
        return parse_hex_escape(source, info, HEX_ESCAPES[ch], in_set)
    elif ch == "g" and not in_set:
        # A group reference.
        saved_pos = source.pos
        try:
            return parse_group_ref(source, info)
        except error:
            # Invalid as a group reference, so assume it's a literal.
            source.pos = saved_pos
        return make_character(info, ord(ch), in_set)
    elif ch == "G" and not in_set:
        # A search anchor.
        return SearchAnchor()
    elif ch == "L" and not in_set:
        # A string set.
        return parse_string_set(source, info)
    elif ch == "N":
        # A named codepoint.
        return parse_named_char(source, info, in_set)
    elif ch in "pP":
        # A Unicode property, positive or negative.
        return parse_property(source, info, ch == "p", in_set)
    elif ch == "X" and not in_set:
        # A grapheme cluster.
        return Grapheme()
    elif ch in ALPHA:
        # An alphabetic escape sequence.
        # Positional escapes aren't allowed inside a character set.
        if not in_set:
            if info.flags & WORD:
                value = WORD_POSITION_ESCAPES.get(ch)
            else:
                value = POSITION_ESCAPES.get(ch)
            if value:
                return value
        value = CHARSET_ESCAPES.get(ch)
        if value:
            return value
        value = CHARACTER_ESCAPES.get(ch)
        if value:
            return Character(ord(value))
        return make_character(info, ord(ch), in_set)
    elif ch in DIGITS:
        # A numeric escape sequence.
        return parse_numeric_escape(source, info, ch, in_set)
    else:
        # A literal.
        return make_character(info, ord(ch), in_set)
def parse_numeric_escape(source, info, ch, in_set):
    "Parses a numeric escape sequence (octal escape or group reference)."
    if in_set or ch == "0":
        # Octal escape sequence, max 3 digits.
        return parse_octal_escape(source, info, [ch], in_set)
    # At least 1 digit, so either octal escape or group.
    digits = ch
    saved_pos = source.pos
    ch = source.get()
    if ch in DIGITS:
        # At least 2 digits, so either octal escape or group.
        digits += ch
        saved_pos = source.pos
        ch = source.get()
        if is_octal(digits) and ch in OCT_DIGITS:
            # 3 octal digits, so octal escape sequence.
            encoding = info.flags & _ALL_ENCODINGS
            # 8-bit locales only have 256 codepoints.
            if encoding == ASCII or encoding == LOCALE:
                octal_mask = 0xFF
            else:
                octal_mask = 0x1FF
            value = int(digits + ch, 8) & octal_mask
            return make_character(info, value)
    # Group reference.
    source.pos = saved_pos
    if info.is_open_group(digits):
        raise error("cannot refer to an open group", source.string, source.pos)
    return make_ref_group(info, digits, source.pos)
def parse_octal_escape(source, info, digits, in_set):
    "Parses an octal escape sequence (up to 3 digits, first already read)."
    saved_pos = source.pos
    ch = source.get()
    while len(digits) < 3 and ch in OCT_DIGITS:
        digits.append(ch)
        saved_pos = source.pos
        ch = source.get()
    # Put back the character which wasn't an octal digit.
    source.pos = saved_pos
    try:
        value = int("".join(digits), 8)
        return make_character(info, value, in_set)
    except ValueError:
        raise error("bad octal escape", source.string, source.pos)
def parse_hex_escape(source, info, expected_len, in_set):
    "Parses a hex escape sequence of exactly expected_len digits."
    collected = []
    for _ in range(expected_len):
        c = source.get()
        if c not in HEX_DIGITS:
            raise error("bad hex escape", source.string, source.pos)
        collected.append(c)
    return make_character(info, int("".join(collected), 16), in_set)
def parse_group_ref(source, info):
    "Parses a group reference of the form <name>."
    source.expect("<")
    name_pos = source.pos
    group_name = parse_name(source, True)
    source.expect(">")
    # A group can't be referenced before it has been closed.
    if info.is_open_group(group_name):
        raise error("cannot refer to an open group", source.string, source.pos)
    return make_ref_group(info, group_name, name_pos)
def parse_string_set(source, info):
    "Parses a named-list (string set) reference of the form <name>."
    source.expect("<")
    list_name = parse_name(source, True)
    source.expect(">")
    # The named list must have been supplied as a keyword argument.
    if list_name is None or list_name not in info.kwargs:
        raise error("undefined named list", source.string, source.pos)
    return make_string_set(info, list_name)
def parse_named_char(source, info, in_set):
    "Parses a named character."
    saved_pos = source.pos
    if source.match("{"):
        name = source.get_while(NAMED_CHAR_PART)
        if source.match("}"):
            try:
                value = unicodedata.lookup(name)
                return make_character(info, ord(value), in_set)
            except KeyError:
                raise error("undefined character name", source.string,
                  source.pos)
    # Not a well-formed named-character form: rewind and treat the "N" as a
    # literal character.
    source.pos = saved_pos
    return make_character(info, ord("N"), in_set)
def parse_property(source, info, positive, in_set):
    "Parses a Unicode property."
    saved_pos = source.pos
    ch = source.get()
    if ch == "{":
        # Delimited form, which may be negated with a leading "^".
        negate = source.match("^")
        prop_name, name = parse_property_name(source)
        if source.match("}"):
            # It's correctly delimited.
            prop = lookup_property(prop_name, name, positive != negate, source)
            return make_property(info, prop, in_set)
    elif ch and ch in "CLMNPSZ":
        # An abbreviated property, eg \pL.
        prop = lookup_property(None, ch, positive, source)
        return make_property(info, prop, in_set)
    # Not a property, so treat as a literal "p" or "P".
    source.pos = saved_pos
    ch = "p" if positive else "P"
    return make_character(info, ord(ch), in_set)
def parse_property_name(source):
    "Parses a property name, which may be qualified."
    # Returns (prop_name, name): prop_name is None for an unqualified name.
    name = source.get_while(PROPERTY_NAME_PART)
    saved_pos = source.pos
    ch = source.get()
    if ch and ch in ":=":
        # A ":" or "=" separates a property name from its value.
        prop_name = name
        name = source.get_while(ALNUM | set(" &_-./")).strip()
        if name:
            # Name after the ":" or "=", so it's a qualified name.
            saved_pos = source.pos
        else:
            # No name after the ":" or "=", so assume it's an unqualified name.
            prop_name, name = None, prop_name
    else:
        prop_name = None
    # Rewind past anything not consumed as part of the name.
    source.pos = saved_pos
    return prop_name, name
def parse_set(source, info):
    "Parses a character set."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    # Whitespace is significant inside a set, even in verbose mode.
    saved_ignore = source.ignore_space
    source.ignore_space = False
    # Negative set?
    negate = source.match("^")
    try:
        if version == VERSION0:
            # Old behaviour: no set operators, implicit union only.
            item = parse_set_imp_union(source, info)
        else:
            item = parse_set_union(source, info)
        if not source.match("]"):
            raise error("missing ]", source.string, source.pos)
    finally:
        # Always restore the whitespace handling.
        source.ignore_space = saved_ignore
    if negate:
        item = item.with_flags(positive=not item.positive)
    item = item.with_flags(case_flags=info.flags & CASE_FLAGS)
    return item
def parse_set_union(source, info):
    "Parses a set union ([x||y])."
    members = [parse_set_symm_diff(source, info)]
    while source.match("||"):
        members.append(parse_set_symm_diff(source, info))
    # A single operand needs no union wrapper.
    return members[0] if len(members) == 1 else SetUnion(info, members)
def parse_set_symm_diff(source, info):
    "Parses a set symmetric difference ([x~~y])."
    members = [parse_set_inter(source, info)]
    while source.match("~~"):
        members.append(parse_set_inter(source, info))
    # A single operand needs no wrapper.
    return members[0] if len(members) == 1 else SetSymDiff(info, members)
def parse_set_inter(source, info):
    "Parses a set intersection ([x&&y])."
    members = [parse_set_diff(source, info)]
    while source.match("&&"):
        members.append(parse_set_diff(source, info))
    # A single operand needs no wrapper.
    return members[0] if len(members) == 1 else SetInter(info, members)
def parse_set_diff(source, info):
    "Parses a set difference ([x--y])."
    members = [parse_set_imp_union(source, info)]
    while source.match("--"):
        members.append(parse_set_imp_union(source, info))
    # A single operand needs no wrapper.
    return members[0] if len(members) == 1 else SetDiff(info, members)
def parse_set_imp_union(source, info):
    "Parses a set implicit union ([xy])."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    items = [parse_set_member(source, info)]
    while True:
        saved_pos = source.pos
        if source.match("]"):
            # End of the set.
            source.pos = saved_pos
            break
        if version == VERSION1 and any(source.match(op) for op in SET_OPS):
            # The new behaviour has set operators; stop so the caller can
            # handle the operator (rewind past it first).
            source.pos = saved_pos
            break
        items.append(parse_set_member(source, info))
    if len(items) == 1:
        return items[0]
    return SetUnion(info, items)
def parse_set_member(source, info):
    "Parses a member in a character set, which may be a range."
    # Parse a set item.
    start = parse_set_item(source, info)
    saved_pos1 = source.pos
    # Only a positive single character can start a range.
    if (not isinstance(start, Character) or not start.positive or not
      source.match("-")):
        # It's not the start of a range.
        return start
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    # It looks like the start of a range of characters.
    saved_pos2 = source.pos
    if version == VERSION1 and source.match("-"):
        # It's actually the set difference operator '--', so return the
        # character.
        source.pos = saved_pos1
        return start
    if source.match("]"):
        # We've reached the end of the set, so return both the character and
        # hyphen.
        source.pos = saved_pos2
        return SetUnion(info, [start, Character(ord("-"))])
    # Parse a set item.
    end = parse_set_item(source, info)
    if not isinstance(end, Character) or not end.positive:
        # It's not a range, so return the character, hyphen and property.
        return SetUnion(info, [start, Character(ord("-")), end])
    # It _is_ a range.
    if start.value > end.value:
        raise error("bad character range", source.string, source.pos)
    if start.value == end.value:
        # A degenerate range is just the single character.
        return start
    return Range(start.value, end.value)
def parse_set_item(source, info):
    "Parses an item in a character set."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    if source.match("\\"):
        # An escape sequence in a set.
        return parse_escape(source, info, True)
    saved_pos = source.pos
    if source.match("[:"):
        # Looks like a POSIX character class.
        try:
            return parse_posix_class(source, info)
        except ParseError:
            # Not a POSIX character class; rewind and fall through.
            source.pos = saved_pos
    if version == VERSION1 and source.match("["):
        # It's the start of a nested set.
        # Negative set?
        negate = source.match("^")
        item = parse_set_union(source, info)
        if not source.match("]"):
            raise error("missing ]", source.string, source.pos)
        if negate:
            item = item.with_flags(positive=not item.positive)
        return item
    ch = source.get()
    if not ch:
        # The pattern ended inside the set.
        raise error("bad set", source.string, source.pos)
    return Character(ord(ch))
def parse_posix_class(source, info):
    "Parses a POSIX character class, raising ParseError if malformed."
    negated = source.match("^")
    prop_name, value_name = parse_property_name(source)
    # A POSIX class must be closed with ":]".
    if not source.match(":]"):
        raise ParseError()
    return lookup_property(prop_name, value_name, not negated, source)
def float_to_rational(flt):
    "Converts a float to a rational pair (numerator, denominator)."
    whole = int(flt)
    frac = flt - whole
    # Close enough to an integer: denominator of 1. (Also renames the local
    # away from "error", which shadowed the module's exception class.)
    if abs(frac) < 0.0001:
        return whole, 1
    # Continued-fraction step: recurse on the reciprocal of the remainder.
    den, num = float_to_rational(1.0 / frac)
    return whole * den + num, den
def numeric_to_rational(numeric):
    "Converts a numeric string to a rational string, if possible."
    # Strip a leading minus sign, restoring it on output.
    sign = ""
    if numeric[ : 1] == "-":
        sign, numeric = numeric[0], numeric[1 : ]
    parts = numeric.split("/")
    if len(parts) == 2:
        num, den = float_to_rational(float(parts[0]) / float(parts[1]))
    elif len(parts) == 1:
        num, den = float_to_rational(float(parts[0]))
    else:
        # More than one "/" isn't a valid rational.
        raise ValueError()
    result = "%s%s/%s" % (sign, num, den)
    # A denominator of 1 is implicit.
    return result[ : -2] if result.endswith("/1") else result
def standardise_name(name):
    "Standardises a property or value name."
    text = "".join(name)
    try:
        # Numeric names are normalised to a rational form.
        return numeric_to_rational(text)
    except (ValueError, ZeroDivisionError):
        # Not numeric: drop separators and uppercase.
        return "".join(ch for ch in text if ch not in "_- ").upper()
def lookup_property(property, value, positive, source=None):
    "Looks up a property, returning a Property node."
    # Normalise the names (which may still be lists).
    property = standardise_name(property) if property else None
    value = standardise_name(value)
    # "Assigned" is the negation of "Unassigned".
    if (property, value) == ("GENERALCATEGORY", "ASSIGNED"):
        property, value, positive = "GENERALCATEGORY", "UNASSIGNED", not positive
    if property:
        # Both the property and the value are provided.
        prop = PROPERTIES.get(property)
        if not prop:
            if not source:
                raise error("unknown property")
            raise error("unknown property", source.string, source.pos)
        prop_id, value_dict = prop
        val_id = value_dict.get(value)
        if val_id is None:
            if not source:
                raise error("unknown property value")
            raise error("unknown property value", source.string, source.pos)
        # For binary properties, value 0 ("NO") is the negation of 1 ("YES").
        if "YES" in value_dict and val_id == 0:
            positive, val_id = not positive, 1
        return Property((prop_id << 16) | val_id, positive)
    # Only the value is provided.
    # It might be the name of a GC, script or block value.
    for property in ("GC", "SCRIPT", "BLOCK"):
        prop_id, value_dict = PROPERTIES.get(property)
        val_id = value_dict.get(value)
        if val_id is not None:
            return Property((prop_id << 16) | val_id, positive)
    # It might be the name of a binary property.
    prop = PROPERTIES.get(value)
    if prop:
        prop_id, value_dict = prop
        if "YES" in value_dict:
            return Property((prop_id << 16) | 1, positive)
    # It might be the name of a binary property starting with a prefix.
    if value.startswith("IS"):
        prop = PROPERTIES.get(value[2 : ])
        if prop:
            prop_id, value_dict = prop
            if "YES" in value_dict:
                return Property((prop_id << 16) | 1, positive)
    # It might be the name of a script or block starting with a prefix.
    # (Both prefixes are 2 characters long, so the [2 : ] slice below strips
    # either one.)
    for prefix, property in (("IS", "SCRIPT"), ("IN", "BLOCK")):
        if value.startswith(prefix):
            prop_id, value_dict = PROPERTIES.get(property)
            val_id = value_dict.get(value[2 : ])
            if val_id is not None:
                return Property((prop_id << 16) | val_id, positive)
    # Unknown property.
    if not source:
        raise error("unknown property")
    raise error("unknown property", source.string, source.pos)
def _compile_replacement(source, pattern, is_unicode):
    "Compiles a replacement template escape sequence."
    # Returns (is_group, codes): is_group is True when codes holds a group
    # index, otherwise codes is a list of character codes.
    ch = source.get()
    if ch in ALPHA:
        # An alphabetic escape sequence.
        value = CHARACTER_ESCAPES.get(ch)
        if value:
            return False, [ord(value)]
        if ch in HEX_ESCAPES and (ch == "x" or is_unicode):
            # A hexadecimal escape sequence.
            return False, [parse_repl_hex_escape(source, HEX_ESCAPES[ch])]
        if ch == "g":
            # A group reference.
            return True, [compile_repl_group(source, pattern)]
        if ch == "N" and is_unicode:
            # A named character.
            value = parse_repl_named_char(source)
            if value is not None:
                return False, [value]
        # Unknown alphabetic escape: keep the backslash and the literal.
        return False, [ord("\\"), ord(ch)]
    # Octal values are masked to 8 bits for byte (str) templates, 9 bits
    # otherwise.
    if isinstance(source.sep, str):
        octal_mask = 0xFF
    else:
        octal_mask = 0x1FF
    if ch == "0":
        # An octal escape sequence.
        digits = ch
        while len(digits) < 3:
            saved_pos = source.pos
            ch = source.get()
            if ch not in OCT_DIGITS:
                source.pos = saved_pos
                break
            digits += ch
        return False, [int(digits, 8) & octal_mask]
    if ch in DIGITS:
        # Either an octal escape sequence (3 digits) or a group reference (max
        # 2 digits).
        digits = ch
        saved_pos = source.pos
        ch = source.get()
        if ch in DIGITS:
            digits += ch
            saved_pos = source.pos
            ch = source.get()
            if ch and is_octal(digits + ch):
                # An octal escape sequence.
                return False, [int(digits + ch, 8) & octal_mask]
        # A group reference.
        source.pos = saved_pos
        return True, [int(digits)]
    if ch == "\\":
        # An escaped backslash is a backslash.
        return False, [ord("\\")]
    if not ch:
        # A trailing backslash.
        raise error("bad escape", source.string, source.pos)
    # An escaped non-backslash is a backslash followed by the literal.
    return False, [ord("\\"), ord(ch)]
def parse_repl_hex_escape(source, expected_len):
    "Parses a hex escape sequence in a replacement string."
    collected = []
    for _ in range(expected_len):
        c = source.get()
        if c not in HEX_DIGITS:
            raise error("bad hex escape", source.string, source.pos)
        collected.append(c)
    return int("".join(collected), 16)
def parse_repl_named_char(source):
    "Parses a named character in a replacement string."
    # Returns the character's codepoint, or None if not a well-formed form.
    saved_pos = source.pos
    if source.match("{"):
        name = source.get_while(ALPHA | set(" "))
        if source.match("}"):
            try:
                value = unicodedata.lookup(name)
                return ord(value)
            except KeyError:
                raise error("undefined character name", source.string,
                  source.pos)
    # Not well-formed: rewind and let the caller handle it.
    source.pos = saved_pos
    return None
def compile_repl_group(source, pattern):
    "Compiles a replacement template group reference, returning its index."
    source.expect("<")
    name = parse_name(source, True, True)
    source.expect(">")
    if name.isdigit():
        # A group number; must be within the pattern's group count.
        index = int(name)
        if not 0 <= index <= pattern.groups:
            raise error("invalid group", source.string, source.pos)
        return index
    # A group name; look up its index in the pattern.
    try:
        return pattern.groupindex[name]
    except KeyError:
        raise IndexError("unknown group")
# The regular expression is parsed into a syntax tree. The different types of
# node are defined below.

# Indentation step used when dumping the parse tree.
INDENT = " "
# Flag bits attached to opcodes in the compiled pattern.
POSITIVE_OP = 0x1
ZEROWIDTH_OP = 0x2
FUZZY_OP = 0x4
REVERSE_OP = 0x8
REQUIRED_OP = 0x10

# Text fragments used by the _dump methods.
POS_TEXT = {False: "NON-MATCH", True: "MATCH"}
CASE_TEXT = {NOCASE: "", IGNORECASE: " SIMPLE_IGNORE_CASE", FULLCASE: "",
  FULLIGNORECASE: " FULL_IGNORE_CASE"}
def make_sequence(items):
    "Wraps a list of nodes in a Sequence, unless it has exactly one item."
    if len(items) != 1:
        return Sequence(items)
    return items[0]
# Common base class for all nodes.
class RegexBase(object):
    def __init__(self):
        # _key identifies the node for hashing/equality; subclasses extend it.
        self._key = self.__class__
    def with_flags(self, positive=None, case_flags=None, zerowidth=None):
        # Returns this node with the given flags applied; returns self
        # unchanged when nothing actually differs.
        if positive is None:
            positive = self.positive
        else:
            positive = bool(positive)
        if case_flags is None:
            case_flags = self.case_flags
        else:
            case_flags = case_flags & CASE_FLAGS
        if zerowidth is None:
            zerowidth = self.zerowidth
        else:
            zerowidth = bool(zerowidth)
        if (positive == self.positive and case_flags == self.case_flags and
          zerowidth == self.zerowidth):
            return self
        # rebuild is supplied by subclasses that support these flags.
        return self.rebuild(positive, case_flags, zerowidth)
    def fix_groups(self, pattern, reverse, fuzzy):
        # Default: nothing to resolve.
        pass
    def optimise(self, info):
        return self
    def pack_characters(self, info):
        return self
    def remove_captures(self):
        # Note: callers assign the return value, so overrides must return a
        # node.
        return self
    def is_atomic(self):
        return True
    def can_be_affix(self):
        return True
    def contains_group(self):
        return False
    def get_firstset(self, reverse):
        # Default: no usable first-set.
        raise _FirstSetError()
    def has_simple_start(self):
        return False
    def compile(self, reverse=False, fuzzy=False):
        return self._compile(reverse, fuzzy)
    def dump(self, indent, reverse):
        self._dump(indent, reverse)
    def is_empty(self):
        return False
    def __hash__(self):
        return hash(self._key)
    def __eq__(self, other):
        return type(self) is type(other) and self._key == other._key
    def __ne__(self, other):
        return not self.__eq__(other)
    def get_required_string(self, reverse):
        # Default: no required string; the offset is the node's max width.
        return self.max_width(), None
# Base class for zero-width nodes.
class ZeroWidthBase(RegexBase):
    def __init__(self, positive=True):
        RegexBase.__init__(self)
        self.positive = bool(positive)
        self._key = self.__class__, self.positive
    def get_firstset(self, reverse):
        # Zero-width: consumes no character.
        return set([None])
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if fuzzy:
            flags |= FUZZY_OP
        if reverse:
            flags |= REVERSE_OP
        # The concrete opcode comes from the subclass's _opcode attribute.
        return [(self._opcode, flags)]
    def _dump(self, indent, reverse):
        print "%s%s %s" % (INDENT * indent, self._op_name,
          POS_TEXT[self.positive])
    def max_width(self):
        return 0
class Any(RegexBase):
    # A single-character match, compiled to OP.ANY / OP.ANY_REV.
    _opcode = {False: OP.ANY, True: OP.ANY_REV}
    _op_name = "ANY"
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[reverse], flags)]
    def _dump(self, indent, reverse):
        print "%s%s" % (INDENT * indent, self._op_name)
    def max_width(self):
        return 1
class AnyAll(Any):
    # Like Any, but compiled to the ANY_ALL opcodes.
    _opcode = {False: OP.ANY_ALL, True: OP.ANY_ALL_REV}
    _op_name = "ANY_ALL"
class AnyU(Any):
    # Like Any, but compiled to the ANY_U opcodes.
    _opcode = {False: OP.ANY_U, True: OP.ANY_U_REV}
    _op_name = "ANY_U"
class Atomic(RegexBase):
    # Wraps a subpattern; compiled between OP.ATOMIC and OP.END markers.
    def __init__(self, subpattern):
        RegexBase.__init__(self)
        self.subpattern = subpattern
    def fix_groups(self, pattern, reverse, fuzzy):
        self.subpattern.fix_groups(pattern, reverse, fuzzy)
    def optimise(self, info):
        self.subpattern = self.subpattern.optimise(info)
        # An empty subpattern needs no atomic wrapper.
        if self.subpattern.is_empty():
            return self.subpattern
        return self
    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self
    def remove_captures(self):
        self.subpattern = self.subpattern.remove_captures()
        return self
    def can_be_affix(self):
        return self.subpattern.can_be_affix()
    def contains_group(self):
        return self.subpattern.contains_group()
    def get_firstset(self, reverse):
        return self.subpattern.get_firstset(reverse)
    def has_simple_start(self):
        return self.subpattern.has_simple_start()
    def _compile(self, reverse, fuzzy):
        return ([(OP.ATOMIC, )] + self.subpattern.compile(reverse, fuzzy) +
          [(OP.END, )])
    def _dump(self, indent, reverse):
        print "%sATOMIC" % (INDENT * indent)
        self.subpattern.dump(indent + 1, reverse)
    def is_empty(self):
        return self.subpattern.is_empty()
    def __eq__(self, other):
        return (type(self) is type(other) and self.subpattern ==
          other.subpattern)
    def max_width(self):
        return self.subpattern.max_width()
class Boundary(ZeroWidthBase):
    # Zero-width assertion compiled to OP.BOUNDARY.
    _opcode = OP.BOUNDARY
    _op_name = "BOUNDARY"
class Branch(RegexBase):
def __init__(self, branches):
RegexBase.__init__(self)
self.branches = branches
def fix_groups(self, pattern, reverse, fuzzy):
for b in self.branches:
b.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
# Flatten branches within branches.
branches = Branch._flatten_branches(info, self.branches)
# Move any common prefix or suffix out of the branches.
prefix, branches = Branch._split_common_prefix(info, branches)
suffix, branches = Branch._split_common_suffix(info, branches)
# Merge branches starting with the same character. (If a character
# prefix doesn't match in one branch, it won't match in any of the
# others starting with that same character.)
branches = Branch._merge_common_prefixes(info, branches)
# Try to reduce adjacent single-character branches to sets.
branches = Branch._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = prefix + [Branch(branches)] + suffix
else:
sequence = prefix + branches + suffix
return make_sequence(sequence)
def optimise(self, info):
# Flatten branches within branches.
branches = Branch._flatten_branches(info, self.branches)
# Try to reduce adjacent single-character branches to sets.
branches = Branch._reduce_to_set(info, branches)
if len(branches) > 1:
sequence = [Branch(branches)]
else:
sequence = branches
return make_sequence(sequence)
def pack_characters(self, info):
self.branches = [b.pack_characters(info) for b in self.branches]
return self
def remove_captures(self):
self.branches = [b.remove_captures() for b in self.branches]
return self
def is_atomic(self):
return all(b.is_atomic() for b in self.branches)
def can_be_affix(self):
return all(b.can_be_affix() for b in self.branches)
def contains_group(self):
return any(b.contains_group() for b in self.branches)
def get_firstset(self, reverse):
fs = set()
for b in self.branches:
fs |= b.get_firstset(reverse)
return fs or set([None])
def _compile(self, reverse, fuzzy):
code = [(OP.BRANCH, )]
for b in self.branches:
code.extend(b.compile(reverse, fuzzy))
code.append((OP.NEXT, ))
code[-1] = (OP.END, )
return code
def _dump(self, indent, reverse):
print "%sBRANCH" % (INDENT * indent)
self.branches[0].dump(indent + 1, reverse)
for b in self.branches[1 : ]:
print "%sOR" % (INDENT * indent)
b.dump(indent + 1, reverse)
@staticmethod
def _flatten_branches(info, branches):
# Flatten the branches so that there aren't branches of branches.
new_branches = []
for b in branches:
b = b.optimise(info)
if isinstance(b, Branch):
new_branches.extend(b.branches)
else:
new_branches.append(b)
return new_branches
@staticmethod
def _split_common_prefix(info, branches):
# Common leading items can be moved out of the branches.
# Get the items in the branches.
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
# What is the maximum possible length of the prefix?
max_count = min(len(a) for a in alternatives)
# What is the longest common prefix?
prefix = alternatives[0]
pos = 0
end_pos = max_count
while pos < end_pos and prefix[pos].can_be_affix() and all(a[pos] ==
prefix[pos] for a in alternatives):
pos += 1
count = pos
if info.flags & UNICODE:
# We need to check that we're not splitting a sequence of
# characters which could form part of full case-folding.
count = pos
while count > 0 and not all(Branch._can_split(a, count) for a in
alternatives):
count -= 1
# No common prefix is possible.
if count == 0:
return [], branches
# Rebuild the branches.
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[count : ]))
return prefix[ : count], new_branches
@staticmethod
def _split_common_suffix(info, branches):
# Common trailing items can be moved out of the branches.
# Get the items in the branches.
alternatives = []
for b in branches:
if isinstance(b, Sequence):
alternatives.append(b.items)
else:
alternatives.append([b])
# What is the maximum possible length of the suffix?
max_count = min(len(a) for a in alternatives)
# What is the longest common suffix?
suffix = alternatives[0]
pos = -1
end_pos = -1 - max_count
while pos > end_pos and suffix[pos].can_be_affix() and all(a[pos] ==
suffix[pos] for a in alternatives):
pos -= 1
count = -1 - pos
if info.flags & UNICODE:
# We need to check that we're not splitting a sequence of
# characters which could form part of full case-folding.
while count > 0 and not all(Branch._can_split_rev(a, count) for a
in alternatives):
count -= 1
# No common suffix is possible.
if count == 0:
return [], branches
# Rebuild the branches.
new_branches = []
for a in alternatives:
new_branches.append(make_sequence(a[ : -count]))
return suffix[-count : ], new_branches
@staticmethod
def _can_split(items, count):
# Check the characters either side of the proposed split.
if not Branch._is_full_case(items, count - 1):
return True
if not Branch._is_full_case(items, count):
return True
# Check whether a 1-1 split would be OK.
if Branch._is_folded(items[count - 1 : count + 1]):
return False
# Check whether a 1-2 split would be OK.
if (Branch._is_full_case(items, count + 2) and
Branch._is_folded(items[count - 1 : count + 2])):
return False
# Check whether a 2-1 split would be OK.
if (Branch._is_full_case(items, count - 2) and
Branch._is_folded(items[count - 2 : count + 1])):
return False
return True
@staticmethod
def _can_split_rev(items, count):
end = len(items)
# Check the characters either side of the proposed split.
if not Branch._is_full_case(items, end - count):
return True
if not Branch._is_full_case(items, end - count - 1):
return True
# Check whether a 1-1 split would be OK.
if Branch._is_folded(items[end - count - 1 : end - count + 1]):
return False
# Check whether a 1-2 split would be OK.
if (Branch._is_full_case(items, end - count + 2) and
Branch._is_folded(items[end - count - 1 : end - count + 2])):
return False
# Check whether a 2-1 split would be OK.
if (Branch._is_full_case(items, end - count - 2) and
Branch._is_folded(items[end - count - 2 : end - count + 1])):
return False
return True
@staticmethod
def _merge_common_prefixes(info, branches):
# Branches with the same case-sensitive character prefix can be grouped
# together if they are separated only by other branches with a
# character prefix.
prefixed = defaultdict(list)
order = {}
new_branches = []
for b in branches:
if Branch._is_simple_character(b):
# Branch starts with a simple character.
prefixed[b.value].append([b])
order.setdefault(b.value, len(order))
elif (isinstance(b, Sequence) and b.items and
Branch._is_simple_character(b.items[0])):
# Branch starts with a simple character.
prefixed[b.items[0].value].append(b.items)
order.setdefault(b.items[0].value, len(order))
else:
Branch._flush_char_prefix(info, prefixed, order, new_branches)
new_branches.append(b)
Branch._flush_char_prefix(info, prefixed, order, new_branches)
return new_branches
@staticmethod
def _is_simple_character(c):
return isinstance(c, Character) and c.positive and not c.case_flags
@staticmethod
def _reduce_to_set(info, branches):
# Can the branches be reduced to a set?
new_branches = []
items = set()
case_flags = NOCASE
for b in branches:
if isinstance(b, (Character, Property, SetBase)):
# Branch starts with a single character.
if b.case_flags != case_flags:
# Different case sensitivity, so flush.
Branch._flush_set_members(info, items, case_flags,
new_branches)
case_flags = b.case_flags
items.add(b.with_flags(case_flags=NOCASE))
else:
Branch._flush_set_members(info, items, case_flags,
new_branches)
new_branches.append(b)
Branch._flush_set_members(info, items, case_flags, new_branches)
return new_branches
@staticmethod
def _flush_char_prefix(info, prefixed, order, new_branches):
# Flush the prefixed branches.
if not prefixed:
return
for value, branches in sorted(prefixed.items(), key=lambda pair:
order[pair[0]]):
if len(branches) == 1:
new_branches.append(make_sequence(branches[0]))
else:
subbranches = []
optional = False
for b in branches:
if len(b) > 1:
subbranches.append(make_sequence(b[1 : ]))
elif not optional:
subbranches.append(Sequence())
optional = True
sequence = Sequence([Character(value), Branch(subbranches)])
new_branches.append(sequence.optimise(info))
prefixed.clear()
order.clear()
@staticmethod
def _flush_set_members(info, items, case_flags, new_branches):
# Flush the set members.
if not items:
return
if len(items) == 1:
item = list(items)[0]
else:
item = SetUnion(info, list(items)).optimise(info)
new_branches.append(item.with_flags(case_flags=case_flags))
items.clear()
@staticmethod
def _is_full_case(items, i):
if not 0 <= i < len(items):
return False
item = items[i]
return (isinstance(item, Character) and item.positive and
(item.case_flags & FULLIGNORECASE) == FULLIGNORECASE)
@staticmethod
def _is_folded(items):
if len(items) < 2:
return False
for i in items:
if (not isinstance(i, Character) or not i.positive or not
i.case_flags):
return False
folded = u"".join(unichr(i.value) for i in items)
folded = _regex.fold_case(FULL_CASE_FOLDING, folded)
# Get the characters which expand to multiple codepoints on folding.
expanding_chars = _regex.get_expand_on_folding()
for c in expanding_chars:
if folded == _regex.fold_case(FULL_CASE_FOLDING, c):
return True
return False
def is_empty(self):
return all(b.is_empty() for b in self.branches)
def __eq__(self, other):
return type(self) is type(other) and self.branches == other.branches
def max_width(self):
return max(b.max_width() for b in self.branches)
class CallGroup(RegexBase):
def __init__(self, info, group, position):
RegexBase.__init__(self)
self.info = info
self.group = group
self.position = position
self._key = self.__class__, self.group
def fix_groups(self, pattern, reverse, fuzzy):
try:
self.group = int(self.group)
except ValueError:
try:
self.group = self.info.group_index[self.group]
except KeyError:
raise error("unknown group", pattern, self.position)
if not 0 <= self.group <= self.info.group_count:
raise error("unknown group", pattern, self.position)
if self.group > 0 and self.info.open_group_count[self.group] > 1:
raise error("ambiguous group reference", pattern, self.position)
self.info.group_calls.append((self, reverse, fuzzy))
self._key = self.__class__, self.group
def remove_captures(self):
raise error("group reference not allowed", pattern, self.position)
def _compile(self, reverse, fuzzy):
return [(OP.GROUP_CALL, self.call_ref)]
def _dump(self, indent, reverse):
print "%sGROUP_CALL %s" % (INDENT * indent, self.group)
def __eq__(self, other):
return type(self) is type(other) and self.group == other.group
def max_width(self):
return UNLIMITED
class Character(RegexBase):
    # A single character. Opcode selection is keyed on (case_flags, reverse);
    # FULLCASE without IGNORECASE selects the same opcode as NOCASE.
    _opcode = {(NOCASE, False): OP.CHARACTER, (IGNORECASE, False):
      OP.CHARACTER_IGN, (FULLCASE, False): OP.CHARACTER, (FULLIGNORECASE,
      False): OP.CHARACTER_IGN, (NOCASE, True): OP.CHARACTER_REV, (IGNORECASE,
      True): OP.CHARACTER_IGN_REV, (FULLCASE, True): OP.CHARACTER_REV,
      (FULLIGNORECASE, True): OP.CHARACTER_IGN_REV}
    def __init__(self, value, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.value = value
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)
        # Full case-folding can expand a single character to several.
        if (self.positive and (self.case_flags & FULLIGNORECASE) ==
          FULLIGNORECASE):
            self.folded = _regex.fold_case(FULL_CASE_FOLDING, unichr(self.value))
        else:
            self.folded = unichr(self.value)
        self._key = (self.__class__, self.value, self.positive,
          self.case_flags, self.zerowidth)
    def rebuild(self, positive, case_flags, zerowidth):
        return Character(self.value, positive, case_flags, zerowidth)
    def optimise(self, info, in_set=False):
        return self
    def get_firstset(self, reverse):
        return set([self])
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        code = PrecompiledCode([self._opcode[self.case_flags, reverse], flags,
          self.value])
        if len(self.folded) > 1:
            # The character expands on full case-folding, so also try matching
            # the folded string.
            code = Branch([code, String([ord(c) for c in self.folded],
              case_flags=self.case_flags)])
        return code.compile(reverse, fuzzy)
    def _dump(self, indent, reverse):
        display = repr(unichr(self.value)).lstrip("bu")
        print "%sCHARACTER %s %s%s" % (INDENT * indent,
          POS_TEXT[self.positive], display, CASE_TEXT[self.case_flags])
    def matches(self, ch):
        return (ch == self.value) == self.positive
    def max_width(self):
        return len(self.folded)
    def get_required_string(self, reverse):
        if not self.positive:
            return 1, None
        self.folded_characters = tuple(ord(c) for c in self.folded)
        return 0, self
class Conditional(RegexBase):
def __init__(self, info, group, yes_item, no_item, position):
RegexBase.__init__(self)
self.info = info
self.group = group
self.yes_item = yes_item
self.no_item = no_item
self.position = position
def fix_groups(self, pattern, reverse, fuzzy):
try:
self.group = int(self.group)
except ValueError:
try:
self.group = self.info.group_index[self.group]
except KeyError:
raise error("unknown group", pattern, self.position)
if not 1 <= self.group <= self.info.group_count:
raise error("unknown group", pattern, self.position)
self.yes_item.fix_groups(pattern, reverse, fuzzy)
self.no_item.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
yes_item = self.yes_item.optimise(info)
no_item = self.no_item.optimise(info)
return Conditional(info, self.group, yes_item, no_item, self.position)
def pack_characters(self, info):
self.yes_item = self.yes_item.pack_characters(info)
self.no_item = self.no_item.pack_characters(info)
return self
def remove_captures(self):
self.yes_item = self.yes_item.remove_captures()
self.no_item = self.no_item.remove_captures()
def is_atomic(self):
return self.yes_item.is_atomic() and self.no_item.is_atomic()
def can_be_affix(self):
return self.yes_item.can_be_affix() and self.no_item.can_be_affix()
def contains_group(self):
return self.yes_item.contains_group() or self.no_item.contains_group()
def get_firstset(self, reverse):
return (self.yes_item.get_firstset(reverse) |
self.no_item.get_firstset(reverse))
def _compile(self, reverse, fuzzy):
code = [(OP.GROUP_EXISTS, self.group)]
code.extend(self.yes_item.compile(reverse, fuzzy))
add_code = self.no_item.compile(reverse, fuzzy)
if add_code:
code.append((OP.NEXT, ))
code.extend(add_code)
code.append((OP.END, ))
return code
def _dump(self, indent, reverse):
print "%sGROUP_EXISTS %s" % (INDENT * indent, self.group)
self.yes_item.dump(indent + 1, reverse)
if self.no_item:
print "%sOR" % (INDENT * indent)
self.no_item.dump(indent + 1, reverse)
def is_empty(self):
return self.yes_item.is_empty() and self.no_item.is_empty()
def __eq__(self, other):
return type(self) is type(other) and (self.group, self.yes_item,
self.no_item) == (other.group, other.yes_item, other.no_item)
def max_width(self):
return max(self.yes_item.max_width(), self.no_item.max_width())
class DefaultBoundary(ZeroWidthBase):
    # Zero-width assertion compiled to OP.DEFAULT_BOUNDARY.
    _opcode = OP.DEFAULT_BOUNDARY
    _op_name = "DEFAULT_BOUNDARY"
class DefaultEndOfWord(ZeroWidthBase):
    # Zero-width assertion compiled to OP.DEFAULT_END_OF_WORD.
    _opcode = OP.DEFAULT_END_OF_WORD
    _op_name = "DEFAULT_END_OF_WORD"
class DefaultStartOfWord(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.DEFAULT_START_OF_WORD
    _op_name = "DEFAULT_START_OF_WORD"
class EndOfLine(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.END_OF_LINE
    _op_name = "END_OF_LINE"
class EndOfLineU(EndOfLine):
    # Variant of EndOfLine with its own opcode (the "_U" form).
    _opcode = OP.END_OF_LINE_U
    _op_name = "END_OF_LINE_U"
class EndOfString(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.END_OF_STRING
    _op_name = "END_OF_STRING"
class EndOfStringLine(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.END_OF_STRING_LINE
    _op_name = "END_OF_STRING_LINE"
class EndOfStringLineU(EndOfStringLine):
    # Variant of EndOfStringLine with its own opcode (the "_U" form).
    _opcode = OP.END_OF_STRING_LINE_U
    _op_name = "END_OF_STRING_LINE_U"
class EndOfWord(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.END_OF_WORD
    _op_name = "END_OF_WORD"
class Fuzzy(RegexBase):
def __init__(self, subpattern, constraints=None):
RegexBase.__init__(self)
if constraints is None:
constraints = {}
self.subpattern = subpattern
self.constraints = constraints
# If an error type is mentioned in the cost equation, then its maximum
# defaults to unlimited.
if "cost" in constraints:
for e in "dis":
if e in constraints["cost"]:
constraints.setdefault(e, (0, None))
# If any error type is mentioned, then all the error maxima default to
# 0, otherwise they default to unlimited.
if set(constraints) & set("dis"):
for e in "dis":
constraints.setdefault(e, (0, 0))
else:
for e in "dis":
constraints.setdefault(e, (0, None))
# The maximum of the generic error type defaults to unlimited.
constraints.setdefault("e", (0, None))
# The cost equation defaults to equal costs. Also, the cost of any
# error type not mentioned in the cost equation defaults to 0.
if "cost" in constraints:
for e in "dis":
constraints["cost"].setdefault(e, 0)
else:
constraints["cost"] = {"d": 1, "i": 1, "s": 1, "max":
constraints["e"][1]}
def fix_groups(self, pattern, reverse, fuzzy):
self.subpattern.fix_groups(pattern, reverse, True)
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
self.subpattern = self.subpattern.remove_captures()
return self
def is_atomic(self):
return self.subpattern.is_atomic()
def contains_group(self):
return self.subpattern.contains_group()
def _compile(self, reverse, fuzzy):
# The individual limits.
arguments = []
for e in "dise":
v = self.constraints[e]
arguments.append(v[0])
arguments.append(UNLIMITED if v[1] is None else v[1])
# The coeffs of the cost equation.
for e in "dis":
arguments.append(self.constraints["cost"][e])
# The maximum of the cost equation.
v = self.constraints["cost"]["max"]
arguments.append(UNLIMITED if v is None else v)
flags = 0
if reverse:
flags |= REVERSE_OP
return ([(OP.FUZZY, flags) + tuple(arguments)] +
self.subpattern.compile(reverse, True) + [(OP.END,)])
def _dump(self, indent, reverse):
constraints = self._constraints_to_string()
if constraints:
constraints = " " + constraints
print "%sFUZZY%s" % (INDENT * indent, constraints)
self.subpattern.dump(indent + 1, reverse)
def is_empty(self):
return self.subpattern.is_empty()
def __eq__(self, other):
return (type(self) is type(other) and self.subpattern ==
other.subpattern)
def max_width(self):
return UNLIMITED
def _constraints_to_string(self):
constraints = []
for name in "ids":
min, max = self.constraints[name]
if max == 0:
continue
con = ""
if min > 0:
con = "%s<=" % min
con += name
if max is not None:
con += "<=%s" % max
constraints.append(con)
cost = []
for name in "ids":
coeff = self.constraints["cost"][name]
if coeff > 0:
cost.append("%s%s" % (coeff, name))
limit = self.constraints["cost"]["max"]
if limit is not None and limit > 0:
cost = "%s<=%s" % ("+".join(cost), limit)
constraints.append(cost)
return ",".join(constraints)
class Grapheme(RegexBase):
def _compile(self, reverse, fuzzy):
# Match at least 1 character until a grapheme boundary is reached. Note
# that this is the same whether matching forwards or backwards.
character_matcher = LazyRepeat(AnyAll(), 1, None).compile(reverse,
fuzzy)
boundary_matcher = [(OP.GRAPHEME_BOUNDARY, 1)]
return character_matcher + boundary_matcher
def _dump(self, indent, reverse):
print "%sGRAPHEME" % (INDENT * indent)
def max_width(self):
return UNLIMITED
class GreedyRepeat(RegexBase):
    """A greedy repeat of a subpattern, min_count..max_count times.

    max_count of None means unbounded. LazyRepeat subclasses this and
    only overrides the opcode/name.
    """
    _opcode = OP.GREEDY_REPEAT
    _op_name = "GREEDY_REPEAT"
    def __init__(self, subpattern, min_count, max_count):
        RegexBase.__init__(self)
        self.subpattern = subpattern
        self.min_count = min_count
        self.max_count = max_count
    def fix_groups(self, pattern, reverse, fuzzy):
        self.subpattern.fix_groups(pattern, reverse, fuzzy)
    def optimise(self, info):
        subpattern = self.subpattern.optimise(info)
        # type(self) preserves laziness for LazyRepeat instances.
        return type(self)(subpattern, self.min_count, self.max_count)
    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self
    def remove_captures(self):
        self.subpattern = self.subpattern.remove_captures()
        return self
    def is_atomic(self):
        # Only a fixed-count repeat of an atomic subpattern is atomic.
        return self.min_count == self.max_count and self.subpattern.is_atomic()
    def contains_group(self):
        return self.subpattern.contains_group()
    def get_firstset(self, reverse):
        fs = self.subpattern.get_firstset(reverse)
        if self.min_count == 0:
            # The repeat may match nothing; None marks "can be empty".
            fs.add(None)
        return fs
    def _compile(self, reverse, fuzzy):
        # Emit: (opcode, min, max), subpattern code, END. An empty
        # subpattern compiles to nothing at all.
        repeat = [self._opcode, self.min_count]
        if self.max_count is None:
            repeat.append(UNLIMITED)
        else:
            repeat.append(self.max_count)
        subpattern = self.subpattern.compile(reverse, fuzzy)
        if not subpattern:
            return []
        return ([tuple(repeat)] + subpattern + [(OP.END, )])
    def _dump(self, indent, reverse):
        if self.max_count is None:
            limit = "INF"
        else:
            limit = self.max_count
        print "%s%s %s %s" % (INDENT * indent, self._op_name, self.min_count,
          limit)
        self.subpattern.dump(indent + 1, reverse)
    def is_empty(self):
        return self.subpattern.is_empty()
    def __eq__(self, other):
        return type(self) is type(other) and (self.subpattern, self.min_count,
          self.max_count) == (other.subpattern, other.min_count,
          other.max_count)
    def max_width(self):
        if self.max_count is None:
            return UNLIMITED
        return self.subpattern.max_width() * self.max_count
    def get_required_string(self, reverse):
        # Returns (offset, required-string-node or None). A required
        # string can be reported only when the repeat must occur at least
        # once.
        max_count = UNLIMITED if self.max_count is None else self.max_count
        if self.min_count == 0:
            w = self.subpattern.max_width() * max_count
            return min(w, UNLIMITED), None
        ofs, req = self.subpattern.get_required_string(reverse)
        if req:
            return ofs, req
        w = self.subpattern.max_width() * max_count
        return min(w, UNLIMITED), None
class Group(RegexBase):
def __init__(self, info, group, subpattern):
RegexBase.__init__(self)
self.info = info
self.group = group
self.subpattern = subpattern
self.call_ref = None
def fix_groups(self, pattern, reverse, fuzzy):
self.info.defined_groups[self.group] = (self, reverse, fuzzy)
self.subpattern.fix_groups(pattern, reverse, fuzzy)
def optimise(self, info):
subpattern = self.subpattern.optimise(info)
return Group(self.info, self.group, subpattern)
def pack_characters(self, info):
self.subpattern = self.subpattern.pack_characters(info)
return self
def remove_captures(self):
return self.subpattern.remove_captures()
def is_atomic(self):
return self.subpattern.is_atomic()
def can_be_affix(self):
return False
def contains_group(self):
return True
def get_firstset(self, reverse):
return self.subpattern.get_firstset(reverse)
def has_simple_start(self):
return self.subpattern.has_simple_start()
def _compile(self, reverse, fuzzy):
code = []
key = self.group, reverse, fuzzy
ref = self.info.call_refs.get(key)
if ref is not None:
code += [(OP.CALL_REF, ref)]
public_group = private_group = self.group
if private_group < 0:
public_group = self.info.private_groups[private_group]
private_group = self.info.group_count - private_group
code += ([(OP.GROUP, private_group, public_group)] +
self.subpattern.compile(reverse, fuzzy) + [(OP.END, )])
if ref is not None:
code += [(OP.END, )]
return code
def _dump(self, indent, reverse):
group = self.group
if group < 0:
group = private_groups[group]
print "%sGROUP %s" % (INDENT * indent, group)
self.subpattern.dump(indent + 1, reverse)
def __eq__(self, other):
return (type(self) is type(other) and (self.group, self.subpattern) ==
(other.group, other.subpattern))
def max_width(self):
return self.subpattern.max_width()
def get_required_string(self, reverse):
return self.subpattern.get_required_string(reverse)
class LazyRepeat(GreedyRepeat):
    # Same behaviour as GreedyRepeat except for the matcher opcode, which
    # selects lazy (minimal) repetition.
    _opcode = OP.LAZY_REPEAT
    _op_name = "LAZY_REPEAT"
class LookAround(RegexBase):
    """A lookaround assertion (ahead/behind, positive/negative) around a
    subpattern.
    """
    _dir_text = {False: "AHEAD", True: "BEHIND"}
    def __new__(cls, behind, positive, subpattern):
        # A positive lookaround over an empty pattern always succeeds, so
        # it reduces to the subpattern itself.
        if positive and subpattern.is_empty():
            return subpattern
        return RegexBase.__new__(cls)
    def __init__(self, behind, positive, subpattern):
        RegexBase.__init__(self)
        self.behind = bool(behind)
        self.positive = bool(positive)
        self.subpattern = subpattern
    def fix_groups(self, pattern, reverse, fuzzy):
        # A lookbehind scans its subpattern in reverse.
        self.subpattern.fix_groups(pattern, self.behind, fuzzy)
    def optimise(self, info):
        subpattern = self.subpattern.optimise(info)
        return LookAround(self.behind, self.positive, subpattern)
    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self
    def remove_captures(self):
        return self.subpattern.remove_captures()
    def is_atomic(self):
        return self.subpattern.is_atomic()
    def can_be_affix(self):
        return self.subpattern.can_be_affix()
    def contains_group(self):
        return self.subpattern.contains_group()
    def _compile(self, reverse, fuzzy):
        # NOTE(review): `fuzzy` is not forwarded to the subpattern's
        # compile here, unlike fix_groups above -- confirm that is
        # intentional.
        return ([(OP.LOOKAROUND, int(self.positive), int(not self.behind))] +
          self.subpattern.compile(self.behind) + [(OP.END, )])
    def _dump(self, indent, reverse):
        print "%sLOOK%s %s" % (INDENT * indent, self._dir_text[self.behind],
          POS_TEXT[self.positive])
        self.subpattern.dump(indent + 1, self.behind)
    def is_empty(self):
        return self.subpattern.is_empty()
    def __eq__(self, other):
        return type(self) is type(other) and (self.behind, self.positive,
          self.subpattern) == (other.behind, other.positive, other.subpattern)
    def max_width(self):
        # A lookaround consumes no characters.
        return 0
class PrecompiledCode(RegexBase):
    """Wraps a pre-built code sequence so it can be embedded in a pattern.

    NOTE(review): RegexBase.__init__ is not called here, so attributes it
    would normally set are absent -- confirm nothing relies on them for
    this node type.
    """
    def __init__(self, code):
        self.code = code
    def _compile(self, reverse, fuzzy):
        return [tuple(self.code)]
class Property(RegexBase):
    """Matches one character by Unicode property value.

    `value` packs the property id in the high 16 bits and the property
    value in the low 16 bits (see _dump). The opcode is chosen by
    (case_flags, reverse).
    """
    _opcode = {(NOCASE, False): OP.PROPERTY, (IGNORECASE, False):
      OP.PROPERTY_IGN, (FULLCASE, False): OP.PROPERTY, (FULLIGNORECASE, False):
      OP.PROPERTY_IGN, (NOCASE, True): OP.PROPERTY_REV, (IGNORECASE, True):
      OP.PROPERTY_IGN_REV, (FULLCASE, True): OP.PROPERTY_REV, (FULLIGNORECASE,
      True): OP.PROPERTY_IGN_REV}
    def __init__(self, value, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.value = value
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)

        self._key = (self.__class__, self.value, self.positive,
          self.case_flags, self.zerowidth)
    def rebuild(self, positive, case_flags, zerowidth):
        return Property(self.value, positive, case_flags, zerowidth)
    def optimise(self, info, in_set=False):
        return self
    def get_firstset(self, reverse):
        return set([self])
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[self.case_flags, reverse], flags, self.value)]
    def _dump(self, indent, reverse):
        # Unpack the property id/value encoded in self.value for display.
        prop = PROPERTY_NAMES[self.value >> 16]
        name, value = prop[0], prop[1][self.value & 0xFFFF]
        print "%sPROPERTY %s %s:%s%s" % (INDENT * indent,
          POS_TEXT[self.positive], name, value, CASE_TEXT[self.case_flags])
    def matches(self, ch):
        return _regex.has_property_value(self.value, ch) == self.positive
    def max_width(self):
        return 1
class Range(RegexBase):
    """Matches one character in the codepoint range lower..upper."""
    _opcode = {(NOCASE, False): OP.RANGE, (IGNORECASE, False): OP.RANGE_IGN,
      (FULLCASE, False): OP.RANGE, (FULLIGNORECASE, False): OP.RANGE_IGN,
      (NOCASE, True): OP.RANGE_REV, (IGNORECASE, True): OP.RANGE_IGN_REV,
      (FULLCASE, True): OP.RANGE_REV, (FULLIGNORECASE, True): OP.RANGE_IGN_REV}
    _op_name = "RANGE"
    def __init__(self, lower, upper, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.lower = lower
        self.upper = upper
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)

        self._key = (self.__class__, self.lower, self.upper, self.positive,
          self.case_flags, self.zerowidth)
    def rebuild(self, positive, case_flags, zerowidth):
        return Range(self.lower, self.upper, positive, case_flags, zerowidth)
    def optimise(self, info, in_set=False):
        # Is the range case-sensitive?
        if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
            return self

        # Is full case-folding possible?
        if (not (info.flags & UNICODE) or (self.case_flags & FULLIGNORECASE) !=
          FULLIGNORECASE):
            return self

        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()

        # Get the folded characters in the range.
        items = []
        for ch in expanding_chars:
            if self.lower <= ord(ch) <= self.upper:
                folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
                items.append(String([ord(c) for c in folded],
                  case_flags=self.case_flags))

        if not items:
            # We can fall back to simple case-folding.
            return self

        if len(items) < self.upper - self.lower + 1:
            # Not all the characters are covered by the full case-folding.
            items.insert(0, self)

        return Branch(items)
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[self.case_flags, reverse], flags, self.lower,
          self.upper)]
    def _dump(self, indent, reverse):
        # Strip the b/u literal prefix from the repr for display (Py2).
        display_lower = repr(unichr(self.lower)).lstrip("bu")
        display_upper = repr(unichr(self.upper)).lstrip("bu")
        print "%sRANGE %s %s %s%s" % (INDENT * indent, POS_TEXT[self.positive],
          display_lower, display_upper, CASE_TEXT[self.case_flags])
    def matches(self, ch):
        return (self.lower <= ch <= self.upper) == self.positive
    def max_width(self):
        return 1
class RefGroup(RegexBase):
_opcode = {(NOCASE, False): OP.REF_GROUP, (IGNORECASE, False):
OP.REF_GROUP_IGN, (FULLCASE, False): OP.REF_GROUP, (FULLIGNORECASE,
False): OP.REF_GROUP_FLD, (NOCASE, True): OP.REF_GROUP_REV, (IGNORECASE,
True): OP.REF_GROUP_IGN_REV, (FULLCASE, True): OP.REF_GROUP_REV,
(FULLIGNORECASE, True): OP.REF_GROUP_FLD_REV}
def __init__(self, info, group, position, case_flags=NOCASE):
RegexBase.__init__(self)
self.info = info
self.group = group
self.position = position
self.case_flags = case_flags
self._key = self.__class__, self.group, self.case_flags
def fix_groups(self, pattern, reverse, fuzzy):
try:
self.group = int(self.group)
except ValueError:
try:
self.group = self.info.group_index[self.group]
except KeyError:
raise error("unknown group", pattern, self.position)
if not 1 <= self.group <= self.info.group_count:
raise error("unknown group", pattern, self.position)
self._key = self.__class__, self.group, self.case_flags
def remove_captures(self):
raise error("group reference not allowed", pattern, self.position)
def _compile(self, reverse, fuzzy):
flags = 0
if fuzzy:
flags |= FUZZY_OP
return [(self._opcode[self.case_flags, reverse], flags, self.group)]
def _dump(self, indent, reverse):
print "%sREF_GROUP %s%s" % (INDENT * indent, self.group,
CASE_TEXT[self.case_flags])
def max_width(self):
return UNLIMITED
class SearchAnchor(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.SEARCH_ANCHOR
    _op_name = "SEARCH_ANCHOR"
class Sequence(RegexBase):
    """A sequence of pattern nodes matched one after another."""
    def __init__(self, items=None):
        RegexBase.__init__(self)
        if items is None:
            items = []

        self.items = items
    def fix_groups(self, pattern, reverse, fuzzy):
        for s in self.items:
            s.fix_groups(pattern, reverse, fuzzy)
    def optimise(self, info):
        # Flatten the sequences.
        items = []
        for s in self.items:
            s = s.optimise(info)
            if isinstance(s, Sequence):
                items.extend(s.items)
            else:
                items.append(s)

        return make_sequence(items)
    def pack_characters(self, info):
        "Packs sequences of characters into strings."
        items = []
        characters = []
        case_flags = NOCASE
        for s in self.items:
            if type(s) is Character and s.positive:
                if s.case_flags != case_flags:
                    # Different case sensitivity, so flush, unless neither the
                    # previous nor the new character are cased.
                    if s.case_flags or is_cased(info, s.value):
                        Sequence._flush_characters(info, characters,
                          case_flags, items)

                        case_flags = s.case_flags

                characters.append(s.value)
            elif type(s) is String or type(s) is Literal:
                if s.case_flags != case_flags:
                    # Different case sensitivity, so flush, unless the neither
                    # the previous nor the new string are cased.
                    if s.case_flags or any(is_cased(info, c) for c in
                      characters):
                        Sequence._flush_characters(info, characters,
                          case_flags, items)

                        case_flags = s.case_flags

                characters.extend(s.characters)
            else:
                # Not a packable node; flush what we have and keep it as-is.
                Sequence._flush_characters(info, characters, case_flags, items)

                items.append(s.pack_characters(info))

        Sequence._flush_characters(info, characters, case_flags, items)

        return make_sequence(items)
    def remove_captures(self):
        self.items = [s.remove_captures() for s in self.items]
        return self
    def is_atomic(self):
        return all(s.is_atomic() for s in self.items)
    def can_be_affix(self):
        return False
    def contains_group(self):
        return any(s.contains_group() for s in self.items)
    def get_firstset(self, reverse):
        fs = set()
        items = self.items
        if reverse:
            # NOTE(review): this reverses self.items in place, not a copy --
            # confirm callers never rely on the original order afterwards.
            items.reverse()
        for s in items:
            fs |= s.get_firstset(reverse)
            if None not in fs:
                return fs
            # None means "may match empty"; keep accumulating.
            fs.discard(None)

        return fs | set([None])
    def has_simple_start(self):
        return self.items and self.items[0].has_simple_start()
    def _compile(self, reverse, fuzzy):
        # When matching backwards the items are emitted in reverse order.
        seq = self.items
        if reverse:
            seq = seq[::-1]

        code = []
        for s in seq:
            code.extend(s.compile(reverse, fuzzy))

        return code
    def _dump(self, indent, reverse):
        for s in self.items:
            s.dump(indent, reverse)
    @staticmethod
    def _flush_characters(info, characters, case_flags, items):
        # Turn the accumulated character values into a Character or String
        # node and clear the accumulator (in place, so callers see it).
        if not characters:
            return

        # Disregard case_flags if all of the characters are case-less.
        if case_flags & IGNORECASE:
            if not any(is_cased(info, c) for c in characters):
                case_flags = NOCASE

        if len(characters) == 1:
            items.append(Character(characters[0], case_flags=case_flags))
        else:
            items.append(String(characters, case_flags=case_flags))

        characters[:] = []
    def is_empty(self):
        return all(i.is_empty() for i in self.items)
    def __eq__(self, other):
        return type(self) is type(other) and self.items == other.items
    def max_width(self):
        return sum(s.max_width() for s in self.items)
    def get_required_string(self, reverse):
        # Find the first item that reports a required string, accumulating
        # the offset of the items before it.
        seq = self.items
        if reverse:
            seq = seq[::-1]

        offset = 0

        for s in seq:
            ofs, req = s.get_required_string(reverse)
            offset += ofs
            if req:
                return offset, req

        return offset, None
class SetBase(RegexBase):
    """Base class for set operations (union, intersection, difference,
    symmetric difference) over set members.
    """
    def __init__(self, info, items, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.info = info
        self.items = tuple(items)
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)

        self.char_width = 1

        self._key = (self.__class__, self.items, self.positive,
          self.case_flags, self.zerowidth)
    def rebuild(self, positive, case_flags, zerowidth):
        return type(self)(self.info, self.items, positive, case_flags,
          zerowidth).optimise(self.info)
    def get_firstset(self, reverse):
        return set([self])
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        code = [(self._opcode[self.case_flags, reverse], flags)]
        for m in self.items:
            code.extend(m.compile())
        code.append((OP.END, ))

        return code
    def _dump(self, indent, reverse):
        print "%s%s %s%s" % (INDENT * indent, self._op_name,
          POS_TEXT[self.positive], CASE_TEXT[self.case_flags])
        for i in self.items:
            i.dump(indent + 1, reverse)
    def _handle_case_folding(self, info, in_set):
        # Is the set case-sensitive?
        if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
            return self

        # Is full case-folding possible?
        if (not (self.info.flags & UNICODE) or (self.case_flags &
          FULLIGNORECASE) !=
          FULLIGNORECASE):
            return self

        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()

        # Get the folded characters in the set.
        items = []
        seen = set()
        for ch in expanding_chars:
            if self.matches(ord(ch)):
                folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
                if folded not in seen:
                    items.append(String([ord(c) for c in folded],
                      case_flags=self.case_flags))
                    seen.add(folded)

        if not items:
            # We can fall back to simple case-folding.
            return self

        return Branch([self] + items)
    def max_width(self):
        # Is the set case-sensitive?
        if not self.positive or not (self.case_flags & IGNORECASE):
            return 1

        # Is full case-folding possible?
        if (not (self.info.flags & UNICODE) or (self.case_flags &
          FULLIGNORECASE) != FULLIGNORECASE):
            return 1

        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()

        # Get the folded characters in the set.
        seen = set()
        for ch in expanding_chars:
            if self.matches(ord(ch)):
                folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
                seen.add(folded)

        if not seen:
            return 1

        return max(len(folded) for folded in seen)
class SetDiff(SetBase):
    """Set difference: the first item minus the union of the rest."""
    _opcode = {(NOCASE, False): OP.SET_DIFF, (IGNORECASE, False):
      OP.SET_DIFF_IGN, (FULLCASE, False): OP.SET_DIFF, (FULLIGNORECASE, False):
      OP.SET_DIFF_IGN, (NOCASE, True): OP.SET_DIFF_REV, (IGNORECASE, True):
      OP.SET_DIFF_IGN_REV, (FULLCASE, True): OP.SET_DIFF_REV, (FULLIGNORECASE,
      True): OP.SET_DIFF_IGN_REV}
    _op_name = "SET_DIFF"
    def optimise(self, info, in_set=False):
        items = self.items
        if len(items) > 2:
            # Fold items[1:] into one union so exactly two items remain.
            items = [items[0], SetUnion(info, items[1 : ])]

        if len(items) == 1:
            return items[0].with_flags(case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)

        self.items = tuple(m.optimise(info, in_set=True) for m in items)

        return self._handle_case_folding(info, in_set)
    def matches(self, ch):
        m = self.items[0].matches(ch) and not self.items[1].matches(ch)
        return m == self.positive
class SetInter(SetBase):
    """Set intersection of the member items."""
    _opcode = {(NOCASE, False): OP.SET_INTER, (IGNORECASE, False):
      OP.SET_INTER_IGN, (FULLCASE, False): OP.SET_INTER, (FULLIGNORECASE,
      False): OP.SET_INTER_IGN, (NOCASE, True): OP.SET_INTER_REV, (IGNORECASE,
      True): OP.SET_INTER_IGN_REV, (FULLCASE, True): OP.SET_INTER_REV,
      (FULLIGNORECASE, True): OP.SET_INTER_IGN_REV}
    _op_name = "SET_INTER"
    def optimise(self, info, in_set=False):
        items = []
        for m in self.items:
            m = m.optimise(info, in_set=True)
            if isinstance(m, SetInter) and m.positive:
                # Intersection in intersection.
                items.extend(m.items)
            else:
                items.append(m)

        if len(items) == 1:
            return items[0].with_flags(case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)

        self.items = tuple(items)

        return self._handle_case_folding(info, in_set)
    def matches(self, ch):
        m = all(i.matches(ch) for i in self.items)
        return m == self.positive
class SetSymDiff(SetBase):
    """Symmetric difference of the member items."""
    _opcode = {(NOCASE, False): OP.SET_SYM_DIFF, (IGNORECASE, False):
      OP.SET_SYM_DIFF_IGN, (FULLCASE, False): OP.SET_SYM_DIFF, (FULLIGNORECASE,
      False): OP.SET_SYM_DIFF_IGN, (NOCASE, True): OP.SET_SYM_DIFF_REV,
      (IGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV, (FULLCASE, True):
      OP.SET_SYM_DIFF_REV, (FULLIGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV}
    _op_name = "SET_SYM_DIFF"
    def optimise(self, info, in_set=False):
        items = []
        for m in self.items:
            m = m.optimise(info, in_set=True)
            if isinstance(m, SetSymDiff) and m.positive:
                # Symmetric difference in symmetric difference.
                items.extend(m.items)
            else:
                items.append(m)

        if len(items) == 1:
            return items[0].with_flags(case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)

        self.items = tuple(items)

        return self._handle_case_folding(info, in_set)
    def matches(self, ch):
        # XOR-fold the membership results of all items.
        m = False
        for i in self.items:
            m = m != i.matches(ch)
        return m == self.positive
class SetUnion(SetBase):
    """Set union of the member items (a character class like [abc])."""
    _opcode = {(NOCASE, False): OP.SET_UNION, (IGNORECASE, False):
      OP.SET_UNION_IGN, (FULLCASE, False): OP.SET_UNION, (FULLIGNORECASE,
      False): OP.SET_UNION_IGN, (NOCASE, True): OP.SET_UNION_REV, (IGNORECASE,
      True): OP.SET_UNION_IGN_REV, (FULLCASE, True): OP.SET_UNION_REV,
      (FULLIGNORECASE, True): OP.SET_UNION_IGN_REV}
    _op_name = "SET_UNION"
    def optimise(self, info, in_set=False):
        items = []
        for m in self.items:
            m = m.optimise(info, in_set=True)
            if isinstance(m, SetUnion) and m.positive:
                # Union in union.
                items.extend(m.items)
            else:
                items.append(m)

        if len(items) == 1:
            # A union of one thing is just that thing (with flags merged).
            i = items[0]
            return i.with_flags(positive=i.positive == self.positive,
              case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)

        self.items = tuple(items)

        return self._handle_case_folding(info, in_set)
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        # Group plain characters by polarity so runs can be emitted as a
        # single STRING member instead of many CHARACTER members.
        characters, others = defaultdict(list), []
        for m in self.items:
            if isinstance(m, Character):
                characters[m.positive].append(m.value)
            else:
                others.append(m)

        code = [(self._opcode[self.case_flags, reverse], flags)]

        for positive, values in characters.items():
            flags = 0
            if positive:
                flags |= POSITIVE_OP
            if len(values) == 1:
                code.append((OP.CHARACTER, flags, values[0]))
            else:
                code.append((OP.STRING, flags, len(values)) + tuple(values))

        for m in others:
            code.extend(m.compile())

        code.append((OP.END, ))

        return code
    def matches(self, ch):
        m = any(i.matches(ch) for i in self.items)
        return m == self.positive
class StartOfLine(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.START_OF_LINE
    _op_name = "START_OF_LINE"
class StartOfLineU(StartOfLine):
    # Variant of StartOfLine with its own opcode (the "_U" form).
    _opcode = OP.START_OF_LINE_U
    _op_name = "START_OF_LINE_U"
class StartOfString(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.START_OF_STRING
    _op_name = "START_OF_STRING"
class StartOfWord(ZeroWidthBase):
    # Zero-width assertion node; the opcode/name identify it to the matcher.
    _opcode = OP.START_OF_WORD
    _op_name = "START_OF_WORD"
class String(RegexBase):
    """Matches a literal run of characters (stored as codepoint values).

    When full case-folding applies, the folded form is precomputed and
    used for compilation and width.
    """
    _opcode = {(NOCASE, False): OP.STRING, (IGNORECASE, False): OP.STRING_IGN,
      (FULLCASE, False): OP.STRING, (FULLIGNORECASE, False): OP.STRING_FLD,
      (NOCASE, True): OP.STRING_REV, (IGNORECASE, True): OP.STRING_IGN_REV,
      (FULLCASE, True): OP.STRING_REV, (FULLIGNORECASE, True):
      OP.STRING_FLD_REV}
    def __init__(self, characters, case_flags=NOCASE):
        self.characters = tuple(characters)
        self.case_flags = case_flags

        if (self.case_flags & FULLIGNORECASE) == FULLIGNORECASE:
            # Precompute the full case-folded codepoints.
            folded_characters = []
            for char in self.characters:
                folded = _regex.fold_case(FULL_CASE_FOLDING, unichr(char))
                folded_characters.extend(ord(c) for c in folded)
        else:
            folded_characters = self.characters

        self.folded_characters = tuple(folded_characters)
        self.required = False

        self._key = self.__class__, self.characters, self.case_flags
    def get_firstset(self, reverse):
        # The first character depends on the matching direction.
        if reverse:
            pos = -1
        else:
            pos = 0
        return set([Character(self.characters[pos],
          case_flags=self.case_flags)])
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if fuzzy:
            flags |= FUZZY_OP
        if self.required:
            flags |= REQUIRED_OP
        return [(self._opcode[self.case_flags, reverse], flags,
          len(self.folded_characters)) + self.folded_characters]
    def _dump(self, indent, reverse):
        # Strip the b/u literal prefix from the repr for display (Py2).
        display = repr("".join(unichr(c) for c in self.characters)).lstrip("bu")
        print "%sSTRING %s%s" % (INDENT * indent, display,
          CASE_TEXT[self.case_flags])
    def max_width(self):
        return len(self.folded_characters)
    def get_required_string(self, reverse):
        return 0, self
class Literal(String):
    # Behaves like String but dumps each character on its own line.
    def _dump(self, indent, reverse):
        for c in self.characters:
            display = repr(unichr(c)).lstrip("bu")
            print "%sCHARACTER MATCH %s%s" % (INDENT * indent, display,
              CASE_TEXT[self.case_flags])
class StringSet(RegexBase):
    """Matches any of the strings in a named list supplied via kwargs
    (\\L<name> in the pattern syntax).
    """
    _opcode = {(NOCASE, False): OP.STRING_SET, (IGNORECASE, False):
      OP.STRING_SET_IGN, (FULLCASE, False): OP.STRING_SET, (FULLIGNORECASE,
      False): OP.STRING_SET_FLD, (NOCASE, True): OP.STRING_SET_REV,
      (IGNORECASE, True): OP.STRING_SET_IGN_REV, (FULLCASE, True):
      OP.STRING_SET_REV, (FULLIGNORECASE, True): OP.STRING_SET_FLD_REV}
    def __init__(self, info, name, case_flags=NOCASE):
        self.info = info
        self.name = name
        self.case_flags = case_flags

        self._key = self.__class__, self.name, self.case_flags

        # Register this named list so the matcher can index it.
        self.set_key = (name, self.case_flags)
        if self.set_key not in info.named_lists_used:
            info.named_lists_used[self.set_key] = len(info.named_lists_used)
    def _compile(self, reverse, fuzzy):
        index = self.info.named_lists_used[self.set_key]
        items = self.info.kwargs[self.name]

        case_flags = self.case_flags

        if not items:
            return []

        encoding = self.info.flags & _ALL_ENCODINGS
        fold_flags = encoding | case_flags

        if fuzzy:
            # For fuzzy matching, expand the list into explicit branches
            # of character sequences.
            choices = [self._folded(fold_flags, i) for i in items]

            # Sort from longest to shortest.
            choices.sort(key=lambda s: (-len(s), s))

            branches = []
            for string in choices:
                branches.append(Sequence([Character(c, case_flags=case_flags)
                  for c in string]))

            if len(branches) > 1:
                branch = Branch(branches)
            else:
                branch = branches[0]
            branch = branch.optimise(self.info).pack_characters(self.info)

            return branch.compile(reverse, fuzzy)
        else:
            min_len = min(len(i) for i in items)
            max_len = max(len(self._folded(fold_flags, i)) for i in items)
            return [(self._opcode[case_flags, reverse], index, min_len,
              max_len)]
    def _dump(self, indent, reverse):
        print "%sSTRING_SET %s%s" % (INDENT * indent, self.name,
          CASE_TEXT[self.case_flags])
    def _folded(self, fold_flags, item):
        # Return the item as a list of (possibly case-folded) codepoints.
        if isinstance(item, unicode):
            return [ord(c) for c in _regex.fold_case(fold_flags, item)]
        else:
            return [ord(c) for c in item]
    def _flatten(self, s):
        # Flattens the branches.
        if isinstance(s, Branch):
            for b in s.branches:
                self._flatten(b)
        elif isinstance(s, Sequence) and s.items:
            seq = s.items

            while isinstance(seq[-1], Sequence):
                seq[-1 : ] = seq[-1].items

            n = 0
            while n < len(seq) and isinstance(seq[n], Character):
                n += 1

            if n > 1:
                seq[ : n] = [String([c.value for c in seq[ : n]],
                  case_flags=self.case_flags)]

            self._flatten(seq[-1])
    def max_width(self):
        if not self.info.kwargs[self.name]:
            return 0

        if self.case_flags & IGNORECASE:
            fold_flags = (self.info.flags & _ALL_ENCODINGS) | self.case_flags
            return max(len(_regex.fold_case(fold_flags, i)) for i in
              self.info.kwargs[self.name])
        else:
            return max(len(i) for i in self.info.kwargs[self.name])
class Source(object):
"Scanner for the regular expression source string."
def __init__(self, string):
if isinstance(string, unicode):
self.string = string
self.char_type = unichr
else:
self.string = string
self.char_type = chr
self.pos = 0
self.ignore_space = False
self.sep = string[ : 0]
def get(self):
string = self.string
pos = self.pos
try:
if self.ignore_space:
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
else:
break
ch = string[pos]
self.pos = pos + 1
return ch
except IndexError:
# We've reached the end of the string.
self.pos = pos
return string[ : 0]
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
return string[ : 0]
def get_many(self, count=1):
string = self.string
pos = self.pos
try:
if self.ignore_space:
substring = []
while len(substring) < count:
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
else:
break
substring.append(string[pos])
pos += 1
substring = "".join(substring)
else:
substring = string[pos : pos + count]
pos += len(substring)
self.pos = pos
return substring
except IndexError:
# We've reached the end of the string.
self.pos = len(string)
return "".join(substring)
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
return "".join(substring)
def get_while(self, test_set, include=True):
string = self.string
pos = self.pos
if self.ignore_space:
try:
substring = []
while True:
if string[pos].isspace():
# Skip over the whitespace.
pos += 1
elif string[pos] == "#":
# Skip over the comment to the end of the line.
pos = string.index("\n", pos)
elif (string[pos] in test_set) == include:
substring.append(string[pos])
pos += 1
else:
break
self.pos = pos
except IndexError:
# We've reached the end of the string.
self.pos = len(string)
except ValueError:
# The comment extended to the end of the string.
self.pos = len(string)
return "".join(substring)
else:
try:
while (string[pos] in test_set) == include:
pos += 1
substring = string[self.pos : pos]
self.pos = pos
return substring
except IndexError:
# We've reached the end of the string.
substring = string[self.pos : pos]
self.pos = pos
return substring
    def skip_while(self, test_set, include=True):
        """Advance past the run of characters that are in `test_set` (or,
        if `include` is False, not in it), returning nothing.

        When `ignore_space` is set, whitespace and "#" comments are also
        skipped.
        """
        string = self.string
        pos = self.pos
        try:
            if self.ignore_space:
                while True:
                    if string[pos].isspace():
                        # Skip over the whitespace.
                        pos += 1
                    elif string[pos] == "#":
                        # Skip over the comment to the end of the line.
                        pos = string.index("\n", pos)
                    elif (string[pos] in test_set) == include:
                        pos += 1
                    else:
                        break
            else:
                while (string[pos] in test_set) == include:
                    pos += 1
            self.pos = pos
        except IndexError:
            # We've reached the end of the string.
            self.pos = len(string)
        except ValueError:
            # The comment extended to the end of the string.
            self.pos = len(string)
    def match(self, substring):
        """Return whether `substring` occurs at the current position,
        advancing past it if it does.

        When `ignore_space` is set, whitespace and "#" comments may appear
        between the matched characters. On failure, self.pos is left
        unchanged.
        """
        string = self.string
        pos = self.pos
        if self.ignore_space:
            try:
                # Match character by character, skipping whitespace and
                # comments before each one.
                for c in substring:
                    while True:
                        if string[pos].isspace():
                            # Skip over the whitespace.
                            pos += 1
                        elif string[pos] == "#":
                            # Skip over the comment to the end of the line.
                            pos = string.index("\n", pos)
                        else:
                            break
                    if string[pos] != c:
                        return False
                    pos += 1
                self.pos = pos
                return True
            except IndexError:
                # We've reached the end of the string.
                return False
            except ValueError:
                # The comment extended to the end of the string.
                return False
        else:
            if not string.startswith(substring, pos):
                return False
            self.pos = pos + len(substring)
            return True
def expect(self, substring):
if not self.match(substring):
raise error("missing %s" % substring, self.string, self.pos)
    def at_end(self):
        """Return whether only whitespace/comments (if ignored) remain
        before the end of the string. Does not update self.pos.
        """
        string = self.string
        pos = self.pos
        try:
            if self.ignore_space:
                while True:
                    if string[pos].isspace():
                        pos += 1
                    elif string[pos] == "#":
                        pos = string.index("\n", pos)
                    else:
                        break
            return pos >= len(string)
        except IndexError:
            # We've reached the end of the string.
            return True
        except ValueError:
            # The comment extended to the end of the string.
            return True
class Info(object):
    """Info about the regular expression.

    Accumulates parsing state: the effective flags, capture-group
    numbering and names, named lists, open-group tracking, and the group
    calls/private groups needed for recursive patterns.
    """
    def __init__(self, flags=0, char_type=None, kwargs=None):
        # NOTE: the default used to be the mutable `kwargs={}`, which is a
        # single dict shared by every Info() created without kwargs. Use a
        # None sentinel so each instance gets its own fresh mapping.
        if kwargs is None:
            kwargs = {}
        flags |= DEFAULT_FLAGS[(flags & _ALL_VERSIONS) or DEFAULT_VERSION]
        self.flags = flags
        self.global_flags = flags
        self.inline_locale = False
        self.kwargs = kwargs
        self.group_count = 0
        self.group_index = {}
        self.group_name = {}
        self.char_type = char_type
        self.named_lists_used = {}
        self.open_groups = []
        self.open_group_count = {}
        self.defined_groups = {}
        self.group_calls = []
        self.private_groups = {}
    def open_group(self, name=None):
        """Open a capture group and return its group number.

        A repeated name reuses its existing number; otherwise the next
        free number is allocated. A nested occurrence of an already-open
        group gets a negative private alias until it can be assigned a
        proper number.
        """
        group = self.group_index.get(name)
        if group is None:
            while True:
                self.group_count += 1
                if name is None or self.group_count not in self.group_name:
                    break
            group = self.group_count
            if name:
                self.group_index[name] = group
                self.group_name[group] = name
        if group in self.open_groups:
            # We have a nested named group. We'll assign it a private group
            # number, initially negative until we can assign a proper
            # (positive) number.
            group_alias = -(len(self.private_groups) + 1)
            self.private_groups[group_alias] = group
            group = group_alias
        self.open_groups.append(group)
        self.open_group_count[group] = self.open_group_count.get(group, 0) + 1
        return group
    def close_group(self):
        "Close the most recently opened group."
        self.open_groups.pop()
    def is_open_group(self, name):
        "Return whether the group named/numbered `name` is currently open."
        # In version 1, a group reference can refer to an open group. We'll
        # just pretend the group isn't open.
        version = (self.flags & _ALL_VERSIONS) or DEFAULT_VERSION
        if version == VERSION1:
            return False
        if name.isdigit():
            group = int(name)
        else:
            group = self.group_index.get(name)
        return group in self.open_groups
def _check_group_features(info, parsed):
    """Checks whether the reverse and fuzzy features of the group calls match
    the groups which they call.

    Stores the results on `info` as `call_refs` (mapping (group, reverse,
    fuzzy) to a reference index) and `additional_groups` (copies that must
    be compiled with different features).
    """
    call_refs = {}
    additional_groups = []
    for call, reverse, fuzzy in info.group_calls:
        # Look up the reference of this group call.
        key = (call.group, reverse, fuzzy)
        ref = call_refs.get(key)
        if ref is None:
            # This group doesn't have a reference yet, so look up its features.
            if call.group == 0:
                # Calling the pattern as a whole.
                rev = bool(info.flags & REVERSE)
                fuz = isinstance(parsed, Fuzzy)
                if (rev, fuz) != (reverse, fuzzy):
                    # The pattern as a whole doesn't have the features we want,
                    # so we'll need to make a copy of it with the desired
                    # features.
                    additional_groups.append((parsed, reverse, fuzzy))
            else:
                # Calling a capture group.
                def_info = info.defined_groups[call.group]
                group = def_info[0]
                if def_info[1 : ] != (reverse, fuzzy):
                    # The group doesn't have the features we want, so we'll
                    # need to make a copy of it with the desired features.
                    additional_groups.append((group, reverse, fuzzy))
            ref = len(call_refs)
            call_refs[key] = ref
        call.call_ref = ref
    info.call_refs = call_refs
    info.additional_groups = additional_groups
def _get_required_string(parsed, flags):
    """Gets the required string and related info of a parsed pattern.

    Returns (req_offset, req_chars, req_flags). req_offset is -1 when the
    offset is unbounded; req_chars is () and the other values 0 when the
    pattern has no required string.
    """
    req_offset, required = parsed.get_required_string(bool(flags & REVERSE))
    if required:
        required.required = True
        if req_offset >= UNLIMITED:
            req_offset = -1
        req_flags = required.case_flags
        if not (flags & UNICODE):
            req_flags &= ~UNICODE
        req_chars = required.folded_characters
    else:
        req_offset = 0
        req_chars = ()
        req_flags = 0
    return req_offset, req_chars, req_flags
class Scanner:
    """A lexical scanner: matches a lexicon of (phrase, action) pairs
    against a string, applying the action of whichever phrase matches.
    """
    def __init__(self, lexicon, flags=0):
        self.lexicon = lexicon
        # Combine phrases into a compound pattern.
        patterns = []
        for phrase, action in lexicon:
            # Parse the regular expression.
            source = Source(phrase)
            info = Info(flags, source.char_type)
            source.ignore_space = bool(info.flags & VERBOSE)
            parsed = _parse_pattern(source, info)
            if not source.at_end():
                raise error("trailing characters", source.string, source.pos)
            # We want to forbid capture groups within each phrase.
            patterns.append(parsed.remove_captures())
        # Combine all the subpatterns into one pattern.
        info = Info(flags)
        patterns = [Group(info, g + 1, p) for g, p in enumerate(patterns)]
        parsed = Branch(patterns)
        # Optimise the compound pattern.
        parsed = parsed.optimise(info)
        parsed = parsed.pack_characters(info)
        # Get the required string.
        req_offset, req_chars, req_flags = _get_required_string(parsed,
          info.flags)
        # Check the features of the groups.
        _check_group_features(info, parsed)
        # Complain if there are any group calls. They are not supported by the
        # Scanner class.
        if info.call_refs:
            raise error("recursive regex not supported by Scanner",
              source.string, source.pos)
        reverse = bool(info.flags & REVERSE)
        # Compile the compound pattern. The result is a list of tuples.
        code = parsed.compile(reverse) + [(OP.SUCCESS, )]
        # Flatten the code into a list of ints.
        code = _flatten_code(code)
        if not parsed.has_simple_start():
            # Get the first set, if possible.
            try:
                fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
                fs_code = _flatten_code(fs_code)
                code = fs_code + code
            except _FirstSetError:
                pass
        # Check the global flags for conflicts.
        version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
        if version not in (0, VERSION0, VERSION1):
            raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
        # Create the PatternObject.
        #
        # Local flags like IGNORECASE affect the code generation, but aren't
        # needed by the PatternObject itself. Conversely, global flags like
        # LOCALE _don't_ affect the code generation but _are_ needed by the
        # PatternObject.
        self.scanner = _regex.compile(None, (flags & GLOBAL_FLAGS) | version,
          code, {}, {}, {}, [], req_offset, req_chars, req_flags,
          len(patterns))
    def scan(self, string):
        """Scan `string`, returning (results, remainder).

        Each match applies the action of the phrase that matched; callable
        actions receive (self, matched_text). Scanning stops at the first
        position with no match or a zero-width match.
        """
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while True:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # Zero-width match: stop to avoid an infinite loop.
                break
            # m.lastindex identifies which phrase of the lexicon matched.
            action = self.lexicon[m.lastindex - 1][1]
            if hasattr(action, '__call__'):
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i : ]
# Get the known properties dict.
PROPERTIES = _regex.get_properties()
# Build the inverse of the properties dict: maps a property id to its
# longest (preferred) name and a dict of value id -> longest value name.
PROPERTY_NAMES = {}
for prop_name, (prop_id, values) in PROPERTIES.items():
    name, prop_values = PROPERTY_NAMES.get(prop_id, ("", {}))
    name = max(name, prop_name, key=len)
    PROPERTY_NAMES[prop_id] = name, prop_values
    for val_name, val_id in values.items():
        prop_values[val_id] = max(prop_values.get(val_id, ""), val_name,
          key=len)
# Character escape sequences.
CHARACTER_ESCAPES = {
    "a": "\a",
    "b": "\b",
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
    "v": "\v",
}
# Predefined character set escape sequences.
CHARSET_ESCAPES = {
    "d": lookup_property(None, "Digit", True),
    "D": lookup_property(None, "Digit", False),
    "s": lookup_property(None, "Space", True),
    "S": lookup_property(None, "Space", False),
    "w": lookup_property(None, "Word", True),
    "W": lookup_property(None, "Word", False),
}
# Positional escape sequences.
POSITION_ESCAPES = {
    "A": StartOfString(),
    "b": Boundary(),
    "B": Boundary(False),
    "m": StartOfWord(),
    "M": EndOfWord(),
    "Z": EndOfString(),
}
# Positional escape sequences when WORD flag set.
WORD_POSITION_ESCAPES = dict(POSITION_ESCAPES)
WORD_POSITION_ESCAPES.update({
    "b": DefaultBoundary(),
    "B": DefaultBoundary(False),
    "m": DefaultStartOfWord(),
    "M": DefaultEndOfWord(),
})
| Python |
#
# Secret Labs' Regular Expression Engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# 2010-01-16 mrab Python front-end re-written and extended
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to those
found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
the strings being processed can contain null bytes and characters outside the
US ASCII range.
Regular expressions can contain both special and ordinary characters. Most
ordinary characters, like "A", "a", or "0", are the simplest regular
expressions; they simply match themselves. You can concatenate ordinary
characters, so last matches the string 'last'.
There are a few differences between the old (legacy) behaviour and the new
(enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the
newline at the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding
RE. Greedy means that it will match as many repetitions
as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding
RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special
characters.
*+,++,?+ Possessive versions of the previous three special
characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
{m,n}+ Possessive version of the above.
{...} Fuzzy matching constraints.
"\\" Either escapes special characters or signals a special
sequence.
[...] Indicates a set of characters. A "^" as the first
character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses. The contents are
captured and can be retrieved or matched later in the
string.
(?flags-flags) VERSION1: Sets/clears the flags for the remainder of
the group or pattern; VERSION0: Sets the flags for the
entire pattern.
(?:...) Non-capturing version of regular parentheses.
(?>...) Atomic non-capturing version of regular parentheses.
(?flags-flags:...) Non-capturing version of regular parentheses with local
flags.
(?P<name>...) The substring matched by the group is accessible by
name.
(?<name>...) The substring matched by the group is accessible by
name.
(?P=name) Matches the text matched earlier by the group named
name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the
string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ....
(?<!...) Matches if not preceded by ....
(?(id)yes|no) Matches yes pattern if group id matched, the (optional)
no pattern otherwise.
(?|...|...) (?|A|B), creates an RE that will match either A or B,
but reuses capture group numbers across the
alternatives.
The fuzzy matching constraints are: "i" to permit insertions, "d" to permit
deletions, "s" to permit substitutions, "e" to permit any of these. Limits are
optional with "<=" and "<". If any type of error is provided then any type not
provided is not permitted.
A cost equation may be provided.
Examples:
(?:fuzzy){i<=2}
(?:fuzzy){i<=1,s<=2,d<=1,1i+1s+1d<3}
VERSION1: Set operators are supported, and a set can include nested sets. The
set operators, in order of increasing precedence, are:
|| Set union ("x||y" means "x or y").
~~ (double tilde) Symmetric set difference ("x~~y" means "x or y, but not
both").
&& Set intersection ("x&&y" means "x and y").
-- (double dash) Set difference ("x--y" means "x but not y").
Implicit union, i.e., simple juxtaposition like in [ab], has the highest
precedence.
VERSION0 and VERSION1:
The special sequences consist of "\\" and a character from the list below. If
the ordinary character is not on the list, then the resulting RE will match the
second character.
\number Matches the contents of the group of the same number if
number is no more than 2 digits, otherwise the character
with the 3-digit octal code.
\a Matches the bell character.
\A Matches only at the start of the string.
\b Matches the empty string, but only at the start or end of a
word.
\B Matches the empty string, but not at the start or end of a
word.
\d Matches any decimal digit; equivalent to the set [0-9] when
matching a bytestring or a Unicode string with the ASCII
flag, or the whole range of Unicode digits when matching a
Unicode string.
\D Matches any non-digit character; equivalent to [^\d].
\f Matches the formfeed character.
\g<name> Matches the text matched by the group named name.
\G Matches the empty string, but only at the position where
the search started.
\L<name> Named list. The list is provided as a keyword argument.
\m Matches the empty string, but only at the start of a word.
\M Matches the empty string, but only at the end of a word.
\n Matches the newline character.
\N{name} Matches the named character.
\p{name=value} Matches the character if its property has the specified
value.
\P{name=value} Matches the character if its property hasn't the specified
value.
\r Matches the carriage-return character.
\s Matches any whitespace character; equivalent to
[ \t\n\r\f\v].
\S Matches any non-whitespace character; equivalent to [^\s].
\t Matches the tab character.
\uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX.
\UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code
XXXXXXXX.
\v Matches the vertical tab character.
\w Matches any alphanumeric character; equivalent to
[a-zA-Z0-9_] when matching a bytestring or a Unicode string
with the ASCII flag, or the whole range of Unicode
alphanumeric characters (letters plus digits plus
underscore) when matching a Unicode string. With LOCALE, it
will match the set [0-9_] plus characters defined as
letters for the current locale.
\W Matches the complement of \w; equivalent to [^\w].
\xXX Matches the character with 2-digit hex code XX.
\X Matches a grapheme.
\Z Matches only at the end of the string.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern at the beginning of a string.
fullmatch Match a regular expression pattern against all of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string using a
template string.
subf Substitute occurrences of a pattern found in a string using a
format string.
subn Same as sub, but also return the number of substitutions made.
subfn Same as subf, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern. VERSION1: will
split at zero-width match; VERSION0: won't split at zero-width
match.
splititer Return an iterator yielding the parts of a split string.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a Pattern object.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics or special characters in a
string.
Most of the functions support a concurrent parameter: if True, the GIL will be
released during matching, allowing other Python threads to run concurrently. If
the string changes during matching, the behaviour is undefined. This parameter
is not needed when working on the builtin (immutable) string classes.
Some of the functions in this module take flags as optional parameters. Most of
these flags can also be set within an RE:
A a ASCII Make \w, \W, \b, \B, \d, and \D match the
corresponding ASCII character categories. Default
when matching a bytestring.
B b BESTMATCH Find the best fuzzy match (default is first).
D DEBUG Print the parsed pattern.
F f FULLCASE Use full case-folding when performing
case-insensitive matching in Unicode.
I i IGNORECASE Perform case-insensitive matching.
L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the
current locale. (One byte per character only.)
M m MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string. "$" matches the end of lines
(before a newline) as well as the end of the string.
E e ENHANCEMATCH Attempt to improve the fit after finding the first
fuzzy match.
R r REVERSE Searches backwards.
S s DOTALL "." matches any character at all, including the
newline.
U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the
Unicode locale. Default when matching a Unicode
string.
V0 V0 VERSION0 Turn on the old legacy behaviour.
V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag
includes the FULLCASE flag.
W w WORD Make \b and \B work with default Unicode word breaks
and make ".", "^" and "$" work with Unicode line
breaks.
X x VERBOSE Ignore whitespace and comments for nicer looking REs.
This module also defines an exception 'error'.
"""
# Public symbols.
__all__ = ["compile", "escape", "findall", "finditer", "fullmatch", "match",
  "purge", "search", "split", "splititer", "sub", "subf", "subfn", "subn",
  "template", "Scanner", "A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E",
  "ENHANCEMATCH", "S", "DOTALL", "F", "FULLCASE", "I", "IGNORECASE", "L",
  "LOCALE", "M", "MULTILINE", "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE",
  "V0", "VERSION0", "V1", "VERSION1", "X", "VERBOSE", "W", "WORD", "error",
  "Regex"]
# Version of this regex module.
__version__ = "2.4.58"
# --------------------------------------------------------------------
# Public interface.
def match(pattern, string, flags=0, pos=None, endpos=None, partial=False,
  concurrent=None, **kwargs):
    """Anchor the pattern at the start of the string.

    Returns a match object if the pattern matches there, or None otherwise.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.match(string, pos, endpos, concurrent, partial)
def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False,
  concurrent=None, **kwargs):
    """Match the pattern against the entire string.

    Returns a match object if the whole string matches, or None otherwise.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.fullmatch(string, pos, endpos, concurrent, partial)
def search(pattern, string, flags=0, pos=None, endpos=None, partial=False,
  concurrent=None, **kwargs):
    """Scan the string for the first place where the pattern matches.

    Returns a match object, or None if no match was found.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.search(string, pos, endpos, concurrent, partial)
def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Replace non-overlapping occurrences of the pattern in the string.

    Replacement proceeds from the left (or from the right for a reverse
    pattern). repl may be a string, in which case backslash escapes are
    processed, or a callable, which is passed the match object and must
    return the replacement string.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.sub(repl, string, count, pos, endpos, concurrent)
def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Replace non-overlapping occurrences of the pattern using a format.

    Replacement proceeds from the left (or from the right for a reverse
    pattern). format may be a format string, or a callable that is passed
    the match object and must return the replacement string.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.subf(format, string, count, pos, endpos, concurrent)
def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Like sub, but return a 2-tuple (new_string, number).

    number is the number of substitutions made. repl may be a string (with
    backslash escapes processed) or a callable that is passed the match
    object and must return the replacement string.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.subn(repl, string, count, pos, endpos, concurrent)
def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Like subf, but return a 2-tuple (new_string, number).

    number is the number of substitutions made. format may be a format
    string, or a callable that is passed the match object and must return
    the replacement string.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.subfn(format, string, count, pos, endpos, concurrent)
def split(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
    """Split the string by occurrences of the pattern.

    Returns a list of the substrings; text captured by groups in the
    pattern is also included. At most maxsplit splits occur when maxsplit
    is nonzero, the remainder becoming the final list element.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.split(string, maxsplit, concurrent)
def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
    "Return an iterator yielding the parts of a split string."
    compiled = _compile(pattern, flags, kwargs)
    return compiled.splititer(string, maxsplit, concurrent)
def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
  concurrent=None, **kwargs):
    """Return a list of all matches in the string.

    Matches may overlap if overlapped is True. If the pattern has groups,
    group texts are returned instead (tuples when there is more than one
    group). Empty matches are included.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.findall(string, pos, endpos, overlapped, concurrent)
def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
  partial=False, concurrent=None, **kwargs):
    """Return an iterator yielding a match object for every match.

    Matches may overlap if overlapped is True; empty matches are included.
    """
    compiled = _compile(pattern, flags, kwargs)
    return compiled.finditer(string, pos, endpos, overlapped, concurrent,
      partial)
def compile(pattern, flags=0, **kwargs):
    "Compile a regular expression pattern, returning a pattern object."
    compiled = _compile(pattern, flags, kwargs)
    return compiled
def purge():
    "Clear the regular expression cache"
    for cache in (_cache, _locale_sensitive):
        cache.clear()
def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object."
    template_flags = flags | TEMPLATE
    return _compile(pattern, template_flags)
def escape(pattern, special_only=False):
    "Escape all non-alphanumeric characters or special characters in pattern."
    # Choose literals of the same type (unicode vs bytes/str) as the
    # pattern so the joined result has the pattern's type under Python 2.
    if isinstance(pattern, unicode):
        backslash, nul_escape, joiner = u"\\", u"\\000", u""
    else:
        backslash, nul_escape, joiner = "\\", "\\000", ""
    parts = []
    append = parts.append
    if special_only:
        # Escape only the regex metacharacters (and NUL).
        for ch in pattern:
            if ch in _METACHARS:
                append(backslash)
                append(ch)
            elif ch == "\x00":
                append(nul_escape)
            else:
                append(ch)
    else:
        # Escape everything except alphanumerics (and escape NUL).
        for ch in pattern:
            if ch in _ALNUM:
                append(ch)
            elif ch == "\x00":
                append(nul_escape)
            else:
                append(backslash)
                append(ch)
    return joiner.join(parts)
# --------------------------------------------------------------------
# Internals.
import _regex_core
import _regex
from threading import RLock as _RLock
from locale import getlocale as _getlocale
from _regex_core import *
from _regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError,
_UnscopedFlagSet, _check_group_features, _compile_firstset,
_compile_replacement, _flatten_code, _fold_case, _get_required_string,
_parse_pattern, _shrink_cache)
from _regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source as
_Source, Fuzzy as _Fuzzy)
# Version 0 is the old behaviour, compatible with the original 're' module.
# Version 1 is the new behaviour, which differs slightly.
DEFAULT_VERSION = VERSION0
# Characters that are special in a pattern and need escaping.
_METACHARS = frozenset("()[]{}?*+|^$\\.")
# Keep the core parser's default version in sync with this module's.
_regex_core.DEFAULT_VERSION = DEFAULT_VERSION
# Caches for the patterns and replacements.
_cache = {}
_cache_lock = _RLock()
_named_args = {}
_replacement_cache = {}
_locale_sensitive = {}
# Maximum size of the cache.
_MAXCACHE = 500
_MAXREPCACHE = 500
def _compile(pattern, flags=0, kwargs={}):
    """Compiles a regular expression to a PatternObject.

    Results are memoised in _cache, keyed by the pattern, its type, the
    flags, any named lists supplied via kwargs, and (for locale-sensitive
    patterns) the current locale.
    """
    # We won't bother to cache the pattern if we're debugging.
    debugging = (flags & DEBUG) != 0
    # What locale is this pattern using?
    locale_key = (type(pattern), pattern)
    if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0:
        # This pattern is, or might be, locale-sensitive.
        pattern_locale = _getlocale()[1]
    else:
        # This pattern is definitely not locale-sensitive.
        pattern_locale = None
    if not debugging:
        try:
            # Do we know what keyword arguments are needed?
            args_key = pattern, type(pattern), flags
            args_needed = _named_args[args_key]
            # Are we being provided with its required keyword arguments?
            args_supplied = set()
            if args_needed:
                for k, v in args_needed:
                    try:
                        args_supplied.add((k, frozenset(kwargs[k])))
                    except KeyError:
                        raise error("missing named list: {!r}".format(k))
            args_supplied = frozenset(args_supplied)
            # Have we already seen this regular expression and named list?
            pattern_key = (pattern, type(pattern), flags, args_supplied,
              DEFAULT_VERSION, pattern_locale)
            return _cache[pattern_key]
        except KeyError:
            # It's a new pattern, or new named list for a known pattern.
            pass
    # Guess the encoding from the class of the pattern string.
    if isinstance(pattern, unicode):
        guess_encoding = UNICODE
    elif isinstance(pattern, str):
        guess_encoding = ASCII
    elif isinstance(pattern, _pattern_type):
        # Already compiled: return it unchanged (flags cannot be re-applied).
        if flags:
            raise ValueError("cannot process flags argument with a compiled pattern")
        return pattern
    else:
        raise TypeError("first argument must be a string or compiled pattern")
    # Set the default version in the core code in case it has been changed.
    _regex_core.DEFAULT_VERSION = DEFAULT_VERSION
    caught_exception = None
    global_flags = flags
    # Parsing may need to be retried when a global (unscoped) flag is found
    # partway through the pattern.
    while True:
        try:
            source = _Source(pattern)
            info = _Info(global_flags, source.char_type, kwargs)
            info.guess_encoding = guess_encoding
            source.ignore_space = bool(info.flags & VERBOSE)
            parsed = _parse_pattern(source, info)
            break
        except _UnscopedFlagSet:
            # Remember the global flags for the next attempt.
            global_flags = info.global_flags
        except error, e:
            caught_exception = e
        if caught_exception:
            raise error(caught_exception.msg, caught_exception.pattern,
              caught_exception.pos)
    if not source.at_end():
        raise error("trailing characters in pattern", pattern, source.pos)
    # Check the global flags for conflicts.
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    if version not in (0, VERSION0, VERSION1):
        raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")
    if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE):
        raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible")
    if not (info.flags & _ALL_ENCODINGS):
        # No encoding flag given: infer it from the pattern's type.
        if isinstance(pattern, unicode):
            info.flags |= UNICODE
        else:
            info.flags |= ASCII
    reverse = bool(info.flags & REVERSE)
    fuzzy = isinstance(parsed, _Fuzzy)
    # Remember whether this pattern has an inline locale flag.
    _locale_sensitive[locale_key] = info.inline_locale
    # Should we print the parsed pattern?
    if flags & DEBUG:
        parsed.dump(indent=0, reverse=reverse)
    # Fix the group references.
    parsed.fix_groups(pattern, reverse, False)
    # Optimise the parsed pattern.
    parsed = parsed.optimise(info)
    parsed = parsed.pack_characters(info)
    # Get the required string.
    req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags)
    # Build the named lists.
    named_lists = {}
    named_list_indexes = [None] * len(info.named_lists_used)
    args_needed = set()
    for key, index in info.named_lists_used.items():
        name, case_flags = key
        values = frozenset(kwargs[name])
        if case_flags:
            items = frozenset(_fold_case(info, v) for v in values)
        else:
            items = values
        named_lists[name] = values
        named_list_indexes[index] = items
        args_needed.add((name, values))
    # Check the features of the groups.
    _check_group_features(info, parsed)
    # Compile the parsed pattern. The result is a list of tuples.
    code = parsed.compile(reverse)
    # Is there a group call to the pattern as a whole?
    key = (0, reverse, fuzzy)
    ref = info.call_refs.get(key)
    if ref is not None:
        code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )]
    # Add the final 'success' opcode.
    code += [(_OP.SUCCESS, )]
    # Compile the additional copies of the groups that we need.
    for group, rev, fuz in info.additional_groups:
        code += group.compile(rev, fuz)
    # Flatten the code into a list of ints.
    code = _flatten_code(code)
    if not parsed.has_simple_start():
        # Get the first set, if possible.
        try:
            fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
            fs_code = _flatten_code(fs_code)
            code = fs_code + code
        except _FirstSetError:
            pass
    # The named capture groups.
    index_group = dict((v, n) for n, v in info.group_index.items())
    # Create the PatternObject.
    #
    # Local flags like IGNORECASE affect the code generation, but aren't needed
    # by the PatternObject itself. Conversely, global flags like LOCALE _don't_
    # affect the code generation but _are_ needed by the PatternObject.
    compiled_pattern = _regex.compile(pattern, info.flags | version, code,
      info.group_index, index_group, named_lists, named_list_indexes,
      req_offset, req_chars, req_flags, info.group_count)
    # Do we need to reduce the size of the cache?
    if len(_cache) >= _MAXCACHE:
        _cache_lock.acquire()
        try:
            _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE)
        finally:
            _cache_lock.release()
    if not debugging:
        if (info.flags & LOCALE) == 0:
            pattern_locale = None
        args_needed = frozenset(args_needed)
        # Store this regular expression and named list.
        pattern_key = (pattern, type(pattern), flags, args_needed,
          DEFAULT_VERSION, pattern_locale)
        _cache[pattern_key] = compiled_pattern
        # Store what keyword arguments are needed.
        _named_args[args_key] = args_needed
    return compiled_pattern
def _compile_replacement_helper(pattern, template):
    """Compiles a replacement template.

    Returns a list of string literals and int group references, cached in
    _replacement_cache keyed by the pattern and template.
    """
    # This function is called by the _regex module.
    # Have we seen this before?
    key = pattern.pattern, pattern.flags, template
    compiled = _replacement_cache.get(key)
    if compiled is not None:
        return compiled
    if len(_replacement_cache) >= _MAXREPCACHE:
        _replacement_cache.clear()
    is_unicode = isinstance(template, unicode)
    source = _Source(template)
    # make_string builds a literal of the template's own string type.
    if is_unicode:
        def make_string(char_codes):
            return u"".join(unichr(c) for c in char_codes)
    else:
        def make_string(char_codes):
            return "".join(chr(c) for c in char_codes)
    compiled = []
    literal = []
    while True:
        ch = source.get()
        if not ch:
            break
        if ch == "\\":
            # '_compile_replacement' will return either an int group reference
            # or a string literal. It returns items (plural) in order to handle
            # a 2-character literal (an invalid escape sequence).
            is_group, items = _compile_replacement(source, pattern, is_unicode)
            if is_group:
                # It's a group, so first flush the literal.
                if literal:
                    compiled.append(make_string(literal))
                    literal = []
                compiled.extend(items)
            else:
                literal.extend(items)
        else:
            literal.append(ord(ch))
    # Flush the literal.
    if literal:
        compiled.append(make_string(literal))
    _replacement_cache[key] = compiled
    return compiled
# We define _pattern_type here after all the support objects have been defined.
_pattern_type = type(_compile("", 0, {}))
# We'll define an alias for the 'compile' function so that the repr of a
# pattern object is eval-able.
Regex = compile
# Register myself for pickling.
import copy_reg as _copy_reg
def _pickle(p):
    # Pickles a compiled pattern as a call to _compile(pattern, flags).
    return _compile, (p.pattern, p.flags)
_copy_reg.pickle(_pattern_type, _pickle, _compile)
if not hasattr(str, "format"):
    # Strings don't have the .format method (below Python 2.6).
    # Strip the 'subf'/'subfn' functions and their documentation, since they
    # rely on str.format.
    while True:
        _start = __doc__.find(" subf")
        if _start < 0:
            break
        _end = __doc__.find("\n", _start) + 1
        while __doc__.startswith(" ", _end):
            _end = __doc__.find("\n", _end) + 1
        __doc__ = __doc__[ : _start] + __doc__[_end : ]
    __all__ = [_name for _name in __all__ if not _name.startswith("subf")]
    del _start, _end
    del subf, subfn
| Python |
#
# Secret Labs' Regular Expression Engine core module
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# 2010-01-16 mrab Python front-end re-written and extended
import string
import sys
import unicodedata
from collections import defaultdict
import _regex
__all__ = ["A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E", "ENHANCEMATCH",
"F", "FULLCASE", "I", "IGNORECASE", "L", "LOCALE", "M", "MULTILINE", "R",
"REVERSE", "S", "DOTALL", "T", "TEMPLATE", "U", "UNICODE", "V0", "VERSION0",
"V1", "VERSION1", "W", "WORD", "X", "VERBOSE", "error",
"Scanner"]
# The regex exception.
class error(Exception):
def __init__(self, message, pattern=None, pos=None):
newline = '\n' if isinstance(pattern, str) else b'\n'
self.msg = message
self.pattern = pattern
self.pos = pos
if pattern is not None and pos is not None:
self.lineno = pattern.count(newline, 0, pos) + 1
self.colno = pos - pattern.rfind(newline, 0, pos)
message = "{} at position {}".format(message, pos)
if newline in pattern:
message += " (line {}, column {})".format(self.lineno,
self.colno)
Exception.__init__(self, message)
# The exception for when a positional flag has been turned on in the old
# behaviour.
class _UnscopedFlagSet(Exception):
pass
# The exception for when parsing fails and we want to try something else.
class ParseError(Exception):
pass
# The exception for when there isn't a valid first set.
class _FirstSetError(Exception):
pass
# Flags.
A = ASCII = 0x80 # Assume ASCII locale.
B = BESTMATCH = 0x1000 # Best fuzzy match.
D = DEBUG = 0x200 # Print parsed pattern.
E = ENHANCEMATCH = 0x8000 # Attempt to improve the fit after finding the first
# fuzzy match.
F = FULLCASE = 0x4000 # Unicode full case-folding.
I = IGNORECASE = 0x2 # Ignore case.
L = LOCALE = 0x4 # Assume current 8-bit locale.
M = MULTILINE = 0x8 # Make anchors look for newline.
R = REVERSE = 0x400 # Search backwards.
S = DOTALL = 0x10 # Make dot match newline.
U = UNICODE = 0x20 # Assume Unicode locale.
V0 = VERSION0 = 0x2000 # Old legacy behaviour.
V1 = VERSION1 = 0x100 # New enhanced behaviour.
W = WORD = 0x800 # Default Unicode word breaks.
X = VERBOSE = 0x40 # Ignore whitespace and comments.
T = TEMPLATE = 0x1 # Template (present because re module has it).
DEFAULT_VERSION = VERSION1
_ALL_VERSIONS = VERSION0 | VERSION1
_ALL_ENCODINGS = ASCII | LOCALE | UNICODE
# The default flags for the various versions.
DEFAULT_FLAGS = {VERSION0: 0, VERSION1: FULLCASE}
# The mask for the flags.
GLOBAL_FLAGS = (_ALL_ENCODINGS | _ALL_VERSIONS | BESTMATCH | DEBUG |
ENHANCEMATCH | REVERSE)
SCOPED_FLAGS = FULLCASE | IGNORECASE | MULTILINE | DOTALL | WORD | VERBOSE
ALPHA = frozenset(string.ascii_letters)
DIGITS = frozenset(string.digits)
ALNUM = ALPHA | DIGITS
OCT_DIGITS = frozenset(string.octdigits)
HEX_DIGITS = frozenset(string.hexdigits)
SPECIAL_CHARS = frozenset("()|?*+{^$.[\\#") | frozenset([""])
NAMED_CHAR_PART = ALNUM | frozenset(" -")
PROPERTY_NAME_PART = ALNUM | frozenset(" &_-.")
SET_OPS = ("||", "~~", "&&", "--")
# The width of the code words inside the regex engine.
BYTES_PER_CODE = _regex.get_code_size()
BITS_PER_CODE = BYTES_PER_CODE * 8
# The repeat count which represents infinity.
UNLIMITED = (1 << BITS_PER_CODE) - 1
# The regular expression flags.
REGEX_FLAGS = {"a": ASCII, "b": BESTMATCH, "e": ENHANCEMATCH, "f": FULLCASE,
"i": IGNORECASE, "L": LOCALE, "m": MULTILINE, "r": REVERSE, "s": DOTALL, "u":
UNICODE, "V0": VERSION0, "V1": VERSION1, "w": WORD, "x": VERBOSE}
# The case flags.
CASE_FLAGS = FULLCASE | IGNORECASE
NOCASE = 0
FULLIGNORECASE = FULLCASE | IGNORECASE
FULL_CASE_FOLDING = UNICODE | FULLIGNORECASE
# The number of digits in hexadecimal escapes.
HEX_ESCAPES = {"x": 2, "u": 4, "U": 8}
# A singleton which indicates a comment within a pattern.
COMMENT = object()
FLAGS = object()
# The names of the opcodes.
OPCODES = """
FAILURE
SUCCESS
ANY
ANY_ALL
ANY_ALL_REV
ANY_REV
ANY_U
ANY_U_REV
ATOMIC
BOUNDARY
BRANCH
CALL_REF
CHARACTER
CHARACTER_IGN
CHARACTER_IGN_REV
CHARACTER_REV
DEFAULT_BOUNDARY
DEFAULT_END_OF_WORD
DEFAULT_START_OF_WORD
END
END_OF_LINE
END_OF_LINE_U
END_OF_STRING
END_OF_STRING_LINE
END_OF_STRING_LINE_U
END_OF_WORD
FUZZY
GRAPHEME_BOUNDARY
GREEDY_REPEAT
GROUP
GROUP_CALL
GROUP_EXISTS
LAZY_REPEAT
LOOKAROUND
NEXT
PROPERTY
PROPERTY_IGN
PROPERTY_IGN_REV
PROPERTY_REV
RANGE
RANGE_IGN
RANGE_IGN_REV
RANGE_REV
REF_GROUP
REF_GROUP_FLD
REF_GROUP_FLD_REV
REF_GROUP_IGN
REF_GROUP_IGN_REV
REF_GROUP_REV
SEARCH_ANCHOR
SET_DIFF
SET_DIFF_IGN
SET_DIFF_IGN_REV
SET_DIFF_REV
SET_INTER
SET_INTER_IGN
SET_INTER_IGN_REV
SET_INTER_REV
SET_SYM_DIFF
SET_SYM_DIFF_IGN
SET_SYM_DIFF_IGN_REV
SET_SYM_DIFF_REV
SET_UNION
SET_UNION_IGN
SET_UNION_IGN_REV
SET_UNION_REV
START_OF_LINE
START_OF_LINE_U
START_OF_STRING
START_OF_WORD
STRING
STRING_FLD
STRING_FLD_REV
STRING_IGN
STRING_IGN_REV
STRING_REV
STRING_SET
STRING_SET_FLD
STRING_SET_FLD_REV
STRING_SET_IGN
STRING_SET_IGN_REV
STRING_SET_REV
"""
# Define the opcodes in a namespace.
class Namespace:
pass
OP = Namespace()
for i, op in enumerate(OPCODES.split()):
setattr(OP, op, i)
def _shrink_cache(cache_dict, args_dict, locale_sensitive, max_length, divisor=5):
    """Make room in the given cache.

    Args:
      cache_dict: The cache dictionary to modify.
      args_dict: The dictionary of named list args used by patterns.
      locale_sensitive: The dictionary of locale-sensitivity entries, keyed
        by (pattern_type, pattern); rebuilt to keep only entries for keys
        that survive in cache_dict.
      max_length: Maximum # of entries in cache_dict before it is shrunk.
      divisor: Cache will shrink to max_length - 1/divisor*max_length items.
    """
    # Toss out a fraction of the entries at random to make room for new ones.
    # A random algorithm was chosen as opposed to simply cache_dict.popitem()
    # as popitem could penalize the same regular expression repeatedly based
    # on its internal hash value. Being random should spread the cache miss
    # love around.
    cache_keys = tuple(cache_dict.keys())
    overage = len(cache_keys) - max_length
    if overage < 0:
        # Cache is already within limits. Normally this should not happen
        # but it could due to multithreading.
        return
    number_to_toss = max_length // divisor + overage
    # The import is done here to avoid a circular dependency.
    import random
    if not hasattr(random, 'sample'):
        # Do nothing while resolving the circular dependency:
        #  re->random->warnings->tokenize->string->re
        return
    for doomed_key in random.sample(cache_keys, number_to_toss):
        try:
            del cache_dict[doomed_key]
        except KeyError:
            # Ignore problems if the cache changed from another thread.
            pass
    # Rebuild the arguments and locale-sensitivity dictionaries.
    args_dict.clear()
    sensitivity_dict = {}
    for pattern, pattern_type, flags, args, default_version, locale in cache_dict:
        args_dict[pattern, pattern_type, flags, default_version, locale] = args
        try:
            sensitivity_dict[pattern_type, pattern] = locale_sensitive[pattern_type, pattern]
        except KeyError:
            pass
    locale_sensitive.clear()
    locale_sensitive.update(sensitivity_dict)
def _fold_case(info, string):
    """Case-folds a string according to the pattern's flags.

    When none of the encoding flags (ASCII/LOCALE/UNICODE) is present, the
    encoding guessed for the pattern is used instead.
    """
    fold_flags = info.flags
    if not fold_flags & _ALL_ENCODINGS:
        fold_flags |= info.guess_encoding
    return _regex.fold_case(fold_flags, string)
def is_cased(info, char):
    """Reports whether a character has more than one case variant."""
    variants = _regex.get_all_cases(info.flags, char)
    return len(variants) > 1
def _compile_firstset(info, fs):
    """Compiles the firstset for the pattern.

    Returns the compiled code for the set of possible first items, or []
    when no usable firstset can be built (e.g. it contains None, or
    case-sensitive members that actually have case variants).
    """
    if not fs or None in fs:
        return []
    # If we ignore the case, for simplicity we won't build a firstset.
    members = set()
    for i in fs:
        if i.case_flags:
            if isinstance(i, Character):
                if is_cased(info, i.value):
                    return []
            elif isinstance(i, SetBase):
                return []
        # Case flags are dropped; the members above were verified caseless.
        members.add(i.with_flags(case_flags=NOCASE))
    # Build the firstset.
    fs = SetUnion(info, list(members), zerowidth=True)
    fs = fs.optimise(info, in_set=True)
    # Compile the firstset.
    return fs.compile(bool(info.flags & REVERSE))
def _flatten_code(code):
"Flattens the code from a list of tuples."
flat_code = []
for c in code:
flat_code.extend(c)
return flat_code
def make_character(info, value, in_set=False):
    """Makes a character literal.

    Outside a set the pattern's current case flags are attached; inside a
    set the character is kept case-sensitive.
    """
    if in_set:
        # A character set is built case-sensitively.
        return Character(value)
    return Character(value, case_flags=info.flags & CASE_FLAGS)
def make_ref_group(info, name, position):
    "Makes a group reference carrying the pattern's current case flags."
    return RefGroup(info, name, position, case_flags=info.flags & CASE_FLAGS)
def make_string_set(info, name):
    "Makes a string set carrying the pattern's current case flags."
    return StringSet(info, name, case_flags=info.flags & CASE_FLAGS)
def make_property(info, prop, in_set):
    "Makes a property; case flags are attached only outside a set."
    if in_set:
        return prop
    return prop.with_flags(case_flags=info.flags & CASE_FLAGS)
def _parse_pattern(source, info):
    """Parses a whole pattern: one or more '|'-separated sequences."""
    alternatives = [parse_sequence(source, info)]
    while source.match("|"):
        alternatives.append(parse_sequence(source, info))
    # A single alternative needs no Branch wrapper.
    return alternatives[0] if len(alternatives) == 1 else Branch(alternatives)
def parse_sequence(source, info):
    """Parses a sequence, eg. 'abc'.

    'applied' tracks whether the previous item already had a quantifier or
    fuzzy constraint applied, so that e.g. 'a**' is rejected.
    """
    sequence = []
    applied = False
    while True:
        # Get literal characters followed by an element.
        characters, case_flags, element = parse_literal_and_element(source,
          info)
        if not element:
            # No element, just a literal. We've also reached the end of the
            # sequence.
            append_literal(characters, case_flags, sequence)
            break
        if element is COMMENT or element is FLAGS:
            append_literal(characters, case_flags, sequence)
        elif type(element) is tuple:
            # It looks like we've found a quantifier.
            ch, saved_pos = element
            counts = parse_quantifier(source, info, ch)
            if counts:
                # It _is_ a quantifier.
                apply_quantifier(source, info, counts, characters, case_flags,
                  ch, saved_pos, applied, sequence)
                applied = True
            else:
                # It's not a quantifier. Maybe it's a fuzzy constraint.
                constraints = parse_fuzzy(source, ch)
                if constraints:
                    # It _is_ a fuzzy constraint.
                    apply_constraint(source, info, constraints, characters,
                      case_flags, saved_pos, applied, sequence)
                    applied = True
                else:
                    # The element was just a literal.
                    characters.append(ord(ch))
                    append_literal(characters, case_flags, sequence)
                    applied = False
        else:
            # We have a literal followed by something else.
            append_literal(characters, case_flags, sequence)
            sequence.append(element)
            applied = False
    return make_sequence(sequence)
def apply_quantifier(source, info, counts, characters, case_flags, ch,
  saved_pos, applied, sequence):
    """Applies a quantifier to the preceding character or sequence item.

    Raises error("nothing to repeat") if there is nothing to apply it to or
    if the previous item was already quantified/constrained.
    """
    if characters:
        # The quantifier applies to the last character.
        append_literal(characters[ : -1], case_flags, sequence)
        element = Character(characters[-1], case_flags=case_flags)
    else:
        # The quantifier applies to the last item in the sequence.
        if applied or not sequence:
            raise error("nothing to repeat", source.string, saved_pos)
        element = sequence.pop()
    min_count, max_count = counts
    saved_pos = source.pos
    ch = source.get()
    if ch == "?":
        # The "?" suffix that means it's a lazy repeat.
        repeated = LazyRepeat
    elif ch == "+":
        # The "+" suffix that means it's a possessive repeat.
        repeated = PossessiveRepeat
    else:
        # No suffix means that it's a greedy repeat.
        source.pos = saved_pos
        repeated = GreedyRepeat
    # Ignore the quantifier if it applies to a zero-width item or the number of
    # repeats is fixed at 1.
    if not element.is_empty() and (min_count != 1 or max_count != 1):
        element = repeated(element, min_count, max_count)
    sequence.append(element)
def apply_constraint(source, info, constraints, characters, case_flags,
  saved_pos, applied, sequence):
    """Applies a fuzzy constraint to the preceding character or sequence item.

    Raises error("nothing for fuzzy constraint") if there is nothing to
    apply it to.
    """
    if characters:
        # The constraint applies to the last character.
        append_literal(characters[ : -1], case_flags, sequence)
        element = Character(characters[-1], case_flags=case_flags)
        sequence.append(Fuzzy(element, constraints))
    else:
        # The constraint applies to the last item in the sequence.
        if applied or not sequence:
            raise error("nothing for fuzzy constraint", source.string,
              saved_pos)
        element = sequence.pop()
        # If a group is marked as fuzzy then put all of the fuzzy part in the
        # group.
        if isinstance(element, Group):
            element.subpattern = Fuzzy(element.subpattern, constraints)
            sequence.append(element)
        else:
            sequence.append(Fuzzy(element, constraints))
def append_literal(characters, case_flags, sequence):
    "Appends a Literal for any buffered characters to the sequence."
    if characters:
        sequence.append(Literal(characters, case_flags=case_flags))
def PossessiveRepeat(element, min_count, max_count):
    "Builds a possessive repeat (a greedy repeat wrapped in an atomic group)."
    return Atomic(GreedyRepeat(element, min_count, max_count))
_QUANTIFIERS = {"?": (0, 1), "*": (0, None), "+": (1, None)}
def parse_quantifier(source, info, ch):
"Parses a quantifier."
q = _QUANTIFIERS.get(ch)
if q:
# It's a quantifier.
return q
if ch == "{":
# Looks like a limited repeated element, eg. 'a{2,3}'.
counts = parse_limited_quantifier(source)
if counts:
return counts
return None
def is_above_limit(count):
    """Reports whether a repeat count reaches the engine's maximum."""
    if count is None:
        return False
    return count >= UNLIMITED
def parse_limited_quantifier(source):
    """Parses a limited quantifier such as '{2,3}' (the '{' has already been
    consumed). Returns (min, max) or None if it isn't a valid quantifier,
    restoring the source position in that case."""
    saved_pos = source.pos
    min_count = parse_count(source)
    if source.match(","):
        max_count = parse_count(source)
        # No minimum means 0 and no maximum means unlimited.
        min_count = int(min_count or 0)
        max_count = int(max_count) if max_count else None
        if max_count is not None and min_count > max_count:
            raise error("min repeat greater than max repeat", source.string,
              saved_pos)
    else:
        if not min_count:
            source.pos = saved_pos
            return None
        # '{n}' means exactly n repeats.
        min_count = max_count = int(min_count)
    if is_above_limit(min_count) or is_above_limit(max_count):
        raise error("repeat count too big", source.string, saved_pos)
    if not source.match ("}"):
        source.pos = saved_pos
        return None
    return min_count, max_count
def parse_fuzzy(source, ch):
    """Parses a fuzzy setting such as '{e<=2}', if present.

    Returns the constraints dict, or None when 'ch' isn't '{' or the
    contents don't parse as fuzzy items (position is restored).
    """
    if ch != "{":
        return None
    saved_pos = source.pos
    constraints = {}
    try:
        parse_fuzzy_item(source, constraints)
        while source.match(","):
            parse_fuzzy_item(source, constraints)
    except ParseError:
        # Not a fuzzy setting after all; let the caller treat it literally.
        source.pos = saved_pos
        return None
    if not source.match("}"):
        raise error("expected }", source.string, source.pos)
    return constraints
def parse_fuzzy_item(source, constraints):
    """Parses one fuzzy setting item: a cost constraint, or failing that, a
    cost equation."""
    saved_pos = source.pos
    try:
        parse_cost_constraint(source, constraints)
    except ParseError:
        source.pos = saved_pos
        parse_cost_equation(source, constraints)
def parse_cost_constraint(source, constraints):
    """Parses a cost constraint and records it as (min, max) in constraints.

    Two syntaxes: 'letter [("<=" | "<") cost]' and
    'cost ("<=" | "<") letter ("<=" | "<") cost'.
    """
    saved_pos = source.pos
    ch = source.get()
    if ch in ALPHA:
        # Syntax: constraint [("<=" | "<") cost]
        constraint = parse_constraint(source, constraints, ch)
        max_inc = parse_fuzzy_compare(source)
        if max_inc is None:
            # No maximum cost.
            constraints[constraint] = 0, None
        else:
            # There's a maximum cost.
            cost_pos = source.pos
            max_cost = int(parse_count(source))
            # Inclusive or exclusive limit?
            if not max_inc:
                max_cost -= 1
            if max_cost < 0:
                raise error("bad fuzzy cost limit", source.string, cost_pos)
            constraints[constraint] = 0, max_cost
    elif ch in DIGITS:
        # Syntax: cost ("<=" | "<") constraint ("<=" | "<") cost
        source.pos = saved_pos
        try:
            # Minimum cost.
            min_cost = int(parse_count(source))
            min_inc = parse_fuzzy_compare(source)
            if min_inc is None:
                raise ParseError()
            constraint = parse_constraint(source, constraints, source.get())
            max_inc = parse_fuzzy_compare(source)
            if max_inc is None:
                raise ParseError()
            # Maximum cost.
            cost_pos = source.pos
            max_cost = int(parse_count(source))
            # Inclusive or exclusive limits?
            if not min_inc:
                min_cost += 1
            if not max_inc:
                max_cost -= 1
            if not 0 <= min_cost <= max_cost:
                raise error("bad fuzzy cost limit", source.string, cost_pos)
            constraints[constraint] = min_cost, max_cost
        except ValueError:
            # int() failed on an empty count.
            raise ParseError()
    else:
        raise ParseError()
def parse_constraint(source, constraints, ch):
    """Parses (validates) a single fuzzy-constraint letter: d/e/i/s.

    NOTE: 'ch in "deis"' is substring membership, so ch == "" (end of
    input) passes the first check and is returned unchanged.
    """
    if ch not in "deis":
        raise error("bad fuzzy constraint", source.string, source.pos)
    if ch in constraints:
        raise error("repeated fuzzy constraint", source.string, source.pos)
    return ch
def parse_fuzzy_compare(source):
    """Parses a cost comparator.

    Returns True for an inclusive limit ("<="), False for an exclusive
    limit ("<"), or None when no comparator is present.
    """
    # "<=" must be tried first because "<" is its prefix.
    for token, inclusive in (("<=", True), ("<", False)):
        if source.match(token):
            return inclusive
    return None
def parse_cost_equation(source, constraints):
    """Parses a cost equation such as '2i+2d+1s<=4' and stores it under the
    'cost' key of constraints as a dict of coefficients plus 'max'."""
    if "cost" in constraints:
        raise error("more than one cost equation", source.string, source.pos)
    cost = {}
    parse_cost_term(source, cost)
    while source.match("+"):
        parse_cost_term(source, cost)
    max_inc = parse_fuzzy_compare(source)
    if max_inc is None:
        raise error("missing fuzzy cost limit", source.string, source.pos)
    max_cost = int(parse_count(source))
    # An exclusive limit '<' is converted to an inclusive one.
    if not max_inc:
        max_cost -= 1
    if max_cost < 0:
        raise error("bad fuzzy cost limit", source.string, source.pos)
    cost["max"] = max_cost
    constraints["cost"] = cost
def parse_cost_term(source, cost):
    """Parses one cost equation term: an optional coefficient followed by
    d/i/s. A missing coefficient defaults to 1."""
    coeff = parse_count(source)
    ch = source.get()
    if ch not in "dis":
        raise ParseError()
    if ch in cost:
        raise error("repeated fuzzy cost", source.string, source.pos)
    cost[ch] = int(coeff or 1)
def parse_count(source):
    """Parses a quantifier's count, which can be empty.

    Returns the (possibly empty) run of decimal digits as a string;
    callers treat "" as "no count given".
    """
    return source.get_while(DIGITS)
def parse_literal_and_element(source, info):
    """Parses a literal followed by an element. The element is FLAGS if it's an
    inline flag or None if it has reached the end of a sequence.

    Returns (characters, case_flags, element) where characters is the list
    of codepoints of the literal run preceding the element.
    """
    characters = []
    case_flags = info.flags & CASE_FLAGS
    while True:
        saved_pos = source.pos
        ch = source.get()
        if ch in SPECIAL_CHARS:
            if ch in ")|":
                # The end of a sequence. At the end of the pattern ch is "".
                source.pos = saved_pos
                return characters, case_flags, None
            elif ch == "\\":
                # An escape sequence outside a set.
                element = parse_escape(source, info, False)
                return characters, case_flags, element
            elif ch == "(":
                # A parenthesised subpattern or a flag.
                element = parse_paren(source, info)
                # Comments are skipped; keep collecting the literal.
                if element and element is not COMMENT:
                    return characters, case_flags, element
            elif ch == ".":
                # Any character.
                if info.flags & DOTALL:
                    element = AnyAll()
                elif info.flags & WORD:
                    element = AnyU()
                else:
                    element = Any()
                return characters, case_flags, element
            elif ch == "[":
                # A character set.
                element = parse_set(source, info)
                return characters, case_flags, element
            elif ch == "^":
                # The start of a line or the string.
                if info.flags & MULTILINE:
                    if info.flags & WORD:
                        element = StartOfLineU()
                    else:
                        element = StartOfLine()
                else:
                    element = StartOfString()
                return characters, case_flags, element
            elif ch == "$":
                # The end of a line or the string.
                if info.flags & MULTILINE:
                    if info.flags & WORD:
                        element = EndOfLineU()
                    else:
                        element = EndOfLine()
                else:
                    if info.flags & WORD:
                        element = EndOfStringLineU()
                    else:
                        element = EndOfStringLine()
                return characters, case_flags, element
            elif ch in "?*+{":
                # Looks like a quantifier; return its char and position so the
                # caller can decide whether it really is one.
                return characters, case_flags, (ch, saved_pos)
            else:
                # A literal.
                characters.append(ord(ch))
        else:
            # A literal.
            characters.append(ord(ch))
def parse_paren(source, info):
    """Parses a parenthesised subpattern or a flag. Returns FLAGS if it's an
    inline flag. The opening '(' has already been consumed.
    """
    saved_pos = source.pos
    ch = source.get()
    if ch == "?":
        # (?...
        saved_pos_2 = source.pos
        ch = source.get()
        if ch == "<":
            # (?<...
            saved_pos_3 = source.pos
            ch = source.get()
            if ch in ("=", "!"):
                # (?<=... or (?<!...: lookbehind.
                return parse_lookaround(source, info, True, ch == "=")
            # (?<...: a named capture group.
            source.pos = saved_pos_3
            name = parse_name(source)
            group = info.open_group(name)
            source.expect(">")
            saved_flags = info.flags
            try:
                subpattern = _parse_pattern(source, info)
                source.expect(")")
            finally:
                # Scoped flags end with the group.
                info.flags = saved_flags
                source.ignore_space = bool(info.flags & VERBOSE)
            info.close_group()
            return Group(info, group, subpattern)
        if ch in ("=", "!"):
            # (?=... or (?!...: lookahead.
            return parse_lookaround(source, info, False, ch == "=")
        if ch == "P":
            # (?P...: a Python extension.
            return parse_extension(source, info)
        if ch == "#":
            # (?#...: a comment.
            return parse_comment(source)
        if ch == "(":
            # (?(...: a conditional subpattern.
            return parse_conditional(source, info)
        if ch == ">":
            # (?>...: an atomic subpattern.
            return parse_atomic(source, info)
        if ch == "|":
            # (?|...: a common/reset groups branch.
            return parse_common(source, info)
        if ch == "R" or "0" <= ch <= "9":
            # (?R...: probably a call to a group.
            return parse_call_group(source, info, ch, saved_pos_2)
        if ch == "&":
            # (?&...: a call to a named group.
            return parse_call_named_group(source, info, saved_pos_2)
        # (?...: probably a flags subpattern.
        source.pos = saved_pos_2
        return parse_flags_subpattern(source, info)
    # (...: an unnamed capture group.
    source.pos = saved_pos
    group = info.open_group()
    saved_flags = info.flags
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # Scoped flags end with the group.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    info.close_group()
    return Group(info, group, subpattern)
def parse_extension(source, info):
    """Parses a Python extension: (?P<name>...), (?P=name) or (?P>name)."""
    saved_pos = source.pos
    ch = source.get()
    if ch == "<":
        # (?P<...: a named capture group.
        name = parse_name(source)
        group = info.open_group(name)
        source.expect(">")
        saved_flags = info.flags
        try:
            subpattern = _parse_pattern(source, info)
            source.expect(")")
        finally:
            # Scoped flags end with the group.
            info.flags = saved_flags
            source.ignore_space = bool(info.flags & VERBOSE)
        info.close_group()
        return Group(info, group, subpattern)
    if ch == "=":
        # (?P=...: a named group reference.
        name = parse_name(source, allow_numeric=True)
        source.expect(")")
        if info.is_open_group(name):
            raise error("cannot refer to an open group", source.string,
              saved_pos)
        return make_ref_group(info, name, saved_pos)
    if ch == ">" or ch == "&":
        # (?P>...: a call to a group.
        return parse_call_named_group(source, info, saved_pos)
    source.pos = saved_pos
    raise error("unknown extension", source.string, saved_pos)
def parse_comment(source):
    """Parses a (?#...) comment, consuming up to the closing ')', and
    returns the COMMENT sentinel."""
    source.skip_while(set(")"), include=False)
    source.expect(")")
    return COMMENT
def parse_lookaround(source, info, behind, positive):
    """Parses a lookaround: behind selects lookbehind vs lookahead, positive
    selects assertion vs negation."""
    saved_flags = info.flags
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # Scoped flags end with the lookaround.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    return LookAround(behind, positive, subpattern)
def parse_conditional(source, info):
    """Parses a conditional subpattern (?(group)yes|no)."""
    saved_flags = info.flags
    saved_pos = source.pos
    try:
        group = parse_name(source, True)
        source.expect(")")
        yes_branch = parse_sequence(source, info)
        if source.match("|"):
            no_branch = parse_sequence(source, info)
        else:
            # No '|', so the "no" branch matches the empty string.
            no_branch = Sequence()
        source.expect(")")
    finally:
        # Scoped flags end with the conditional.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    if yes_branch.is_empty() and no_branch.is_empty():
        return Sequence()
    return Conditional(info, group, yes_branch, no_branch, saved_pos)
def parse_atomic(source, info):
    """Parses an atomic (non-backtracking) subpattern (?>...)."""
    saved_flags = info.flags
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # Scoped flags end with the subpattern.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    return Atomic(subpattern)
def parse_common(source, info):
    """Parses a common groups branch (?|...), in which each branch restarts
    capture-group numbering from the same point."""
    # Capture group numbers in different branches can reuse the group numbers.
    initial_group_count = info.group_count
    branches = [parse_sequence(source, info)]
    final_group_count = info.group_count
    while source.match("|"):
        # Each branch renumbers from the initial count.
        info.group_count = initial_group_count
        branches.append(parse_sequence(source, info))
        final_group_count = max(final_group_count, info.group_count)
    # After the branch, the highest group number used in any branch counts.
    info.group_count = final_group_count
    source.expect(")")
    if len(branches) == 1:
        return branches[0]
    return Branch(branches)
def parse_call_group(source, info, ch, pos):
    """Parses a call to a numbered group, (?n) or (?R)."""
    # "R" is a recursion call, equivalent to calling group 0 (the whole
    # pattern); otherwise collect the rest of the group number's digits.
    group = "0" if ch == "R" else ch + source.get_while(DIGITS)
    source.expect(")")
    return CallGroup(info, group, pos)
def parse_call_named_group(source, info, pos):
    """Parses a call to a named group, (?&name)."""
    group_name = parse_name(source)
    source.expect(")")
    return CallGroup(info, group_name, pos)
def parse_flag_set(source):
    """Parses a set of inline flag letters, returning the combined bitmask.

    The loop ends via KeyError on the first non-flag character; the source
    position is restored to just before that character.
    """
    flags = 0
    try:
        while True:
            saved_pos = source.pos
            ch = source.get()
            if ch == "V":
                # Version flags are two characters: "V0" or "V1".
                ch += source.get()
            flags |= REGEX_FLAGS[ch]
    except KeyError:
        source.pos = saved_pos
    return flags
def parse_flags(source, info):
    """Parses flags being turned on/off, returning (flags_on, flags_off)."""
    flags_on = parse_flag_set(source)
    if source.match("-"):
        flags_off = parse_flag_set(source)
        if not flags_off:
            raise error("bad inline flags: no flags after '-'", source.string,
              source.pos)
    else:
        flags_off = 0
    if flags_on & LOCALE:
        # Remember that this pattern has an inline locale flag.
        info.inline_locale = True
    return flags_on, flags_off
def parse_subpattern(source, info, flags_on, flags_off):
    """Parses a subpattern with scoped flags, eg. (?i:...)."""
    saved_flags = info.flags
    info.flags = (info.flags | flags_on) & ~flags_off
    source.ignore_space = bool(info.flags & VERBOSE)
    try:
        subpattern = _parse_pattern(source, info)
        source.expect(")")
    finally:
        # The scoped flags end with the subpattern.
        info.flags = saved_flags
        source.ignore_space = bool(info.flags & VERBOSE)
    return subpattern
def parse_flags_subpattern(source, info):
    """Parses a flags subpattern. It could be inline flags or a subpattern
    possibly with local flags. If it's a subpattern, then that's returned;
    if it's an inline flags, then FLAGS is returned.
    """
    flags_on, flags_off = parse_flags(source, info)
    if flags_off & GLOBAL_FLAGS:
        raise error("bad inline flags: cannot turn off global flag",
          source.string, source.pos)
    if flags_on & flags_off:
        raise error("bad inline flags: flag turned on and off", source.string,
          source.pos)
    # Handle flags which are global in all regex behaviours.
    new_global_flags = (flags_on & ~info.global_flags) & GLOBAL_FLAGS
    if new_global_flags:
        info.global_flags |= new_global_flags
        # A global has been turned on, so reparse the pattern.
        raise _UnscopedFlagSet(info.global_flags)
    # Ensure that from now on we have only scoped flags.
    flags_on &= ~GLOBAL_FLAGS
    if source.match(":"):
        # (?flags:...) — a subpattern with scoped flags.
        return parse_subpattern(source, info, flags_on, flags_off)
    if source.match(")"):
        # (?flags) — positional flags.
        parse_positional_flags(source, info, flags_on, flags_off)
        return FLAGS
    raise error("unknown extension", source.string, source.pos)
def parse_positional_flags(source, info, flags_on, flags_off):
    """Parses positional flags, eg. (?i).

    Under VERSION0 they are global (old re behaviour) and trigger a
    reparse; under VERSION1 they apply from this point onwards.
    """
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    if version == VERSION0:
        # Positional flags are global and can only be turned on.
        if flags_off:
            raise error("bad inline flags: cannot turn flags off",
              source.string, source.pos)
        new_global_flags = flags_on & ~info.global_flags
        if new_global_flags:
            info.global_flags |= new_global_flags
            # A global has been turned on, so reparse the pattern.
            raise _UnscopedFlagSet(info.global_flags)
    else:
        info.flags = (info.flags | flags_on) & ~flags_off
    source.ignore_space = bool(info.flags & VERBOSE)
def parse_name(source, allow_numeric=False, allow_group_0=False):
    """Parses a group name, reading up to (but not including) ')' or '>'.

    A purely numeric name is accepted only when allow_numeric is True
    (group 0 additionally requires allow_group_0); otherwise the name must
    be a valid Python identifier.
    """
    name = source.get_while(set(")>"), include=False)
    if not name:
        raise error("bad group name", source.string, source.pos)
    if name.isdigit():
        min_group = 0 if allow_group_0 else 1
        if not allow_numeric or int(name) < min_group:
            raise error("bad group name", source.string, source.pos)
    else:
        if not name.isidentifier():
            raise error("bad group name", source.string, source.pos)
    return name
def is_octal(string):
    "Checks whether every character of the string is an octal digit."
    for ch in string:
        if ch not in OCT_DIGITS:
            return False
    return True
def is_decimal(string):
    "Checks whether every character of the string is a decimal digit."
    return frozenset(string) <= DIGITS
def is_hexadecimal(string):
    "Checks whether every character of the string is a hexadecimal digit."
    return frozenset(string) <= HEX_DIGITS
def parse_escape(source, info, in_set):
    """Parses an escape sequence (the backslash has already been consumed).

    in_set is True when parsing inside a character set, where several
    escapes (\\g, \\G, \\L, \\X and positional escapes) are not special.
    """
    # The character after a backslash is never skipped, even in VERBOSE mode.
    saved_ignore = source.ignore_space
    source.ignore_space = False
    ch = source.get()
    source.ignore_space = saved_ignore
    if not ch:
        # A backslash at the end of the pattern.
        raise error("bad escape", source.string, source.pos)
    if ch in HEX_ESCAPES:
        # A hexadecimal escape sequence.
        return parse_hex_escape(source, info, HEX_ESCAPES[ch], in_set)
    elif ch == "g" and not in_set:
        # A group reference.
        saved_pos = source.pos
        try:
            return parse_group_ref(source, info)
        except error:
            # Invalid as a group reference, so assume it's a literal.
            source.pos = saved_pos
        return make_character(info, ord(ch), in_set)
    elif ch == "G" and not in_set:
        # A search anchor.
        return SearchAnchor()
    elif ch == "L" and not in_set:
        # A string set.
        return parse_string_set(source, info)
    elif ch == "N":
        # A named codepoint.
        return parse_named_char(source, info, in_set)
    elif ch in "pP":
        # A Unicode property, positive or negative.
        return parse_property(source, info, ch == "p", in_set)
    elif ch == "X" and not in_set:
        # A grapheme cluster.
        return Grapheme()
    elif ch in ALPHA:
        # An alphabetic escape sequence.
        # Positional escapes aren't allowed inside a character set.
        if not in_set:
            if info.flags & WORD:
                value = WORD_POSITION_ESCAPES.get(ch)
            else:
                value = POSITION_ESCAPES.get(ch)
            if value:
                return value
        value = CHARSET_ESCAPES.get(ch)
        if value:
            return value
        value = CHARACTER_ESCAPES.get(ch)
        if value:
            return Character(ord(value))
        return make_character(info, ord(ch), in_set)
    elif ch in DIGITS:
        # A numeric escape sequence.
        return parse_numeric_escape(source, info, ch, in_set)
    else:
        # A literal.
        return make_character(info, ord(ch), in_set)
def parse_numeric_escape(source, info, ch, in_set):
    """Parses a numeric escape sequence: either an octal escape or a
    backreference to a numbered group, disambiguated by context."""
    if in_set or ch == "0":
        # Octal escape sequence, max 3 digits.
        return parse_octal_escape(source, info, [ch], in_set)
    # At least 1 digit, so either octal escape or group.
    digits = ch
    saved_pos = source.pos
    ch = source.get()
    if ch in DIGITS:
        # At least 2 digits, so either octal escape or group.
        digits += ch
        saved_pos = source.pos
        ch = source.get()
        if is_octal(digits) and ch in OCT_DIGITS:
            # 3 octal digits, so octal escape sequence.
            encoding = info.flags & _ALL_ENCODINGS
            # 8-bit encodings wrap the value at 0xFF.
            if encoding == ASCII or encoding == LOCALE:
                octal_mask = 0xFF
            else:
                octal_mask = 0x1FF
            value = int(digits + ch, 8) & octal_mask
            return make_character(info, value)
    # Group reference.
    source.pos = saved_pos
    if info.is_open_group(digits):
        raise error("cannot refer to an open group", source.string, source.pos)
    return make_ref_group(info, digits, source.pos)
def parse_octal_escape(source, info, digits, in_set):
    """Parses an octal escape sequence of up to 3 digits; 'digits' holds the
    digit(s) already consumed."""
    saved_pos = source.pos
    ch = source.get()
    while len(digits) < 3 and ch in OCT_DIGITS:
        digits.append(ch)
        saved_pos = source.pos
        ch = source.get()
    # Push back the first non-octal character.
    source.pos = saved_pos
    try:
        value = int("".join(digits), 8)
        return make_character(info, value, in_set)
    except ValueError:
        # Reachable in a set, where the initial digit may be 8 or 9.
        raise error("bad octal escape", source.string, source.pos)
def parse_hex_escape(source, info, expected_len, in_set):
    """Parses a hex escape sequence of exactly expected_len digits."""
    digits = []
    for _ in range(expected_len):
        ch = source.get()
        # End of input ("") also fails this membership test.
        if ch not in HEX_DIGITS:
            raise error("bad hex escape", source.string, source.pos)
        digits.append(ch)
    return make_character(info, int("".join(digits), 16), in_set)
def parse_group_ref(source, info):
    """Parses a group reference of the form <name>."""
    source.expect("<")
    start = source.pos
    group_name = parse_name(source, True)
    source.expect(">")
    # A group can't refer to itself while it is still being parsed.
    if info.is_open_group(group_name):
        raise error("cannot refer to an open group", source.string, source.pos)
    return make_ref_group(info, group_name, start)
def parse_string_set(source, info):
    """Parses a named-list (string set) reference of the form <name>."""
    source.expect("<")
    list_name = parse_name(source, True)
    source.expect(">")
    # The named list must have been supplied as a keyword argument.
    if list_name is None or list_name not in info.kwargs:
        raise error("undefined named list", source.string, source.pos)
    return make_string_set(info, list_name)
def parse_named_char(source, info, in_set):
    """Parses a named character, e.g. \\N{...}."""
    mark = source.pos
    if source.match("{"):
        char_name = source.get_while(NAMED_CHAR_PART)
        if source.match("}"):
            try:
                found = unicodedata.lookup(char_name)
            except KeyError:
                raise error("undefined character name", source.string,
                    source.pos)
            return make_character(info, ord(found), in_set)
    # Not a well-formed named character; treat the escape as a literal "N".
    source.pos = mark
    return make_character(info, ord("N"), in_set)
def parse_property(source, info, positive, in_set):
    "Parses a Unicode property."
    # `positive` is True for \p{...} and False for \P{...}.
    saved_pos = source.pos
    ch = source.get()
    if ch == "{":
        # A "^" just inside the braces negates the property again.
        negate = source.match("^")
        prop_name, name = parse_property_name(source)
        if source.match("}"):
            # It's correctly delimited.
            prop = lookup_property(prop_name, name, positive != negate, source)
            return make_property(info, prop, in_set)
    elif ch and ch in "CLMNPSZ":
        # An abbreviated property, eg \pL.
        prop = lookup_property(None, ch, positive, source)
        return make_property(info, prop, in_set)
    # Not a property, so treat as a literal "p" or "P".
    # (This is also reached when a "{...}" form wasn't closed properly.)
    source.pos = saved_pos
    ch = "p" if positive else "P"
    return make_character(info, ord(ch), in_set)
def parse_property_name(source):
    "Parses a property name, which may be qualified."
    # Returns a (prop_name, name) pair; prop_name is None for an
    # unqualified name such as in \p{Lu}.
    name = source.get_while(PROPERTY_NAME_PART)
    saved_pos = source.pos
    ch = source.get()
    if ch and ch in ":=":
        prop_name = name
        name = source.get_while(ALNUM | set(" &_-./")).strip()
        if name:
            # Name after the ":" or "=", so it's a qualified name.
            saved_pos = source.pos
        else:
            # No name after the ":" or "=", so assume it's an unqualified name.
            prop_name, name = None, prop_name
    else:
        prop_name = None
    # Push back whatever wasn't consumed as part of the name.
    source.pos = saved_pos
    return prop_name, name
def parse_set(source, info):
    "Parses a character set."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    saved_ignore = source.ignore_space
    # Whitespace is never ignored inside a set, even in verbose mode.
    source.ignore_space = False
    # Negative set?
    negate = source.match("^")
    try:
        if version == VERSION0:
            # Version 0 has no set operations, only implicit union.
            item = parse_set_imp_union(source, info)
        else:
            item = parse_set_union(source, info)
        if not source.match("]"):
            raise error("missing ]", source.string, source.pos)
    finally:
        # Restore the caller's whitespace handling even on error.
        source.ignore_space = saved_ignore
    if negate:
        item = item.with_flags(positive=not item.positive)
    # The set inherits the pattern's current case sensitivity.
    item = item.with_flags(case_flags=info.flags & CASE_FLAGS)
    return item
def parse_set_union(source, info):
    """Parses a set union ([x||y])."""
    operands = [parse_set_symm_diff(source, info)]
    # Keep consuming "||"-separated operands.
    while source.match("||"):
        operands.append(parse_set_symm_diff(source, info))
    # A single operand needs no union node.
    return operands[0] if len(operands) == 1 else SetUnion(info, operands)
def parse_set_symm_diff(source, info):
    """Parses a set symmetric difference ([x~~y])."""
    operands = [parse_set_inter(source, info)]
    # Keep consuming "~~"-separated operands.
    while source.match("~~"):
        operands.append(parse_set_inter(source, info))
    # A single operand needs no symmetric-difference node.
    return operands[0] if len(operands) == 1 else SetSymDiff(info, operands)
def parse_set_inter(source, info):
    """Parses a set intersection ([x&&y])."""
    operands = [parse_set_diff(source, info)]
    # Keep consuming "&&"-separated operands.
    while source.match("&&"):
        operands.append(parse_set_diff(source, info))
    # A single operand needs no intersection node.
    return operands[0] if len(operands) == 1 else SetInter(info, operands)
def parse_set_diff(source, info):
    """Parses a set difference ([x--y])."""
    operands = [parse_set_imp_union(source, info)]
    # Keep consuming "--"-separated operands.
    while source.match("--"):
        operands.append(parse_set_imp_union(source, info))
    # A single operand needs no difference node.
    return operands[0] if len(operands) == 1 else SetDiff(info, operands)
def parse_set_imp_union(source, info):
    "Parses a set implicit union ([xy])."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    items = [parse_set_member(source, info)]
    while True:
        saved_pos = source.pos
        if source.match("]"):
            # End of the set.
            # Push the "]" back for the caller to consume.
            source.pos = saved_pos
            break
        if version == VERSION1 and any(source.match(op) for op in SET_OPS):
            # The new behaviour has set operators.
            # Push the operator back for the caller to handle.
            source.pos = saved_pos
            break
        items.append(parse_set_member(source, info))
    if len(items) == 1:
        return items[0]
    return SetUnion(info, items)
def parse_set_member(source, info):
    "Parses a member in a character set."
    # A member is either a single item or a character range like a-z.
    # Parse a set item.
    start = parse_set_item(source, info)
    saved_pos1 = source.pos
    if (not isinstance(start, Character) or not start.positive or not
      source.match("-")):
        # It's not the start of a range.
        return start
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    # It looks like the start of a range of characters.
    saved_pos2 = source.pos
    if version == VERSION1 and source.match("-"):
        # It's actually the set difference operator '--', so return the
        # character.
        source.pos = saved_pos1
        return start
    if source.match("]"):
        # We've reached the end of the set, so return both the character and
        # hyphen.
        source.pos = saved_pos2
        return SetUnion(info, [start, Character(ord("-"))])
    # Parse a set item.
    end = parse_set_item(source, info)
    if not isinstance(end, Character) or not end.positive:
        # It's not a range, so return the character, hyphen and property.
        return SetUnion(info, [start, Character(ord("-")), end])
    # It _is_ a range.
    if start.value > end.value:
        raise error("bad character range", source.string, source.pos)
    if start.value == end.value:
        # A degenerate range of a single character.
        return start
    return Range(start.value, end.value)
def parse_set_item(source, info):
    "Parses an item in a character set."
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    if source.match("\\"):
        # An escape sequence in a set.
        return parse_escape(source, info, True)
    saved_pos = source.pos
    if source.match("[:"):
        # Looks like a POSIX character class.
        try:
            return parse_posix_class(source, info)
        except ParseError:
            # Not a POSIX character class.
            source.pos = saved_pos
    if version == VERSION1 and source.match("["):
        # It's the start of a nested set.
        # Negative set?
        negate = source.match("^")
        item = parse_set_union(source, info)
        if not source.match("]"):
            raise error("missing ]", source.string, source.pos)
        if negate:
            item = item.with_flags(positive=not item.positive)
        return item
    ch = source.get()
    if not ch:
        # The set was never closed.
        raise error("bad set", source.string, source.pos)
    return Character(ord(ch))
def parse_posix_class(source, info):
    """Parses a POSIX character class, e.g. [:alpha:] or [:^digit:]."""
    negated = source.match("^")
    prop_name, name = parse_property_name(source)
    if not source.match(":]"):
        # Not terminated properly, so it isn't a POSIX class after all.
        raise ParseError()
    positive = not negated
    return lookup_property(prop_name, name, positive, source)
def float_to_rational(flt):
    """Converts a float to a rational pair (numerator, denominator).

    Uses a continued-fraction expansion, stopping once the fractional
    remainder is negligible (< 1e-4).
    """
    int_part = int(flt)
    # Renamed from `error`: the old local name shadowed the module-level
    # `error` exception class used throughout this file.
    remainder = flt - int_part
    if abs(remainder) < 0.0001:
        return int_part, 1
    den, num = float_to_rational(1.0 / remainder)
    return int_part * den + num, den
def numeric_to_rational(numeric):
    """Converts a numeric string to a rational string, if possible."""
    # Strip a leading minus sign, remembering it for the result.
    if numeric[ : 1] == "-":
        sign = numeric[0]
        numeric = numeric[1 : ]
    else:
        sign = ""
    parts = numeric.split("/")
    if len(parts) == 1:
        # A plain number.
        num, den = float_to_rational(float(parts[0]))
    elif len(parts) == 2:
        # Already written as a fraction.
        num, den = float_to_rational(float(parts[0]) / float(parts[1]))
    else:
        raise ValueError()
    result = "{}{}/{}".format(sign, num, den)
    # A denominator of 1 is left implicit.
    return result[ : -2] if result.endswith("/1") else result
def standardise_name(name):
    """Standardises a property or value name."""
    try:
        # A numeric name is canonicalised as a rational string.
        return numeric_to_rational("".join(name))
    except (ValueError, ZeroDivisionError):
        # Not numeric: drop separator characters and uppercase.
        return "".join(ch for ch in name if ch not in "_- ").upper()
def lookup_property(property, value, positive, source=None):
    "Looks up a property."
    # Returns a Property node; tries progressively looser interpretations
    # of the value when no property name was given.
    # Normalise the names (which may still be lists).
    property = standardise_name(property) if property else None
    value = standardise_name(value)
    if (property, value) == ("GENERALCATEGORY", "ASSIGNED"):
        # "Assigned" is the complement of "Unassigned".
        property, value, positive = "GENERALCATEGORY", "UNASSIGNED", not positive
    if property:
        # Both the property and the value are provided.
        prop = PROPERTIES.get(property)
        if not prop:
            if not source:
                raise error("unknown property")
            raise error("unknown property", source.string, source.pos)
        prop_id, value_dict = prop
        val_id = value_dict.get(value)
        if val_id is None:
            if not source:
                raise error("unknown property value")
            raise error("unknown property value", source.string, source.pos)
        if "YES" in value_dict and val_id == 0:
            # A binary property: Prop=No is the negation of Prop=Yes.
            positive, val_id = not positive, 1
        return Property((prop_id << 16) | val_id, positive)
    # Only the value is provided.
    # It might be the name of a GC, script or block value.
    for property in ("GC", "SCRIPT", "BLOCK"):
        prop_id, value_dict = PROPERTIES.get(property)
        val_id = value_dict.get(value)
        if val_id is not None:
            return Property((prop_id << 16) | val_id, positive)
    # It might be the name of a binary property.
    prop = PROPERTIES.get(value)
    if prop:
        prop_id, value_dict = prop
        if "YES" in value_dict:
            return Property((prop_id << 16) | 1, positive)
    # It might be the name of a binary property starting with a prefix.
    if value.startswith("IS"):
        prop = PROPERTIES.get(value[2 : ])
        if prop:
            prop_id, value_dict = prop
            if "YES" in value_dict:
                return Property((prop_id << 16) | 1, positive)
    # It might be the name of a script or block starting with a prefix.
    # (Both prefixes are 2 characters, hence value[2 : ].)
    for prefix, property in (("IS", "SCRIPT"), ("IN", "BLOCK")):
        if value.startswith(prefix):
            prop_id, value_dict = PROPERTIES.get(property)
            val_id = value_dict.get(value[2 : ])
            if val_id is not None:
                return Property((prop_id << 16) | val_id, positive)
    # Unknown property.
    if not source:
        raise error("unknown property")
    raise error("unknown property", source.string, source.pos)
def _compile_replacement(source, pattern, is_unicode):
    "Compiles a replacement template escape sequence."
    # Returns (is_group, values): values is a group index if is_group,
    # otherwise a list of codepoints.
    ch = source.get()
    if ch in ALPHA:
        # An alphabetic escape sequence.
        value = CHARACTER_ESCAPES.get(ch)
        if value:
            return False, [ord(value)]
        if ch in HEX_ESCAPES and (ch == "x" or is_unicode):
            # A hexadecimal escape sequence.
            return False, [parse_repl_hex_escape(source, HEX_ESCAPES[ch])]
        if ch == "g":
            # A group preference.
            return True, [compile_repl_group(source, pattern)]
        if ch == "N" and is_unicode:
            # A named character.
            value = parse_repl_named_char(source)
            if value is not None:
                return False, [value]
        # Unknown alphabetic escape: keep the backslash literally.
        return False, [ord("\\"), ord(ch)]
    # Octal values are truncated to a byte for bytes templates.
    if isinstance(source.sep, bytes):
        octal_mask = 0xFF
    else:
        octal_mask = 0x1FF
    if ch == "0":
        # An octal escape sequence.
        digits = ch
        while len(digits) < 3:
            saved_pos = source.pos
            ch = source.get()
            if ch not in OCT_DIGITS:
                # Push back the non-octal character.
                source.pos = saved_pos
                break
            digits += ch
        return False, [int(digits, 8) & octal_mask]
    if ch in DIGITS:
        # Either an octal escape sequence (3 digits) or a group reference (max
        # 2 digits).
        digits = ch
        saved_pos = source.pos
        ch = source.get()
        if ch in DIGITS:
            digits += ch
            saved_pos = source.pos
            ch = source.get()
            if ch and is_octal(digits + ch):
                # An octal escape sequence.
                return False, [int(digits + ch, 8) & octal_mask]
        # A group reference.
        source.pos = saved_pos
        return True, [int(digits)]
    if ch == "\\":
        # An escaped backslash is a backslash.
        return False, [ord("\\")]
    if not ch:
        # A trailing backslash.
        raise error("bad escape", source.string, source.pos)
    # An escaped non-backslash is a backslash followed by the literal.
    return False, [ord("\\"), ord(ch)]
def parse_repl_hex_escape(source, expected_len):
    """Parses a hex escape sequence in a replacement string."""
    chars = []
    for _ in range(expected_len):
        digit = source.get()
        if digit not in HEX_DIGITS:
            raise error("bad hex escape", source.string, source.pos)
        chars.append(digit)
    return int("".join(chars), 16)
def parse_repl_named_char(source):
    """Parses a named character in a replacement string."""
    mark = source.pos
    if source.match("{"):
        char_name = source.get_while(ALPHA | set(" "))
        if source.match("}"):
            try:
                found = unicodedata.lookup(char_name)
            except KeyError:
                raise error("undefined character name", source.string,
                    source.pos)
            return ord(found)
    # Not a named character after all; rewind and let the caller handle it.
    source.pos = mark
    return None
def compile_repl_group(source, pattern):
    """Compiles a replacement template group reference (\\g<...>)."""
    source.expect("<")
    group_name = parse_name(source, True, True)
    source.expect(">")
    if group_name.isdigit():
        # A numbered group; 0 refers to the whole match.
        group_number = int(group_name)
        if not 0 <= group_number <= pattern.groups:
            raise error("invalid group", source.string, source.pos)
        return group_number
    try:
        return pattern.groupindex[group_name]
    except KeyError:
        raise IndexError("unknown group")
# The regular expression is parsed into a syntax tree. The different types of
# node are defined below.

# Indentation unit used when dumping the parsed tree.
INDENT = " "
# Flag bits attached to opcodes in the compiled code.
POSITIVE_OP = 0x1
ZEROWIDTH_OP = 0x2
FUZZY_OP = 0x4
REVERSE_OP = 0x8
REQUIRED_OP = 0x10

# Human-readable text used when dumping nodes.
POS_TEXT = {False: "NON-MATCH", True: "MATCH"}
CASE_TEXT = {NOCASE: "", IGNORECASE: " SIMPLE_IGNORE_CASE", FULLCASE: "",
  FULLIGNORECASE: " FULL_IGNORE_CASE"}
def make_sequence(items):
    """Returns the sole item, or a Sequence node wrapping several items."""
    return items[0] if len(items) == 1 else Sequence(items)
# Common base class for all nodes.
class RegexBase:
    """Common base class for all nodes of the parsed pattern tree."""
    def __init__(self):
        # _key identifies the node for hashing/equality; subclasses extend
        # it with their own state.
        self._key = self.__class__
    def with_flags(self, positive=None, case_flags=None, zerowidth=None):
        # Returns a node with the given flags applied; returns self
        # unchanged when nothing would differ.
        if positive is None:
            positive = self.positive
        else:
            positive = bool(positive)
        if case_flags is None:
            case_flags = self.case_flags
        else:
            case_flags = case_flags & CASE_FLAGS
        if zerowidth is None:
            zerowidth = self.zerowidth
        else:
            zerowidth = bool(zerowidth)
        if (positive == self.positive and case_flags == self.case_flags and
          zerowidth == self.zerowidth):
            return self
        return self.rebuild(positive, case_flags, zerowidth)
    def fix_groups(self, pattern, reverse, fuzzy):
        # Resolves group references; nothing to do by default.
        pass
    def optimise(self, info):
        return self
    def pack_characters(self, info):
        return self
    def remove_captures(self):
        return self
    def is_atomic(self):
        return True
    def can_be_affix(self):
        return True
    def contains_group(self):
        return False
    def get_firstset(self, reverse):
        # Subclasses that know their possible first characters override this.
        raise _FirstSetError()
    def has_simple_start(self):
        return False
    def compile(self, reverse=False, fuzzy=False):
        return self._compile(reverse, fuzzy)
    def dump(self, indent, reverse):
        self._dump(indent, reverse)
    def is_empty(self):
        return False
    def __hash__(self):
        return hash(self._key)
    def __eq__(self, other):
        return type(self) is type(other) and self._key == other._key
    def __ne__(self, other):
        return not self.__eq__(other)
    def get_required_string(self, reverse):
        # Returns (offset, node); None means no required string.
        return self.max_width(), None
# Base class for zero-width nodes.
class ZeroWidthBase(RegexBase):
    """Base class for zero-width nodes (assertions consuming no input)."""
    def __init__(self, positive=True):
        RegexBase.__init__(self)
        self.positive = bool(positive)
        self._key = self.__class__, self.positive
    def get_firstset(self, reverse):
        # A zero-width node doesn't constrain the first character.
        return set([None])
    def _compile(self, reverse, fuzzy):
        flags = POSITIVE_OP if self.positive else 0
        if fuzzy:
            flags |= FUZZY_OP
        if reverse:
            flags |= REVERSE_OP
        return [(self._opcode, flags)]
    def _dump(self, indent, reverse):
        print("{}{} {}".format(INDENT * indent, self._op_name,
            POS_TEXT[self.positive]))
    def max_width(self):
        # Zero-width by definition.
        return 0
class Any(RegexBase):
    """The "." node, compiled to OP.ANY (OP.ANY_REV when reversed)."""
    _opcode = {False: OP.ANY, True: OP.ANY_REV}
    _op_name = "ANY"
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[reverse], flags)]
    def _dump(self, indent, reverse):
        print("{}{}".format(INDENT * indent, self._op_name))
    def max_width(self):
        # Matches exactly one character.
        return 1
class AnyAll(Any):
    """An Any variant compiled to OP.ANY_ALL."""
    _opcode = {False: OP.ANY_ALL, True: OP.ANY_ALL_REV}
    _op_name = "ANY_ALL"
class AnyU(Any):
    """An Any variant compiled to OP.ANY_U."""
    _opcode = {False: OP.ANY_U, True: OP.ANY_U_REV}
    _op_name = "ANY_U"
class Atomic(RegexBase):
    """An atomic group: its subpattern is compiled between OP.ATOMIC and
    OP.END, and most tree operations delegate to the subpattern."""
    def __init__(self, subpattern):
        RegexBase.__init__(self)
        self.subpattern = subpattern
    def fix_groups(self, pattern, reverse, fuzzy):
        self.subpattern.fix_groups(pattern, reverse, fuzzy)
    def optimise(self, info):
        self.subpattern = self.subpattern.optimise(info)
        if self.subpattern.is_empty():
            # An empty subpattern doesn't need an atomic wrapper.
            return self.subpattern
        return self
    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self
    def remove_captures(self):
        self.subpattern = self.subpattern.remove_captures()
        return self
    def can_be_affix(self):
        return self.subpattern.can_be_affix()
    def contains_group(self):
        return self.subpattern.contains_group()
    def get_firstset(self, reverse):
        return self.subpattern.get_firstset(reverse)
    def has_simple_start(self):
        return self.subpattern.has_simple_start()
    def _compile(self, reverse, fuzzy):
        return ([(OP.ATOMIC, )] + self.subpattern.compile(reverse, fuzzy) +
          [(OP.END, )])
    def _dump(self, indent, reverse):
        print("{}ATOMIC".format(INDENT * indent))
        self.subpattern.dump(indent + 1, reverse)
    def is_empty(self):
        return self.subpattern.is_empty()
    def __eq__(self, other):
        return (type(self) is type(other) and self.subpattern ==
          other.subpattern)
    def max_width(self):
        return self.subpattern.max_width()
    def get_required_string(self, reverse):
        return self.subpattern.get_required_string(reverse)
class Boundary(ZeroWidthBase):
    """Zero-width boundary assertion (OP.BOUNDARY)."""
    _opcode = OP.BOUNDARY
    _op_name = "BOUNDARY"
class Branch(RegexBase):
    """A set of alternatives (created by "|" in a pattern)."""
    def __init__(self, branches):
        RegexBase.__init__(self)
        self.branches = branches
    def fix_groups(self, pattern, reverse, fuzzy):
        for b in self.branches:
            b.fix_groups(pattern, reverse, fuzzy)
    def optimise(self, info):
        # FIX: this method was previously defined twice in this class; the
        # later, simplified definition shadowed this one and silently
        # disabled the common-prefix/suffix and prefix-merging optimisations
        # (whose helper methods are all defined below). The shadowing
        # duplicate has been removed.
        # Flatten branches within branches.
        branches = Branch._flatten_branches(info, self.branches)
        # Move any common prefix or suffix out of the branches.
        prefix, branches = Branch._split_common_prefix(info, branches)
        suffix, branches = Branch._split_common_suffix(info, branches)
        # Merge branches starting with the same character. (If a character
        # prefix doesn't match in one branch, it won't match in any of the
        # others starting with that same character.)
        branches = Branch._merge_common_prefixes(info, branches)
        # Try to reduce adjacent single-character branches to sets.
        branches = Branch._reduce_to_set(info, branches)
        if len(branches) > 1:
            sequence = prefix + [Branch(branches)] + suffix
        else:
            sequence = prefix + branches + suffix
        return make_sequence(sequence)
    def pack_characters(self, info):
        self.branches = [b.pack_characters(info) for b in self.branches]
        return self
    def remove_captures(self):
        self.branches = [b.remove_captures() for b in self.branches]
        return self
    def is_atomic(self):
        return all(b.is_atomic() for b in self.branches)
    def can_be_affix(self):
        return all(b.can_be_affix() for b in self.branches)
    def contains_group(self):
        return any(b.contains_group() for b in self.branches)
    def get_firstset(self, reverse):
        # The firstset of a branch is the union of its alternatives'.
        fs = set()
        for b in self.branches:
            fs |= b.get_firstset(reverse)
        return fs or set([None])
    def _compile(self, reverse, fuzzy):
        # Alternatives are separated by OP.NEXT; the final separator is
        # replaced by OP.END.
        code = [(OP.BRANCH, )]
        for b in self.branches:
            code.extend(b.compile(reverse, fuzzy))
            code.append((OP.NEXT, ))
        code[-1] = (OP.END, )
        return code
    def _dump(self, indent, reverse):
        print("{}BRANCH".format(INDENT * indent))
        self.branches[0].dump(indent + 1, reverse)
        for b in self.branches[1 : ]:
            print("{}OR".format(INDENT * indent))
            b.dump(indent + 1, reverse)
    @staticmethod
    def _flatten_branches(info, branches):
        # Flatten the branches so that there aren't branches of branches.
        new_branches = []
        for b in branches:
            b = b.optimise(info)
            if isinstance(b, Branch):
                new_branches.extend(b.branches)
            else:
                new_branches.append(b)
        return new_branches
    @staticmethod
    def _split_common_prefix(info, branches):
        # Common leading items can be moved out of the branches.
        # Get the items in the branches.
        alternatives = []
        for b in branches:
            if isinstance(b, Sequence):
                alternatives.append(b.items)
            else:
                alternatives.append([b])
        # What is the maximum possible length of the prefix?
        max_count = min(len(a) for a in alternatives)
        # What is the longest common prefix?
        prefix = alternatives[0]
        pos = 0
        end_pos = max_count
        while pos < end_pos and prefix[pos].can_be_affix() and all(a[pos] ==
          prefix[pos] for a in alternatives):
            pos += 1
        count = pos
        if info.flags & UNICODE:
            # We need to check that we're not splitting a sequence of
            # characters which could form part of full case-folding.
            count = pos
            while count > 0 and not all(Branch._can_split(a, count) for a in
              alternatives):
                count -= 1
        # No common prefix is possible.
        if count == 0:
            return [], branches
        # Rebuild the branches.
        new_branches = []
        for a in alternatives:
            new_branches.append(make_sequence(a[count : ]))
        return prefix[ : count], new_branches
    @staticmethod
    def _split_common_suffix(info, branches):
        # Common trailing items can be moved out of the branches.
        # Get the items in the branches.
        alternatives = []
        for b in branches:
            if isinstance(b, Sequence):
                alternatives.append(b.items)
            else:
                alternatives.append([b])
        # What is the maximum possible length of the suffix?
        max_count = min(len(a) for a in alternatives)
        # What is the longest common suffix?
        suffix = alternatives[0]
        pos = -1
        end_pos = -1 - max_count
        while pos > end_pos and suffix[pos].can_be_affix() and all(a[pos] ==
          suffix[pos] for a in alternatives):
            pos -= 1
        count = -1 - pos
        if info.flags & UNICODE:
            # We need to check that we're not splitting a sequence of
            # characters which could form part of full case-folding.
            while count > 0 and not all(Branch._can_split_rev(a, count) for a
              in alternatives):
                count -= 1
        # No common suffix is possible.
        if count == 0:
            return [], branches
        # Rebuild the branches.
        new_branches = []
        for a in alternatives:
            new_branches.append(make_sequence(a[ : -count]))
        return suffix[-count : ], new_branches
    @staticmethod
    def _can_split(items, count):
        # Check the characters either side of the proposed split.
        if not Branch._is_full_case(items, count - 1):
            return True
        if not Branch._is_full_case(items, count):
            return True
        # Check whether a 1-1 split would be OK.
        if Branch._is_folded(items[count - 1 : count + 1]):
            return False
        # Check whether a 1-2 split would be OK.
        if (Branch._is_full_case(items, count + 2) and
          Branch._is_folded(items[count - 1 : count + 2])):
            return False
        # Check whether a 2-1 split would be OK.
        if (Branch._is_full_case(items, count - 2) and
          Branch._is_folded(items[count - 2 : count + 1])):
            return False
        return True
    @staticmethod
    def _can_split_rev(items, count):
        end = len(items)
        # Check the characters either side of the proposed split.
        if not Branch._is_full_case(items, end - count):
            return True
        if not Branch._is_full_case(items, end - count - 1):
            return True
        # Check whether a 1-1 split would be OK.
        if Branch._is_folded(items[end - count - 1 : end - count + 1]):
            return False
        # Check whether a 1-2 split would be OK.
        if (Branch._is_full_case(items, end - count + 2) and
          Branch._is_folded(items[end - count - 1 : end - count + 2])):
            return False
        # Check whether a 2-1 split would be OK.
        if (Branch._is_full_case(items, end - count - 2) and
          Branch._is_folded(items[end - count - 2 : end - count + 1])):
            return False
        return True
    @staticmethod
    def _merge_common_prefixes(info, branches):
        # Branches with the same case-sensitive character prefix can be grouped
        # together if they are separated only by other branches with a
        # character prefix.
        prefixed = defaultdict(list)
        order = {}
        new_branches = []
        for b in branches:
            if Branch._is_simple_character(b):
                # Branch starts with a simple character.
                prefixed[b.value].append([b])
                order.setdefault(b.value, len(order))
            elif (isinstance(b, Sequence) and b.items and
              Branch._is_simple_character(b.items[0])):
                # Branch starts with a simple character.
                prefixed[b.items[0].value].append(b.items)
                order.setdefault(b.items[0].value, len(order))
            else:
                Branch._flush_char_prefix(info, prefixed, order, new_branches)
                new_branches.append(b)
        Branch._flush_char_prefix(info, prefixed, order, new_branches)
        return new_branches
    @staticmethod
    def _is_simple_character(c):
        return isinstance(c, Character) and c.positive and not c.case_flags
    @staticmethod
    def _reduce_to_set(info, branches):
        # Can the branches be reduced to a set?
        new_branches = []
        items = set()
        case_flags = NOCASE
        for b in branches:
            if isinstance(b, (Character, Property, SetBase)):
                # Branch starts with a single character.
                if b.case_flags != case_flags:
                    # Different case sensitivity, so flush.
                    Branch._flush_set_members(info, items, case_flags,
                      new_branches)
                    case_flags = b.case_flags
                items.add(b.with_flags(case_flags=NOCASE))
            else:
                Branch._flush_set_members(info, items, case_flags,
                  new_branches)
                new_branches.append(b)
        Branch._flush_set_members(info, items, case_flags, new_branches)
        return new_branches
    @staticmethod
    def _flush_char_prefix(info, prefixed, order, new_branches):
        # Flush the prefixed branches, in first-seen order.
        if not prefixed:
            return
        for value, branches in sorted(prefixed.items(), key=lambda pair:
          order[pair[0]]):
            if len(branches) == 1:
                new_branches.append(make_sequence(branches[0]))
            else:
                subbranches = []
                optional = False
                for b in branches:
                    if len(b) > 1:
                        subbranches.append(make_sequence(b[1 : ]))
                    elif not optional:
                        # Only one empty alternative is needed.
                        subbranches.append(Sequence())
                        optional = True
                sequence = Sequence([Character(value), Branch(subbranches)])
                new_branches.append(sequence.optimise(info))
        prefixed.clear()
        order.clear()
    @staticmethod
    def _flush_set_members(info, items, case_flags, new_branches):
        # Flush the set members.
        if not items:
            return
        if len(items) == 1:
            item = list(items)[0]
        else:
            item = SetUnion(info, list(items)).optimise(info)
        new_branches.append(item.with_flags(case_flags=case_flags))
        items.clear()
    @staticmethod
    def _is_full_case(items, i):
        if not 0 <= i < len(items):
            return False
        item = items[i]
        return (isinstance(item, Character) and item.positive and
          (item.case_flags & FULLIGNORECASE) == FULLIGNORECASE)
    @staticmethod
    def _is_folded(items):
        # Do these characters together fold to some single character's
        # full case-folding?
        if len(items) < 2:
            return False
        for i in items:
            if (not isinstance(i, Character) or not i.positive or not
              i.case_flags):
                return False
        folded = "".join(chr(i.value) for i in items)
        folded = _regex.fold_case(FULL_CASE_FOLDING, folded)
        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()
        for c in expanding_chars:
            if folded == _regex.fold_case(FULL_CASE_FOLDING, c):
                return True
        return False
    def is_empty(self):
        return all(b.is_empty() for b in self.branches)
    def __eq__(self, other):
        return type(self) is type(other) and self.branches == other.branches
    def max_width(self):
        return max(b.max_width() for b in self.branches)
class CallGroup(RegexBase):
    """A call to (recursion into) a capture group."""
    def __init__(self, info, group, position):
        RegexBase.__init__(self)
        self.info = info
        self.group = group
        self.position = position
        self._key = self.__class__, self.group
    def fix_groups(self, pattern, reverse, fuzzy):
        # Resolves the group reference (name or number) to a group number and
        # registers the call so the compiler can link it to the group's code.
        try:
            self.group = int(self.group)
        except ValueError:
            try:
                self.group = self.info.group_index[self.group]
            except KeyError:
                raise error("unknown group", pattern, self.position)
        if not 0 <= self.group <= self.info.group_count:
            raise error("unknown group", pattern, self.position)
        if self.group > 0 and self.info.open_group_count[self.group] > 1:
            raise error("ambiguous group reference", pattern, self.position)
        self.info.group_calls.append((self, reverse, fuzzy))
        self._key = self.__class__, self.group
    def remove_captures(self):
        # FIX: this previously raised using the undefined name `pattern`,
        # so callers got a NameError instead of the intended error.
        raise error("group reference not allowed", None, self.position)
    def _compile(self, reverse, fuzzy):
        return [(OP.GROUP_CALL, self.call_ref)]
    def _dump(self, indent, reverse):
        print("{}GROUP_CALL {}".format(INDENT * indent, self.group))
    def __eq__(self, other):
        return type(self) is type(other) and self.group == other.group
    def max_width(self):
        # The called group's width isn't known here.
        return UNLIMITED
class Character(RegexBase):
    """A positive or negative match of a single codepoint."""
    # Opcode selection by (case_flags, reverse); for a single character,
    # simple and full case-insensitivity use the same opcode.
    _opcode = {(NOCASE, False): OP.CHARACTER, (IGNORECASE, False):
      OP.CHARACTER_IGN, (FULLCASE, False): OP.CHARACTER, (FULLIGNORECASE,
      False): OP.CHARACTER_IGN, (NOCASE, True): OP.CHARACTER_REV, (IGNORECASE,
      True): OP.CHARACTER_IGN_REV, (FULLCASE, True): OP.CHARACTER_REV,
      (FULLIGNORECASE, True): OP.CHARACTER_IGN_REV}
    def __init__(self, value, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.value = value
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)
        if (self.positive and (self.case_flags & FULLIGNORECASE) ==
          FULLIGNORECASE):
            # Under full case-folding the character may expand to several
            # codepoints.
            self.folded = _regex.fold_case(FULL_CASE_FOLDING, chr(self.value))
        else:
            self.folded = chr(self.value)
        self._key = (self.__class__, self.value, self.positive,
          self.case_flags, self.zerowidth)
    def rebuild(self, positive, case_flags, zerowidth):
        return Character(self.value, positive, case_flags, zerowidth)
    def optimise(self, info, in_set=False):
        return self
    def get_firstset(self, reverse):
        return set([self])
    def has_simple_start(self):
        return True
    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        code = PrecompiledCode([self._opcode[self.case_flags, reverse], flags,
          self.value])
        if len(self.folded) > 1:
            # The character expands on full case-folding.
            code = Branch([code, String([ord(c) for c in self.folded],
              case_flags=self.case_flags)])
        return code.compile(reverse, fuzzy)
    def _dump(self, indent, reverse):
        display = ascii(chr(self.value)).lstrip("bu")
        print("{}CHARACTER {} {}{}".format(INDENT * indent,
          POS_TEXT[self.positive], display, CASE_TEXT[self.case_flags]))
    def matches(self, ch):
        return (ch == self.value) == self.positive
    def max_width(self):
        return len(self.folded)
    def get_required_string(self, reverse):
        if not self.positive:
            return 1, None
        self.folded_characters = tuple(ord(c) for c in self.folded)
        return 0, self
class Conditional(RegexBase):
    """A conditional subpattern: matches yes_item if the group has matched,
    otherwise no_item."""
    def __init__(self, info, group, yes_item, no_item, position):
        RegexBase.__init__(self)
        self.info = info
        self.group = group
        self.yes_item = yes_item
        self.no_item = no_item
        self.position = position
    def fix_groups(self, pattern, reverse, fuzzy):
        # Resolves the group reference (name or number) to a group number.
        try:
            self.group = int(self.group)
        except ValueError:
            try:
                self.group = self.info.group_index[self.group]
            except KeyError:
                raise error("unknown group", pattern, self.position)
        if not 1 <= self.group <= self.info.group_count:
            raise error("unknown group", pattern, self.position)
        self.yes_item.fix_groups(pattern, reverse, fuzzy)
        self.no_item.fix_groups(pattern, reverse, fuzzy)
    def optimise(self, info):
        yes_item = self.yes_item.optimise(info)
        no_item = self.no_item.optimise(info)
        return Conditional(info, self.group, yes_item, no_item, self.position)
    def pack_characters(self, info):
        self.yes_item = self.yes_item.pack_characters(info)
        self.no_item = self.no_item.pack_characters(info)
        return self
    def remove_captures(self):
        self.yes_item = self.yes_item.remove_captures()
        self.no_item = self.no_item.remove_captures()
        # FIX: this previously fell off the end and returned None, so a
        # parent doing `child = child.remove_captures()` (as every caller in
        # this file does) would replace this node with None.
        return self
    def is_atomic(self):
        return self.yes_item.is_atomic() and self.no_item.is_atomic()
    def can_be_affix(self):
        return self.yes_item.can_be_affix() and self.no_item.can_be_affix()
    def contains_group(self):
        return self.yes_item.contains_group() or self.no_item.contains_group()
    def get_firstset(self, reverse):
        return (self.yes_item.get_firstset(reverse) |
          self.no_item.get_firstset(reverse))
    def _compile(self, reverse, fuzzy):
        code = [(OP.GROUP_EXISTS, self.group)]
        code.extend(self.yes_item.compile(reverse, fuzzy))
        add_code = self.no_item.compile(reverse, fuzzy)
        if add_code:
            code.append((OP.NEXT, ))
            code.extend(add_code)
        code.append((OP.END, ))
        return code
    def _dump(self, indent, reverse):
        print("{}GROUP_EXISTS {}".format(INDENT * indent, self.group))
        self.yes_item.dump(indent + 1, reverse)
        if self.no_item:
            print("{}OR".format(INDENT * indent))
            self.no_item.dump(indent + 1, reverse)
    def is_empty(self):
        return self.yes_item.is_empty() and self.no_item.is_empty()
    def __eq__(self, other):
        return type(self) is type(other) and (self.group, self.yes_item,
          self.no_item) == (other.group, other.yes_item, other.no_item)
    def max_width(self):
        return max(self.yes_item.max_width(), self.no_item.max_width())
class DefaultBoundary(ZeroWidthBase):
    """Zero-width assertion compiled to OP.DEFAULT_BOUNDARY."""
    _opcode = OP.DEFAULT_BOUNDARY
    _op_name = "DEFAULT_BOUNDARY"
class DefaultEndOfWord(ZeroWidthBase):
    """Zero-width assertion compiled to OP.DEFAULT_END_OF_WORD."""
    _opcode = OP.DEFAULT_END_OF_WORD
    _op_name = "DEFAULT_END_OF_WORD"
class DefaultStartOfWord(ZeroWidthBase):
    """Zero-width assertion compiled to OP.DEFAULT_START_OF_WORD."""
    _opcode = OP.DEFAULT_START_OF_WORD
    _op_name = "DEFAULT_START_OF_WORD"
class EndOfLine(ZeroWidthBase):
    """Zero-width assertion compiled to OP.END_OF_LINE."""
    _opcode = OP.END_OF_LINE
    _op_name = "END_OF_LINE"
class EndOfLineU(EndOfLine):
    """An EndOfLine variant compiled to OP.END_OF_LINE_U."""
    _opcode = OP.END_OF_LINE_U
    _op_name = "END_OF_LINE_U"
class EndOfString(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.END_OF_STRING
    _op_name = "END_OF_STRING"
class EndOfStringLine(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.END_OF_STRING_LINE
    _op_name = "END_OF_STRING_LINE"
class EndOfStringLineU(EndOfStringLine):
    # Variant of EndOfStringLine using the _U opcode.
    _opcode = OP.END_OF_STRING_LINE_U
    _op_name = "END_OF_STRING_LINE_U"
class EndOfWord(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.END_OF_WORD
    _op_name = "END_OF_WORD"
class Fuzzy(RegexBase):
    """A fuzzy (approximate) match of a subpattern.

    ``constraints`` maps the error types "d" (deletion), "i" (insertion),
    "s" (substitution) and "e" (any error) to (min, max) tuples, and "cost"
    to a dict of per-error-type cost coefficients plus an overall "max".
    A max of None means unlimited. Missing entries are filled in with
    defaults in __init__; note that the order of the defaulting steps below
    matters, since each step inspects what the previous ones left in place.
    """
    def __init__(self, subpattern, constraints=None):
        RegexBase.__init__(self)
        if constraints is None:
            constraints = {}
        self.subpattern = subpattern
        self.constraints = constraints
        # If an error type is mentioned in the cost equation, then its maximum
        # defaults to unlimited.
        if "cost" in constraints:
            for e in "dis":
                if e in constraints["cost"]:
                    constraints.setdefault(e, (0, None))
        # If any error type is mentioned, then all the error maxima default to
        # 0, otherwise they default to unlimited.
        if set(constraints) & set("dis"):
            for e in "dis":
                constraints.setdefault(e, (0, 0))
        else:
            for e in "dis":
                constraints.setdefault(e, (0, None))
        # The maximum of the generic error type defaults to unlimited.
        constraints.setdefault("e", (0, None))
        # The cost equation defaults to equal costs. Also, the cost of any
        # error type not mentioned in the cost equation defaults to 0.
        if "cost" in constraints:
            for e in "dis":
                constraints["cost"].setdefault(e, 0)
        else:
            constraints["cost"] = {"d": 1, "i": 1, "s": 1, "max":
              constraints["e"][1]}

    def fix_groups(self, pattern, reverse, fuzzy):
        # Everything inside a fuzzy region is compiled with fuzzy=True.
        self.subpattern.fix_groups(pattern, reverse, True)

    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self

    def remove_captures(self):
        self.subpattern = self.subpattern.remove_captures()
        return self

    def is_atomic(self):
        return self.subpattern.is_atomic()

    def contains_group(self):
        return self.subpattern.contains_group()

    def _compile(self, reverse, fuzzy):
        # The individual limits.
        arguments = []
        for e in "dise":
            v = self.constraints[e]
            arguments.append(v[0])
            arguments.append(UNLIMITED if v[1] is None else v[1])
        # The coeffs of the cost equation.
        for e in "dis":
            arguments.append(self.constraints["cost"][e])
        # The maximum of the cost equation.
        v = self.constraints["cost"]["max"]
        arguments.append(UNLIMITED if v is None else v)
        flags = 0
        if reverse:
            flags |= REVERSE_OP
        return ([(OP.FUZZY, flags) + tuple(arguments)] +
          self.subpattern.compile(reverse, True) + [(OP.END,)])

    def _dump(self, indent, reverse):
        constraints = self._constraints_to_string()
        if constraints:
            constraints = " " + constraints
        print("{}FUZZY{}".format(INDENT * indent, constraints))
        self.subpattern.dump(indent + 1, reverse)

    def is_empty(self):
        return self.subpattern.is_empty()

    def __eq__(self, other):
        # NOTE(review): compares only the subpatterns, not the constraints —
        # two Fuzzy nodes with different constraints compare equal. Verify
        # whether that is intentional before relying on it.
        return (type(self) is type(other) and self.subpattern ==
          other.subpattern)

    def max_width(self):
        # Insertions make the matched width unbounded.
        return UNLIMITED

    def _constraints_to_string(self):
        # Build a human-readable form of the constraints for _dump, e.g.
        # "1<=i<=2,2d+1s<=3".
        constraints = []
        for name in "ids":
            min, max = self.constraints[name]
            if max == 0:
                continue
            con = ""
            if min > 0:
                con = "{}<=".format(min)
            con += name
            if max is not None:
                con += "<={}".format(max)
            constraints.append(con)
        cost = []
        for name in "ids":
            coeff = self.constraints["cost"][name]
            if coeff > 0:
                cost.append("{}{}".format(coeff, name))
        limit = self.constraints["cost"]["max"]
        if limit is not None and limit > 0:
            cost = "{}<={}".format("+".join(cost), limit)
            constraints.append(cost)
        return ",".join(constraints)
class Grapheme(RegexBase):
    """Matches a grapheme: one or more characters up to a grapheme boundary."""

    def _compile(self, reverse, fuzzy):
        # Lazily consume at least one character, then require a grapheme
        # boundary. Note that this is the same whether matching forwards or
        # backwards.
        repeat = LazyRepeat(AnyAll(), 1, None)
        return repeat.compile(reverse, fuzzy) + [(OP.GRAPHEME_BOUNDARY, 1)]

    def _dump(self, indent, reverse):
        print("{}GRAPHEME".format(INDENT * indent))

    def max_width(self):
        return UNLIMITED
class GreedyRepeat(RegexBase):
    """A greedy repeat of a subpattern, matching min_count..max_count times.

    A max_count of None means unbounded. LazyRepeat subclasses this and
    overrides only the opcode and dump name.
    """
    _opcode = OP.GREEDY_REPEAT
    _op_name = "GREEDY_REPEAT"

    def __init__(self, subpattern, min_count, max_count):
        RegexBase.__init__(self)
        self.subpattern = subpattern
        self.min_count = min_count
        self.max_count = max_count

    def fix_groups(self, pattern, reverse, fuzzy):
        self.subpattern.fix_groups(pattern, reverse, fuzzy)

    def optimise(self, info):
        subpattern = self.subpattern.optimise(info)
        # type(self) preserves laziness for LazyRepeat instances.
        return type(self)(subpattern, self.min_count, self.max_count)

    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self

    def remove_captures(self):
        self.subpattern = self.subpattern.remove_captures()
        return self

    def is_atomic(self):
        # Only a fixed-count repeat of an atomic body is atomic.
        return self.min_count == self.max_count and self.subpattern.is_atomic()

    def contains_group(self):
        return self.subpattern.contains_group()

    def get_firstset(self, reverse):
        fs = self.subpattern.get_firstset(reverse)
        if self.min_count == 0:
            # The repeat can match nothing, so whatever follows may also
            # start the match (signalled by None in the set).
            fs.add(None)
        return fs

    def _compile(self, reverse, fuzzy):
        repeat = [self._opcode, self.min_count]
        if self.max_count is None:
            repeat.append(UNLIMITED)
        else:
            repeat.append(self.max_count)
        subpattern = self.subpattern.compile(reverse, fuzzy)
        # An empty body compiles to nothing at all.
        if not subpattern:
            return []
        return ([tuple(repeat)] + subpattern + [(OP.END, )])

    def _dump(self, indent, reverse):
        if self.max_count is None:
            limit = "INF"
        else:
            limit = self.max_count
        print("{}{} {} {}".format(INDENT * indent, self._op_name,
          self.min_count, limit))
        self.subpattern.dump(indent + 1, reverse)

    def is_empty(self):
        return self.subpattern.is_empty()

    def __eq__(self, other):
        return type(self) is type(other) and (self.subpattern, self.min_count,
          self.max_count) == (other.subpattern, other.min_count,
          other.max_count)

    def max_width(self):
        if self.max_count is None:
            return UNLIMITED
        return self.subpattern.max_width() * self.max_count

    def get_required_string(self, reverse):
        max_count = UNLIMITED if self.max_count is None else self.max_count
        if self.min_count == 0:
            # Optional repeat: nothing is required; report only the maximum
            # width it could consume.
            w = self.subpattern.max_width() * max_count
            return min(w, UNLIMITED), None
        ofs, req = self.subpattern.get_required_string(reverse)
        if req:
            return ofs, req
        w = self.subpattern.max_width() * max_count
        return min(w, UNLIMITED), None
class Group(RegexBase):
    """A capture group wrapped around a subpattern.

    Negative group numbers denote private groups; _compile maps them to
    their public number via info.private_groups and renumbers the private
    one above info.group_count.
    """
    def __init__(self, info, group, subpattern):
        RegexBase.__init__(self)
        self.info = info
        self.group = group
        self.subpattern = subpattern
        self.call_ref = None

    def fix_groups(self, pattern, reverse, fuzzy):
        self.info.defined_groups[self.group] = (self, reverse, fuzzy)
        self.subpattern.fix_groups(pattern, reverse, fuzzy)

    def optimise(self, info):
        subpattern = self.subpattern.optimise(info)
        return Group(self.info, self.group, subpattern)

    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self

    def remove_captures(self):
        # Dropping the capture leaves just the subpattern.
        return self.subpattern.remove_captures()

    def is_atomic(self):
        return self.subpattern.is_atomic()

    def can_be_affix(self):
        return False

    def contains_group(self):
        return True

    def get_firstset(self, reverse):
        return self.subpattern.get_firstset(reverse)

    def has_simple_start(self):
        return self.subpattern.has_simple_start()

    def _compile(self, reverse, fuzzy):
        code = []
        key = self.group, reverse, fuzzy
        ref = self.info.call_refs.get(key)
        if ref is not None:
            code += [(OP.CALL_REF, ref)]
        public_group = private_group = self.group
        if private_group < 0:
            # Map the private group number to its public number, and
            # renumber the private one above the real groups.
            public_group = self.info.private_groups[private_group]
            private_group = self.info.group_count - private_group
        code += ([(OP.GROUP, private_group, public_group)] +
          self.subpattern.compile(reverse, fuzzy) + [(OP.END, )])
        if ref is not None:
            code += [(OP.END, )]
        return code

    def _dump(self, indent, reverse):
        group = self.group
        if group < 0:
            # Bug fix: this previously referenced the undefined name
            # 'private_groups', raising NameError when dumping a private
            # group; use the mapping on self.info, as _compile does.
            group = self.info.private_groups[group]
        print("{}GROUP {}".format(INDENT * indent, group))
        self.subpattern.dump(indent + 1, reverse)

    def __eq__(self, other):
        return (type(self) is type(other) and (self.group, self.subpattern) ==
          (other.group, other.subpattern))

    def max_width(self):
        return self.subpattern.max_width()

    def get_required_string(self, reverse):
        return self.subpattern.get_required_string(reverse)
class LazyRepeat(GreedyRepeat):
    # Same behaviour as GreedyRepeat; only the opcode and dump name differ.
    _opcode = OP.LAZY_REPEAT
    _op_name = "LAZY_REPEAT"
class LookAround(RegexBase):
    """A lookaround assertion (lookahead or lookbehind, positive or negative).

    A positive lookaround with an empty body always succeeds, so __new__
    collapses it to the body itself.
    """
    _dir_text = {False: "AHEAD", True: "BEHIND"}

    def __new__(cls, behind, positive, subpattern):
        if positive and subpattern.is_empty():
            return subpattern
        return RegexBase.__new__(cls)

    def __init__(self, behind, positive, subpattern):
        RegexBase.__init__(self)
        self.behind = bool(behind)
        self.positive = bool(positive)
        self.subpattern = subpattern

    def fix_groups(self, pattern, reverse, fuzzy):
        # The body is matched in the lookaround's own direction, not the
        # enclosing pattern's.
        self.subpattern.fix_groups(pattern, self.behind, fuzzy)

    def optimise(self, info):
        subpattern = self.subpattern.optimise(info)
        return LookAround(self.behind, self.positive, subpattern)

    def pack_characters(self, info):
        self.subpattern = self.subpattern.pack_characters(info)
        return self

    def remove_captures(self):
        return self.subpattern.remove_captures()

    def is_atomic(self):
        return self.subpattern.is_atomic()

    def can_be_affix(self):
        return self.subpattern.can_be_affix()

    def contains_group(self):
        return self.subpattern.contains_group()

    def _compile(self, reverse, fuzzy):
        # NOTE(review): the body is compiled without forwarding 'fuzzy'
        # (compile(self.behind) only) — confirm whether compile()'s default
        # for fuzzy makes this intentional.
        return ([(OP.LOOKAROUND, int(self.positive), int(not self.behind))] +
          self.subpattern.compile(self.behind) + [(OP.END, )])

    def _dump(self, indent, reverse):
        print("{}LOOK{} {}".format(INDENT * indent,
          self._dir_text[self.behind], POS_TEXT[self.positive]))
        self.subpattern.dump(indent + 1, self.behind)

    def is_empty(self):
        return self.subpattern.is_empty()

    def __eq__(self, other):
        return type(self) is type(other) and (self.behind, self.positive,
          self.subpattern) == (other.behind, other.positive, other.subpattern)

    def max_width(self):
        # An assertion consumes no characters.
        return 0
class PrecompiledCode(RegexBase):
    """Wraps an already-compiled code sequence as a regex node."""
    def __init__(self, code):
        # NOTE(review): unlike sibling nodes, this does not call
        # RegexBase.__init__ — confirm no base-class state is needed here.
        self.code = code

    def _compile(self, reverse, fuzzy):
        return [tuple(self.code)]
class Property(RegexBase):
    """Matches a single character by Unicode property.

    'value' packs the property id in the high 16 bits and the property
    value in the low 16 bits (see _dump). The opcode is selected by
    (case_flags, reverse).
    """
    _opcode = {(NOCASE, False): OP.PROPERTY, (IGNORECASE, False):
      OP.PROPERTY_IGN, (FULLCASE, False): OP.PROPERTY, (FULLIGNORECASE,
      False): OP.PROPERTY_IGN, (NOCASE, True): OP.PROPERTY_REV, (IGNORECASE,
      True): OP.PROPERTY_IGN_REV, (FULLCASE, True): OP.PROPERTY_REV,
      (FULLIGNORECASE, True): OP.PROPERTY_IGN_REV}

    def __init__(self, value, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.value = value
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)
        self._key = (self.__class__, self.value, self.positive,
          self.case_flags, self.zerowidth)

    def rebuild(self, positive, case_flags, zerowidth):
        # Recreate this node with different polarity/case/width flags.
        return Property(self.value, positive, case_flags, zerowidth)

    def optimise(self, info, in_set=False):
        return self

    def get_firstset(self, reverse):
        return set([self])

    def has_simple_start(self):
        return True

    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[self.case_flags, reverse], flags, self.value)]

    def _dump(self, indent, reverse):
        # Decode the packed property id/value for display.
        prop = PROPERTY_NAMES[self.value >> 16]
        name, value = prop[0], prop[1][self.value & 0xFFFF]
        print("{}PROPERTY {} {}:{}{}".format(INDENT * indent,
          POS_TEXT[self.positive], name, value, CASE_TEXT[self.case_flags]))

    def matches(self, ch):
        return _regex.has_property_value(self.value, ch) == self.positive

    def max_width(self):
        return 1
class Range(RegexBase):
    """Matches a single character in the codepoint range [lower, upper]."""
    _opcode = {(NOCASE, False): OP.RANGE, (IGNORECASE, False): OP.RANGE_IGN,
      (FULLCASE, False): OP.RANGE, (FULLIGNORECASE, False): OP.RANGE_IGN,
      (NOCASE, True): OP.RANGE_REV, (IGNORECASE, True): OP.RANGE_IGN_REV,
      (FULLCASE, True): OP.RANGE_REV, (FULLIGNORECASE, True):
      OP.RANGE_IGN_REV}
    _op_name = "RANGE"

    def __init__(self, lower, upper, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.lower = lower
        self.upper = upper
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)
        self._key = (self.__class__, self.lower, self.upper, self.positive,
          self.case_flags, self.zerowidth)

    def rebuild(self, positive, case_flags, zerowidth):
        return Range(self.lower, self.upper, positive, case_flags, zerowidth)

    def optimise(self, info, in_set=False):
        # Is the range case-sensitive?
        if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
            return self
        # Is full case-folding possible?
        if (not (info.flags & UNICODE) or (self.case_flags & FULLIGNORECASE) !=
          FULLIGNORECASE):
            return self
        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()
        # Get the folded characters in the range.
        items = []
        for ch in expanding_chars:
            if self.lower <= ord(ch) <= self.upper:
                folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
                items.append(String([ord(c) for c in folded],
                  case_flags=self.case_flags))
        if not items:
            # We can fall back to simple case-folding.
            return self
        if len(items) < self.upper - self.lower + 1:
            # Not all the characters are covered by the full case-folding.
            items.insert(0, self)
        return Branch(items)

    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[self.case_flags, reverse], flags, self.lower,
          self.upper)]

    def _dump(self, indent, reverse):
        # ascii() of a 1-char string gives a quoted repr; strip any leading
        # b/u prefix characters for display.
        display_lower = ascii(chr(self.lower)).lstrip("bu")
        display_upper = ascii(chr(self.upper)).lstrip("bu")
        print("{}RANGE {} {} {}{}".format(INDENT * indent,
          POS_TEXT[self.positive], display_lower, display_upper,
          CASE_TEXT[self.case_flags]))

    def matches(self, ch):
        return (self.lower <= ch <= self.upper) == self.positive

    def max_width(self):
        return 1
class RefGroup(RegexBase):
    """A back-reference to a capture group, by number or name."""
    _opcode = {(NOCASE, False): OP.REF_GROUP, (IGNORECASE, False):
      OP.REF_GROUP_IGN, (FULLCASE, False): OP.REF_GROUP, (FULLIGNORECASE,
      False): OP.REF_GROUP_FLD, (NOCASE, True): OP.REF_GROUP_REV, (IGNORECASE,
      True): OP.REF_GROUP_IGN_REV, (FULLCASE, True): OP.REF_GROUP_REV,
      (FULLIGNORECASE, True): OP.REF_GROUP_FLD_REV}

    def __init__(self, info, group, position, case_flags=NOCASE):
        RegexBase.__init__(self)
        self.info = info
        self.group = group
        self.position = position
        self.case_flags = case_flags
        self._key = self.__class__, self.group, self.case_flags

    def fix_groups(self, pattern, reverse, fuzzy):
        # Resolve a name or numeric string to a group number, then validate
        # it against the number of groups actually defined.
        try:
            self.group = int(self.group)
        except ValueError:
            try:
                self.group = self.info.group_index[self.group]
            except KeyError:
                raise error("unknown group", pattern, self.position)
        if not 1 <= self.group <= self.info.group_count:
            raise error("unknown group", pattern, self.position)
        # Rebuild the key now that the group is a number.
        self._key = self.__class__, self.group, self.case_flags

    def remove_captures(self):
        # NOTE(review): 'pattern' is not defined in this scope, so this
        # raises NameError rather than the intended error() — confirm and
        # fix where the pattern string is available.
        raise error("group reference not allowed", pattern, self.position)

    def _compile(self, reverse, fuzzy):
        flags = 0
        if fuzzy:
            flags |= FUZZY_OP
        return [(self._opcode[self.case_flags, reverse], flags, self.group)]

    def _dump(self, indent, reverse):
        print("{}REF_GROUP {}{}".format(INDENT * indent, self.group,
          CASE_TEXT[self.case_flags]))

    def max_width(self):
        # The referenced group's width isn't known here.
        return UNLIMITED
class SearchAnchor(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.SEARCH_ANCHOR
    _op_name = "SEARCH_ANCHOR"
class Sequence(RegexBase):
    """A sequence of regex nodes matched one after another."""
    def __init__(self, items=None):
        RegexBase.__init__(self)
        if items is None:
            items = []
        self.items = items

    def fix_groups(self, pattern, reverse, fuzzy):
        for s in self.items:
            s.fix_groups(pattern, reverse, fuzzy)

    def optimise(self, info):
        # Flatten the sequences.
        items = []
        for s in self.items:
            s = s.optimise(info)
            if isinstance(s, Sequence):
                items.extend(s.items)
            else:
                items.append(s)
        return make_sequence(items)

    def pack_characters(self, info):
        "Packs sequences of characters into strings."
        items = []
        characters = []
        case_flags = NOCASE
        for s in self.items:
            if type(s) is Character and s.positive:
                if s.case_flags != case_flags:
                    # Different case sensitivity, so flush, unless neither
                    # the previous nor the new character are cased.
                    if s.case_flags or is_cased(info, s.value):
                        Sequence._flush_characters(info, characters,
                          case_flags, items)
                        case_flags = s.case_flags
                characters.append(s.value)
            elif type(s) is String or type(s) is Literal:
                if s.case_flags != case_flags:
                    # Different case sensitivity, so flush, unless neither
                    # the previous nor the new string are cased.
                    if s.case_flags or any(is_cased(info, c) for c in
                      characters):
                        Sequence._flush_characters(info, characters,
                          case_flags, items)
                        case_flags = s.case_flags
                characters.extend(s.characters)
            else:
                Sequence._flush_characters(info, characters, case_flags, items)
                items.append(s.pack_characters(info))
        Sequence._flush_characters(info, characters, case_flags, items)
        return make_sequence(items)

    def remove_captures(self):
        self.items = [s.remove_captures() for s in self.items]
        return self

    def is_atomic(self):
        return all(s.is_atomic() for s in self.items)

    def can_be_affix(self):
        return False

    def contains_group(self):
        return any(s.contains_group() for s in self.items)

    def get_firstset(self, reverse):
        fs = set()
        items = self.items
        if reverse:
            # Bug fix: this previously called items.reverse(), which
            # reversed self.items in place as a side effect (items is an
            # alias, not a copy). Iterate a reversed copy instead, matching
            # the non-mutating seq[::-1] used by _compile and
            # get_required_string.
            items = items[::-1]
        for s in items:
            fs |= s.get_firstset(reverse)
            if None not in fs:
                return fs
            # None means the item could match nothing; keep accumulating
            # until an item that must match something is found.
            fs.discard(None)
        return fs | set([None])

    def has_simple_start(self):
        return self.items and self.items[0].has_simple_start()

    def _compile(self, reverse, fuzzy):
        seq = self.items
        if reverse:
            seq = seq[::-1]
        code = []
        for s in seq:
            code.extend(s.compile(reverse, fuzzy))
        return code

    def _dump(self, indent, reverse):
        for s in self.items:
            s.dump(indent, reverse)

    @staticmethod
    def _flush_characters(info, characters, case_flags, items):
        # Turn the accumulated character values into a Character or String
        # node and reset the accumulator (in place, for the caller).
        if not characters:
            return
        # Disregard case_flags if all of the characters are case-less.
        if case_flags & IGNORECASE:
            if not any(is_cased(info, c) for c in characters):
                case_flags = NOCASE
        if len(characters) == 1:
            items.append(Character(characters[0], case_flags=case_flags))
        else:
            items.append(String(characters, case_flags=case_flags))
        characters[:] = []

    def is_empty(self):
        return all(i.is_empty() for i in self.items)

    def __eq__(self, other):
        return type(self) is type(other) and self.items == other.items

    def max_width(self):
        return sum(s.max_width() for s in self.items)

    def get_required_string(self, reverse):
        seq = self.items
        if reverse:
            seq = seq[::-1]
        offset = 0
        for s in seq:
            ofs, req = s.get_required_string(reverse)
            offset += ofs
            if req:
                return offset, req
        return offset, None
class SetBase(RegexBase):
    """Base class for character-set operations (union, intersection, etc.).

    Subclasses supply _opcode/_op_name and the set-specific optimise() and
    matches() logic.
    """
    def __init__(self, info, items, positive=True, case_flags=NOCASE,
      zerowidth=False):
        RegexBase.__init__(self)
        self.info = info
        self.items = tuple(items)
        self.positive = bool(positive)
        self.case_flags = case_flags
        self.zerowidth = bool(zerowidth)
        self.char_width = 1
        self._key = (self.__class__, self.items, self.positive,
          self.case_flags, self.zerowidth)

    def rebuild(self, positive, case_flags, zerowidth):
        return type(self)(self.info, self.items, positive, case_flags,
          zerowidth).optimise(self.info)

    def get_firstset(self, reverse):
        return set([self])

    def has_simple_start(self):
        return True

    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        # The set opcode is followed by its members' code, then END.
        code = [(self._opcode[self.case_flags, reverse], flags)]
        for m in self.items:
            code.extend(m.compile())
        code.append((OP.END, ))
        return code

    def _dump(self, indent, reverse):
        print("{}{} {}{}".format(INDENT * indent, self._op_name,
          POS_TEXT[self.positive], CASE_TEXT[self.case_flags]))
        for i in self.items:
            i.dump(indent + 1, reverse)

    def _handle_case_folding(self, info, in_set):
        # Is the set case-sensitive?
        if not self.positive or not (self.case_flags & IGNORECASE) or in_set:
            return self
        # Is full case-folding possible?
        if (not (self.info.flags & UNICODE) or (self.case_flags &
          FULLIGNORECASE) != FULLIGNORECASE):
            return self
        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()
        # Get the folded characters in the set.
        items = []
        seen = set()
        for ch in expanding_chars:
            if self.matches(ord(ch)):
                folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
                if folded not in seen:
                    items.append(String([ord(c) for c in folded],
                      case_flags=self.case_flags))
                    seen.add(folded)
        if not items:
            # We can fall back to simple case-folding.
            return self
        return Branch([self] + items)

    def max_width(self):
        # Is the set case-sensitive?
        if not self.positive or not (self.case_flags & IGNORECASE):
            return 1
        # Is full case-folding possible?
        if (not (self.info.flags & UNICODE) or (self.case_flags &
          FULLIGNORECASE) != FULLIGNORECASE):
            return 1
        # Get the characters which expand to multiple codepoints on folding.
        expanding_chars = _regex.get_expand_on_folding()
        # Get the folded characters in the set.
        seen = set()
        for ch in expanding_chars:
            if self.matches(ord(ch)):
                folded = _regex.fold_case(FULL_CASE_FOLDING, ch)
                seen.add(folded)
        if not seen:
            return 1
        # The widest folded expansion bounds the match width.
        return max(len(folded) for folded in seen)
class SetDiff(SetBase):
    """Set difference: matches the first item but not the second."""
    _opcode = {(NOCASE, False): OP.SET_DIFF, (IGNORECASE, False):
      OP.SET_DIFF_IGN, (FULLCASE, False): OP.SET_DIFF, (FULLIGNORECASE,
      False): OP.SET_DIFF_IGN, (NOCASE, True): OP.SET_DIFF_REV, (IGNORECASE,
      True): OP.SET_DIFF_IGN_REV, (FULLCASE, True): OP.SET_DIFF_REV,
      (FULLIGNORECASE, True): OP.SET_DIFF_IGN_REV}
    _op_name = "SET_DIFF"

    def optimise(self, info, in_set=False):
        items = self.items
        # A difference of more than 2 items is normalised to
        # first - union(rest).
        if len(items) > 2:
            items = [items[0], SetUnion(info, items[1 : ])]
        if len(items) == 1:
            return items[0].with_flags(case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)
        self.items = tuple(m.optimise(info, in_set=True) for m in items)
        return self._handle_case_folding(info, in_set)

    def matches(self, ch):
        m = self.items[0].matches(ch) and not self.items[1].matches(ch)
        return m == self.positive
class SetInter(SetBase):
    """Set intersection: matches only characters matched by every item."""
    _opcode = {(NOCASE, False): OP.SET_INTER, (IGNORECASE, False):
      OP.SET_INTER_IGN, (FULLCASE, False): OP.SET_INTER, (FULLIGNORECASE,
      False): OP.SET_INTER_IGN, (NOCASE, True): OP.SET_INTER_REV, (IGNORECASE,
      True): OP.SET_INTER_IGN_REV, (FULLCASE, True): OP.SET_INTER_REV,
      (FULLIGNORECASE, True): OP.SET_INTER_IGN_REV}
    _op_name = "SET_INTER"

    def optimise(self, info, in_set=False):
        items = []
        for m in self.items:
            m = m.optimise(info, in_set=True)
            if isinstance(m, SetInter) and m.positive:
                # Intersection in intersection: flatten.
                items.extend(m.items)
            else:
                items.append(m)
        if len(items) == 1:
            # A single-item intersection is just that item.
            return items[0].with_flags(case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)
        self.items = tuple(items)
        return self._handle_case_folding(info, in_set)

    def matches(self, ch):
        m = all(i.matches(ch) for i in self.items)
        return m == self.positive
class SetSymDiff(SetBase):
    """Set symmetric difference: matches characters in an odd number of items."""
    _opcode = {(NOCASE, False): OP.SET_SYM_DIFF, (IGNORECASE, False):
      OP.SET_SYM_DIFF_IGN, (FULLCASE, False): OP.SET_SYM_DIFF,
      (FULLIGNORECASE, False): OP.SET_SYM_DIFF_IGN, (NOCASE, True):
      OP.SET_SYM_DIFF_REV, (IGNORECASE, True): OP.SET_SYM_DIFF_IGN_REV,
      (FULLCASE, True): OP.SET_SYM_DIFF_REV, (FULLIGNORECASE, True):
      OP.SET_SYM_DIFF_IGN_REV}
    _op_name = "SET_SYM_DIFF"

    def optimise(self, info, in_set=False):
        items = []
        for m in self.items:
            m = m.optimise(info, in_set=True)
            if isinstance(m, SetSymDiff) and m.positive:
                # Symmetric difference in symmetric difference: flatten.
                items.extend(m.items)
            else:
                items.append(m)
        if len(items) == 1:
            return items[0].with_flags(case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)
        self.items = tuple(items)
        return self._handle_case_folding(info, in_set)

    def matches(self, ch):
        # XOR-fold the membership tests across all items.
        m = False
        for i in self.items:
            m = m != i.matches(ch)
        return m == self.positive
class SetUnion(SetBase):
    """Set union: matches characters matched by any item."""
    _opcode = {(NOCASE, False): OP.SET_UNION, (IGNORECASE, False):
      OP.SET_UNION_IGN, (FULLCASE, False): OP.SET_UNION, (FULLIGNORECASE,
      False): OP.SET_UNION_IGN, (NOCASE, True): OP.SET_UNION_REV, (IGNORECASE,
      True): OP.SET_UNION_IGN_REV, (FULLCASE, True): OP.SET_UNION_REV,
      (FULLIGNORECASE, True): OP.SET_UNION_IGN_REV}
    _op_name = "SET_UNION"

    def optimise(self, info, in_set=False):
        items = []
        for m in self.items:
            m = m.optimise(info, in_set=True)
            if isinstance(m, SetUnion) and m.positive:
                # Union in union: flatten.
                items.extend(m.items)
            else:
                items.append(m)
        if len(items) == 1:
            # A single-item union collapses to that item, combining the
            # polarities of the union and the item.
            i = items[0]
            return i.with_flags(positive=i.positive == self.positive,
              case_flags=self.case_flags,
              zerowidth=self.zerowidth).optimise(info, in_set)
        self.items = tuple(items)
        return self._handle_case_folding(info, in_set)

    def _compile(self, reverse, fuzzy):
        flags = 0
        if self.positive:
            flags |= POSITIVE_OP
        if self.zerowidth:
            flags |= ZEROWIDTH_OP
        if fuzzy:
            flags |= FUZZY_OP
        # Batch plain Character members (grouped by polarity) into compact
        # CHARACTER/STRING opcodes; other members compile themselves.
        characters, others = defaultdict(list), []
        for m in self.items:
            if isinstance(m, Character):
                characters[m.positive].append(m.value)
            else:
                others.append(m)
        code = [(self._opcode[self.case_flags, reverse], flags)]
        for positive, values in characters.items():
            flags = 0
            if positive:
                flags |= POSITIVE_OP
            if len(values) == 1:
                code.append((OP.CHARACTER, flags, values[0]))
            else:
                code.append((OP.STRING, flags, len(values)) + tuple(values))
        for m in others:
            code.extend(m.compile())
        code.append((OP.END, ))
        return code

    def matches(self, ch):
        m = any(i.matches(ch) for i in self.items)
        return m == self.positive
class StartOfLine(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.START_OF_LINE
    _op_name = "START_OF_LINE"
class StartOfLineU(StartOfLine):
    # Variant of StartOfLine using the _U opcode.
    _opcode = OP.START_OF_LINE_U
    _op_name = "START_OF_LINE_U"
class StartOfString(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.START_OF_STRING
    _op_name = "START_OF_STRING"
class StartOfWord(ZeroWidthBase):
    # Zero-width node; only supplies the opcode and dump name.
    _opcode = OP.START_OF_WORD
    _op_name = "START_OF_WORD"
class String(RegexBase):
    """Matches a literal run of characters (stored as codepoint values)."""
    _opcode = {(NOCASE, False): OP.STRING, (IGNORECASE, False): OP.STRING_IGN,
      (FULLCASE, False): OP.STRING, (FULLIGNORECASE, False): OP.STRING_FLD,
      (NOCASE, True): OP.STRING_REV, (IGNORECASE, True): OP.STRING_IGN_REV,
      (FULLCASE, True): OP.STRING_REV, (FULLIGNORECASE, True):
      OP.STRING_FLD_REV}

    def __init__(self, characters, case_flags=NOCASE):
        # NOTE(review): unlike sibling nodes, this does not call
        # RegexBase.__init__ — confirm no base-class state is needed here.
        self.characters = tuple(characters)
        self.case_flags = case_flags
        if (self.case_flags & FULLIGNORECASE) == FULLIGNORECASE:
            # Precompute the fully case-folded form for matching.
            folded_characters = []
            for char in self.characters:
                folded = _regex.fold_case(FULL_CASE_FOLDING, chr(char))
                folded_characters.extend(ord(c) for c in folded)
        else:
            folded_characters = self.characters
        self.folded_characters = tuple(folded_characters)
        # Set when this string is the pattern's required substring.
        self.required = False
        self._key = self.__class__, self.characters, self.case_flags

    def get_firstset(self, reverse):
        # The first character depends on the matching direction.
        if reverse:
            pos = -1
        else:
            pos = 0
        return set([Character(self.characters[pos],
          case_flags=self.case_flags)])

    def has_simple_start(self):
        return True

    def _compile(self, reverse, fuzzy):
        flags = 0
        if fuzzy:
            flags |= FUZZY_OP
        if self.required:
            flags |= REQUIRED_OP
        return [(self._opcode[self.case_flags, reverse], flags,
          len(self.folded_characters)) + self.folded_characters]

    def _dump(self, indent, reverse):
        display = ascii("".join(chr(c) for c in self.characters)).lstrip("bu")
        print("{}STRING {}{}".format(INDENT * indent, display,
          CASE_TEXT[self.case_flags]))

    def max_width(self):
        return len(self.folded_characters)

    def get_required_string(self, reverse):
        # A literal string is itself the required string, at offset 0.
        return 0, self
class Literal(String):
    """A String that dumps itself as individual CHARACTER MATCH lines."""
    def _dump(self, indent, reverse):
        for c in self.characters:
            display = ascii(chr(c)).lstrip("bu")
            print("{}CHARACTER MATCH {}{}".format(INDENT * indent, display,
              CASE_TEXT[self.case_flags]))
class StringSet(RegexBase):
    """Matches any string from a named list supplied via pattern kwargs.

    The (name, case_flags) pair is registered in info.named_lists_used and
    compiled as an index into that registry.
    """
    _opcode = {(NOCASE, False): OP.STRING_SET, (IGNORECASE, False):
      OP.STRING_SET_IGN, (FULLCASE, False): OP.STRING_SET, (FULLIGNORECASE,
      False): OP.STRING_SET_FLD, (NOCASE, True): OP.STRING_SET_REV,
      (IGNORECASE, True): OP.STRING_SET_IGN_REV, (FULLCASE, True):
      OP.STRING_SET_REV, (FULLIGNORECASE, True): OP.STRING_SET_FLD_REV}

    def __init__(self, info, name, case_flags=NOCASE):
        self.info = info
        self.name = name
        self.case_flags = case_flags
        self._key = self.__class__, self.name, self.case_flags
        self.set_key = (name, self.case_flags)
        # Register this named list, assigning it the next free index.
        if self.set_key not in info.named_lists_used:
            info.named_lists_used[self.set_key] = len(info.named_lists_used)

    def _compile(self, reverse, fuzzy):
        index = self.info.named_lists_used[self.set_key]
        items = self.info.kwargs[self.name]
        case_flags = self.case_flags
        # An empty list matches nothing; emit no code.
        if not items:
            return []
        encoding = self.info.flags & _ALL_ENCODINGS
        fold_flags = encoding | case_flags
        if fuzzy:
            # For fuzzy matching, expand the list into an optimised branch
            # of character sequences instead of a STRING_SET opcode.
            choices = [self._folded(fold_flags, i) for i in items]
            # Sort from longest to shortest.
            choices.sort(key=lambda s: (-len(s), s))
            branches = []
            for string in choices:
                branches.append(Sequence([Character(c, case_flags=case_flags)
                  for c in string]))
            if len(branches) > 1:
                branch = Branch(branches)
            else:
                branch = branches[0]
            branch = branch.optimise(self.info).pack_characters(self.info)
            return branch.compile(reverse, fuzzy)
        else:
            min_len = min(len(i) for i in items)
            max_len = max(len(self._folded(fold_flags, i)) for i in items)
            return [(self._opcode[case_flags, reverse], index, min_len,
              max_len)]

    def _dump(self, indent, reverse):
        print("{}STRING_SET {}{}".format(INDENT * indent, self.name,
          CASE_TEXT[self.case_flags]))

    def _folded(self, fold_flags, item):
        # Return the item as a list of codepoints, case-folded if it's a str.
        if isinstance(item, str):
            return [ord(c) for c in _regex.fold_case(fold_flags, item)]
        else:
            return list(item)

    def _flatten(self, s):
        # Flattens the branches, merging runs of Characters into Strings.
        # NOTE(review): not referenced from within this class's visible
        # code — presumably called externally; verify before removing.
        if isinstance(s, Branch):
            for b in s.branches:
                self._flatten(b)
        elif isinstance(s, Sequence) and s.items:
            seq = s.items
            while isinstance(seq[-1], Sequence):
                seq[-1 : ] = seq[-1].items
            n = 0
            while n < len(seq) and isinstance(seq[n], Character):
                n += 1
            if n > 1:
                seq[ : n] = [String([c.value for c in seq[ : n]],
                  case_flags=self.case_flags)]
            self._flatten(seq[-1])

    def max_width(self):
        if not self.info.kwargs[self.name]:
            return 0
        if self.case_flags & IGNORECASE:
            fold_flags = (self.info.flags & _ALL_ENCODINGS) | self.case_flags
            return max(len(_regex.fold_case(fold_flags, i)) for i in
              self.info.kwargs[self.name])
        else:
            return max(len(i) for i in self.info.kwargs[self.name])
class Source:
    """Scanner for the regular expression source string.

    Accepts either str or bytes input; bytes are decoded as latin-1 so the
    scanner always works on str internally. When ignore_space is set
    (verbose mode), whitespace and '#'-to-end-of-line comments are skipped
    between significant characters.
    """
    def __init__(self, string):
        if isinstance(string, str):
            self.string = string
            self.char_type = chr
        else:
            self.string = string.decode("latin-1")
            self.char_type = lambda c: bytes([c])
        self.pos = 0
        self.ignore_space = False
        # Empty slice of the *original* string: "" for str input, b"" for
        # bytes input.
        self.sep = string[ : 0]

    def get(self):
        # Return the next significant character, or an empty string at the
        # end of input.
        string = self.string
        pos = self.pos
        try:
            if self.ignore_space:
                while True:
                    if string[pos].isspace():
                        # Skip over the whitespace.
                        pos += 1
                    elif string[pos] == "#":
                        # Skip over the comment to the end of the line.
                        pos = string.index("\n", pos)
                    else:
                        break
            ch = string[pos]
            self.pos = pos + 1
            return ch
        except IndexError:
            # We've reached the end of the string.
            self.pos = pos
            return string[ : 0]
        except ValueError:
            # The comment extended to the end of the string.
            self.pos = len(string)
            return string[ : 0]

    def get_many(self, count=1):
        # Return up to 'count' significant characters as a string.
        string = self.string
        pos = self.pos
        try:
            if self.ignore_space:
                substring = []
                while len(substring) < count:
                    while True:
                        if string[pos].isspace():
                            # Skip over the whitespace.
                            pos += 1
                        elif string[pos] == "#":
                            # Skip over the comment to the end of the line.
                            pos = string.index("\n", pos)
                        else:
                            break
                    substring.append(string[pos])
                    pos += 1
                substring = "".join(substring)
            else:
                substring = string[pos : pos + count]
                pos += len(substring)
            self.pos = pos
            return substring
        except IndexError:
            # We've reached the end of the string.
            self.pos = len(string)
            return "".join(substring)
        except ValueError:
            # The comment extended to the end of the string.
            self.pos = len(string)
            return "".join(substring)

    def get_while(self, test_set, include=True):
        # Return the run of characters that are in test_set (or, with
        # include=False, not in it).
        string = self.string
        pos = self.pos
        if self.ignore_space:
            try:
                substring = []
                while True:
                    if string[pos].isspace():
                        # Skip over the whitespace.
                        pos += 1
                    elif string[pos] == "#":
                        # Skip over the comment to the end of the line.
                        pos = string.index("\n", pos)
                    elif (string[pos] in test_set) == include:
                        substring.append(string[pos])
                        pos += 1
                    else:
                        break
                self.pos = pos
            except IndexError:
                # We've reached the end of the string.
                self.pos = len(string)
            except ValueError:
                # The comment extended to the end of the string.
                self.pos = len(string)
            return "".join(substring)
        else:
            try:
                while (string[pos] in test_set) == include:
                    pos += 1
                substring = string[self.pos : pos]
                self.pos = pos
                return substring
            except IndexError:
                # We've reached the end of the string.
                substring = string[self.pos : pos]
                self.pos = pos
                return substring

    def skip_while(self, test_set, include=True):
        # Like get_while, but discards the characters.
        string = self.string
        pos = self.pos
        try:
            if self.ignore_space:
                while True:
                    if string[pos].isspace():
                        # Skip over the whitespace.
                        pos += 1
                    elif string[pos] == "#":
                        # Skip over the comment to the end of the line.
                        pos = string.index("\n", pos)
                    elif (string[pos] in test_set) == include:
                        pos += 1
                    else:
                        break
            else:
                while (string[pos] in test_set) == include:
                    pos += 1
            self.pos = pos
        except IndexError:
            # We've reached the end of the string.
            self.pos = len(string)
        except ValueError:
            # The comment extended to the end of the string.
            self.pos = len(string)

    def match(self, substring):
        # If the next significant characters equal 'substring', consume
        # them and return True; otherwise leave the position unchanged and
        # return False.
        string = self.string
        pos = self.pos
        if self.ignore_space:
            try:
                for c in substring:
                    while True:
                        if string[pos].isspace():
                            # Skip over the whitespace.
                            pos += 1
                        elif string[pos] == "#":
                            # Skip over the comment to the end of the line.
                            pos = string.index("\n", pos)
                        else:
                            break
                    if string[pos] != c:
                        return False
                    pos += 1
                self.pos = pos
                return True
            except IndexError:
                # We've reached the end of the string.
                return False
            except ValueError:
                # The comment extended to the end of the string.
                return False
        else:
            if not string.startswith(substring, pos):
                return False
            self.pos = pos + len(substring)
            return True

    def expect(self, substring):
        # Consume 'substring' or raise a parse error.
        if not self.match(substring):
            raise error("missing {}".format(substring), self.string, self.pos)

    def at_end(self):
        # True when only whitespace/comments (in verbose mode) remain.
        string = self.string
        pos = self.pos
        try:
            if self.ignore_space:
                while True:
                    if string[pos].isspace():
                        pos += 1
                    elif string[pos] == "#":
                        pos = string.index("\n", pos)
                    else:
                        break
            return pos >= len(string)
        except IndexError:
            # We've reached the end of the string.
            return True
        except ValueError:
            # The comment extended to the end of the string.
            return True
class Info:
    """Info about the regular expression.

    Holds the state accumulated while parsing a pattern: flags, capture
    group numbering and naming, named lists, and bookkeeping for group
    calls and nested ("private") groups.
    """

    def __init__(self, flags=0, char_type=None, kwargs=None):
        """Initialise the parse state.

        `kwargs` holds the named lists (\\L<name>) supplied by the caller.
        The previous signature used a mutable default (`kwargs={}`), which
        shares one dict object across every call; a None sentinel avoids
        that while remaining backward compatible.
        """
        flags |= DEFAULT_FLAGS[(flags & _ALL_VERSIONS) or DEFAULT_VERSION]
        self.flags = flags
        self.global_flags = flags
        self.inline_locale = False

        # Named lists provided as keyword arguments.
        self.kwargs = {} if kwargs is None else kwargs

        self.group_count = 0
        self.group_index = {}
        self.group_name = {}
        self.char_type = char_type
        self.named_lists_used = {}
        self.open_groups = []
        self.open_group_count = {}
        self.defined_groups = {}
        self.group_calls = []
        self.private_groups = {}

    def open_group(self, name=None):
        """Open a (possibly named) capture group and return its number.

        Re-opening a group that is already open (a nested named group) gets
        a temporary negative "private" alias, resolved to a proper positive
        number later.
        """
        group = self.group_index.get(name)
        if group is None:
            while True:
                self.group_count += 1
                if name is None or self.group_count not in self.group_name:
                    break

            group = self.group_count
            if name:
                self.group_index[name] = group
                self.group_name[group] = name

        if group in self.open_groups:
            # We have a nested named group. We'll assign it a private group
            # number, initially negative until we can assign a proper
            # (positive) number.
            group_alias = -(len(self.private_groups) + 1)
            self.private_groups[group_alias] = group
            group = group_alias

        self.open_groups.append(group)
        self.open_group_count[group] = self.open_group_count.get(group, 0) + 1

        return group

    def close_group(self):
        "Close the innermost open group."
        self.open_groups.pop()

    def is_open_group(self, name):
        """Return whether the group called `name` (a group name or a digit
        string) is currently open."""
        # In version 1, a group reference can refer to an open group. We'll
        # just pretend the group isn't open.
        version = (self.flags & _ALL_VERSIONS) or DEFAULT_VERSION
        if version == VERSION1:
            return False

        if name.isdigit():
            group = int(name)
        else:
            group = self.group_index.get(name)

        return group in self.open_groups
def _check_group_features(info, parsed):
    """Checks whether the reverse and fuzzy features of the group calls match
    the groups which they call.

    Stores the results on `info` as `call_refs` (a dict keyed by
    (group, reverse, fuzzy)) and `additional_groups` (copies of groups that
    must be compiled with different reverse/fuzzy features).
    """
    call_refs = {}
    additional_groups = []
    for call, reverse, fuzzy in info.group_calls:
        # Look up the reference of this group call.
        key = (call.group, reverse, fuzzy)
        ref = call_refs.get(key)
        if ref is None:
            # This group doesn't have a reference yet, so look up its
            # features.
            if call.group == 0:
                # Calling the pattern as a whole.
                rev = bool(info.flags & REVERSE)
                fuz = isinstance(parsed, Fuzzy)
                if (rev, fuz) != (reverse, fuzzy):
                    # The pattern as a whole doesn't have the features we
                    # want, so we'll need to make a copy of it with the
                    # desired features.
                    additional_groups.append((parsed, reverse, fuzzy))
            else:
                # Calling a capture group.
                def_info = info.defined_groups[call.group]
                group = def_info[0]
                if def_info[1 : ] != (reverse, fuzzy):
                    # The group doesn't have the features we want, so we'll
                    # need to make a copy of it with the desired features.
                    additional_groups.append((group, reverse, fuzzy))

            ref = len(call_refs)
            call_refs[key] = ref

        call.call_ref = ref

    info.call_refs = call_refs
    info.additional_groups = additional_groups
def _get_required_string(parsed, flags):
    """Gets the required string and related info of a parsed pattern.

    Returns (offset, folded characters, case flags); offset is -1 when it
    is unbounded, and the result is (0, (), 0) when no required string
    exists.
    """
    req_offset, required = parsed.get_required_string(bool(flags & REVERSE))
    if not required:
        # No required string could be determined.
        return 0, (), 0

    required.required = True

    if req_offset >= UNLIMITED:
        # The offset is unbounded; report that as -1.
        req_offset = -1

    req_flags = required.case_flags
    if not (flags & UNICODE):
        req_flags &= ~UNICODE

    return req_offset, required.folded_characters, req_flags
class Scanner:
    """A lexical scanner: combines (phrase pattern, action) pairs from a
    lexicon into one compound alternation and matches it repeatedly
    against an input string."""

    def __init__(self, lexicon, flags=0):
        """Build and compile the compound pattern from `lexicon`, a
        sequence of (phrase, action) pairs.  Capture groups inside each
        phrase are removed; one group per phrase identifies which
        alternative matched.
        """
        self.lexicon = lexicon

        # Combine phrases into a compound pattern.
        patterns = []
        for phrase, action in lexicon:
            # Parse the regular expression.
            source = Source(phrase)
            info = Info(flags, source.char_type)
            source.ignore_space = bool(info.flags & VERBOSE)
            parsed = _parse_pattern(source, info)
            if not source.at_end():
                raise error("trailing characters", source.string, source.pos)

            # We want to forbid capture groups within each phrase.
            patterns.append(parsed.remove_captures())

        # Combine all the subpatterns into one pattern.
        info = Info(flags)
        patterns = [Group(info, g + 1, p) for g, p in enumerate(patterns)]
        parsed = Branch(patterns)

        # Optimise the compound pattern.
        parsed = parsed.optimise(info)
        parsed = parsed.pack_characters(info)

        # Get the required string.
        req_offset, req_chars, req_flags = _get_required_string(parsed,
          info.flags)

        # Check the features of the groups.
        _check_group_features(info, parsed)

        # Complain if there are any group calls. They are not supported by the
        # Scanner class.
        if info.call_refs:
            raise error("recursive regex not supported by Scanner",
              source.string, source.pos)

        reverse = bool(info.flags & REVERSE)

        # Compile the compound pattern. The result is a list of tuples.
        code = parsed.compile(reverse) + [(OP.SUCCESS, )]

        # Flatten the code into a list of ints.
        code = _flatten_code(code)

        if not parsed.has_simple_start():
            # Get the first set, if possible.
            try:
                fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
                fs_code = _flatten_code(fs_code)
                code = fs_code + code
            except _FirstSetError:
                pass

        # Check the global flags for conflicts.
        version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
        if version not in (0, VERSION0, VERSION1):
            raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")

        # Create the PatternObject.
        #
        # Local flags like IGNORECASE affect the code generation, but aren't
        # needed by the PatternObject itself. Conversely, global flags like
        # LOCALE _don't_ affect the code generation but _are_ needed by the
        # PatternObject.
        self.scanner = _regex.compile(None, (flags & GLOBAL_FLAGS) | version,
          code, {}, {}, {}, [], req_offset, req_chars, req_flags,
          len(patterns))

    def scan(self, string):
        """Repeatedly match `string` from the front, invoking each matched
        phrase's action.  Callable actions receive (self, matched text) and
        their non-None results are collected; non-callable, non-None
        actions are collected as-is.  Returns (results, unmatched remainder
        of the string).  Scanning stops at the first failure or zero-width
        match.
        """
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while True:
            m = match()
            if not m:
                break

            j = m.end()
            if i == j:
                # Zero-width match: stop rather than loop forever.
                break

            action = self.lexicon[m.lastindex - 1][1]
            if hasattr(action, '__call__'):
                # The match object is exposed to the action as self.match.
                self.match = m
                action = action(self, m.group())

            if action is not None:
                append(action)

            i = j

        return result, string[i : ]
# Get the known properties dict.
PROPERTIES = _regex.get_properties()

# Build the inverse of the properties dict: property id -> (name, values),
# where the longest known alias is kept as the canonical name, and `values`
# maps each value id to its longest known value name.  Note that the
# `prop_values` dict is shared by reference with the entry in
# PROPERTY_NAMES, so the inner loop updates it in place.
PROPERTY_NAMES = {}
for prop_name, (prop_id, values) in PROPERTIES.items():
    name, prop_values = PROPERTY_NAMES.get(prop_id, ("", {}))
    name = max(name, prop_name, key=len)
    PROPERTY_NAMES[prop_id] = name, prop_values

    for val_name, val_id in values.items():
        prop_values[val_id] = max(prop_values.get(val_id, ""), val_name,
          key=len)

# Character escape sequences.
CHARACTER_ESCAPES = {
    "a": "\a",
    "b": "\b",
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
    "v": "\v",
}

# Predefined character set escape sequences.
CHARSET_ESCAPES = {
    "d": lookup_property(None, "Digit", True),
    "D": lookup_property(None, "Digit", False),
    "s": lookup_property(None, "Space", True),
    "S": lookup_property(None, "Space", False),
    "w": lookup_property(None, "Word", True),
    "W": lookup_property(None, "Word", False),
}

# Positional escape sequences.
POSITION_ESCAPES = {
    "A": StartOfString(),
    "b": Boundary(),
    "B": Boundary(False),
    "m": StartOfWord(),
    "M": EndOfWord(),
    "Z": EndOfString(),
}

# Positional escape sequences when WORD flag set: \b, \B, \m and \M switch
# to the default (Unicode) word-boundary behaviour.
WORD_POSITION_ESCAPES = dict(POSITION_ESCAPES)
WORD_POSITION_ESCAPES.update({
    "b": DefaultBoundary(),
    "B": DefaultBoundary(False),
    "m": DefaultStartOfWord(),
    "M": DefaultEndOfWord(),
})
| Python |
#
# Secret Labs' Regular Expression Engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
# 2010-01-16 mrab Python front-end re-written and extended
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to those
found in Perl. It supports both 8-bit and Unicode strings; both the pattern and
the strings being processed can contain null bytes and characters outside the
US ASCII range.
Regular expressions can contain both special and ordinary characters. Most
ordinary characters, like "A", "a", or "0", are the simplest regular
expressions; they simply match themselves. You can concatenate ordinary
characters, so last matches the string 'last'.
There are a few differences between the old (legacy) behaviour and the new
(enhanced) behaviour, which are indicated by VERSION0 or VERSION1.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the
newline at the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding
RE. Greedy means that it will match as many repetitions
as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding
RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special
characters.
*+,++,?+ Possessive versions of the previous three special
characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
{m,n}+ Possessive version of the above.
{...} Fuzzy matching constraints.
"\\" Either escapes special characters or signals a special
sequence.
[...] Indicates a set of characters. A "^" as the first
character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses. The contents are
captured and can be retrieved or matched later in the
string.
(?flags-flags) VERSION1: Sets/clears the flags for the remainder of
the group or pattern; VERSION0: Sets the flags for the
entire pattern.
(?:...) Non-capturing version of regular parentheses.
(?>...) Atomic non-capturing version of regular parentheses.
(?flags-flags:...) Non-capturing version of regular parentheses with local
flags.
(?P<name>...) The substring matched by the group is accessible by
name.
(?<name>...) The substring matched by the group is accessible by
name.
(?P=name) Matches the text matched earlier by the group named
name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the
string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ....
(?<!...) Matches if not preceded by ....
(?(id)yes|no) Matches yes pattern if group id matched, the (optional)
no pattern otherwise.
(?|...|...) (?|A|B), creates an RE that will match either A or B,
but reuses capture group numbers across the
alternatives.
The fuzzy matching constraints are: "i" to permit insertions, "d" to permit
deletions, "s" to permit substitutions, "e" to permit any of these. Limits are
optional with "<=" and "<". If any type of error is provided then any type not
provided is not permitted.
A cost equation may be provided.
Examples:
(?:fuzzy){i<=2}
(?:fuzzy){i<=1,s<=2,d<=1,1i+1s+1d<3}
VERSION1: Set operators are supported, and a set can include nested sets. The
set operators, in order of increasing precedence, are:
|| Set union ("x||y" means "x or y").
~~ (double tilde) Symmetric set difference ("x~~y" means "x or y, but not
both").
&& Set intersection ("x&&y" means "x and y").
-- (double dash) Set difference ("x--y" means "x but not y").
Implicit union, ie, simple juxtaposition like in [ab], has the highest
precedence.
VERSION0 and VERSION1:
The special sequences consist of "\\" and a character from the list below. If
the ordinary character is not on the list, then the resulting RE will match the
second character.
\number Matches the contents of the group of the same number if
number is no more than 2 digits, otherwise the character
with the 3-digit octal code.
\a Matches the bell character.
\A Matches only at the start of the string.
\b Matches the empty string, but only at the start or end of a
word.
\B Matches the empty string, but not at the start or end of a
word.
\d Matches any decimal digit; equivalent to the set [0-9] when
matching a bytestring or a Unicode string with the ASCII
flag, or the whole range of Unicode digits when matching a
Unicode string.
\D Matches any non-digit character; equivalent to [^\d].
\f Matches the formfeed character.
\g<name> Matches the text matched by the group named name.
\G Matches the empty string, but only at the position where
the search started.
\L<name> Named list. The list is provided as a keyword argument.
\m Matches the empty string, but only at the start of a word.
\M Matches the empty string, but only at the end of a word.
\n Matches the newline character.
\N{name} Matches the named character.
\p{name=value} Matches the character if its property has the specified
value.
\P{name=value} Matches the character if its property hasn't the specified
value.
\r Matches the carriage-return character.
\s Matches any whitespace character; equivalent to
[ \t\n\r\f\v].
\S Matches any non-whitespace character; equivalent to [^\s].
\t Matches the tab character.
\uXXXX Matches the Unicode codepoint with 4-digit hex code XXXX.
\UXXXXXXXX Matches the Unicode codepoint with 8-digit hex code
XXXXXXXX.
\v Matches the vertical tab character.
\w Matches any alphanumeric character; equivalent to
[a-zA-Z0-9_] when matching a bytestring or a Unicode string
with the ASCII flag, or the whole range of Unicode
alphanumeric characters (letters plus digits plus
underscore) when matching a Unicode string. With LOCALE, it
will match the set [0-9_] plus characters defined as
letters for the current locale.
\W Matches the complement of \w; equivalent to [^\w].
\xXX Matches the character with 2-digit hex code XX.
\X Matches a grapheme.
\Z Matches only at the end of the string.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern at the beginning of a string.
fullmatch Match a regular expression pattern against all of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string using a
template string.
subf Substitute occurrences of a pattern found in a string using a
format string.
subn Same as sub, but also return the number of substitutions made.
subfn Same as subf, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern. VERSION1: will
split at zero-width match; VERSION0: won't split at zero-width
match.
splititer Return an iterator yielding the parts of a split string.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a Pattern object.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics or special characters in a
string.
Most of the functions support a concurrent parameter: if True, the GIL will be
released during matching, allowing other Python threads to run concurrently. If
the string changes during matching, the behaviour is undefined. This parameter
is not needed when working on the builtin (immutable) string classes.
Some of the functions in this module take flags as optional parameters. Most of
these flags can also be set within an RE:
A a ASCII Make \w, \W, \b, \B, \d, and \D match the
corresponding ASCII character categories. Default
when matching a bytestring.
B b BESTMATCH Find the best fuzzy match (default is first).
D DEBUG Print the parsed pattern.
F f FULLCASE Use full case-folding when performing
case-insensitive matching in Unicode.
I i IGNORECASE Perform case-insensitive matching.
L L LOCALE Make \w, \W, \b, \B, \d, and \D dependent on the
current locale. (One byte per character only.)
M m MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string. "$" matches the end of lines
(before a newline) as well as the end of the string.
E e ENHANCEMATCH Attempt to improve the fit after finding the first
fuzzy match.
R r REVERSE Searches backwards.
S s DOTALL "." matches any character at all, including the
newline.
U u UNICODE Make \w, \W, \b, \B, \d, and \D dependent on the
Unicode locale. Default when matching a Unicode
string.
V0 V0 VERSION0 Turn on the old legacy behaviour.
V1 V1 VERSION1 Turn on the new enhanced behaviour. This flag
includes the FULLCASE flag.
W w WORD Make \b and \B work with default Unicode word breaks
and make ".", "^" and "$" work with Unicode line
breaks.
X x VERBOSE Ignore whitespace and comments for nicer looking REs.
This module also defines an exception 'error'.
"""
# Public symbols.  The flag constants themselves are re-exported from
# _regex_core (imported below via `from _regex_core import *`).
__all__ = ["compile", "escape", "findall", "finditer", "fullmatch", "match",
  "purge", "search", "split", "splititer", "sub", "subf", "subfn", "subn",
  "template", "Scanner", "A", "ASCII", "B", "BESTMATCH", "D", "DEBUG", "E",
  "ENHANCEMATCH", "S", "DOTALL", "F", "FULLCASE", "I", "IGNORECASE", "L",
  "LOCALE", "M", "MULTILINE", "R", "REVERSE", "T", "TEMPLATE", "U", "UNICODE",
  "V0", "VERSION0", "V1", "VERSION1", "X", "VERBOSE", "W", "WORD", "error",
  "Regex"]

__version__ = "2.4.58"
# --------------------------------------------------------------------
# Public interface.
def match(pattern, string, flags=0, pos=None, endpos=None, partial=False,
  concurrent=None, **kwargs):
    """Try to apply the pattern at the start of the string, returning a match
    object, or None if no match was found."""
    pat = _compile(pattern, flags, kwargs)
    return pat.match(string, pos, endpos, concurrent, partial)
def fullmatch(pattern, string, flags=0, pos=None, endpos=None, partial=False,
  concurrent=None, **kwargs):
    """Try to apply the pattern against all of the string, returning a match
    object, or None if no match was found."""
    pat = _compile(pattern, flags, kwargs)
    return pat.fullmatch(string, pos, endpos, concurrent, partial)
def search(pattern, string, flags=0, pos=None, endpos=None, partial=False,
  concurrent=None, **kwargs):
    """Search through string looking for a match to the pattern, returning a
    match object, or None if no match was found."""
    pat = _compile(pattern, flags, kwargs)
    return pat.search(string, pos, endpos, concurrent, partial)
def sub(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Return the string obtained by replacing the leftmost (or rightmost with a
    reverse pattern) non-overlapping occurrences of the pattern in string by the
    replacement repl. repl can be either a string or a callable; if a string,
    backslash escapes in it are processed; if a callable, it's passed the match
    object and must return a replacement string to be used."""
    pat = _compile(pattern, flags, kwargs)
    return pat.sub(repl, string, count, pos, endpos, concurrent)
def subf(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Return the string obtained by replacing the leftmost (or rightmost with a
    reverse pattern) non-overlapping occurrences of the pattern in string by the
    replacement format. format can be either a string or a callable; if a string,
    it's treated as a format string; if a callable, it's passed the match object
    and must return a replacement string to be used."""
    pat = _compile(pattern, flags, kwargs)
    return pat.subf(format, string, count, pos, endpos, concurrent)
def subn(pattern, repl, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Return a 2-tuple containing (new_string, number). new_string is the string
    obtained by replacing the leftmost (or rightmost with a reverse pattern)
    non-overlapping occurrences of the pattern in the source string by the
    replacement repl. number is the number of substitutions that were made. repl
    can be either a string or a callable; if a string, backslash escapes in it
    are processed; if a callable, it's passed the match object and must return a
    replacement string to be used."""
    pat = _compile(pattern, flags, kwargs)
    return pat.subn(repl, string, count, pos, endpos, concurrent)
def subfn(pattern, format, string, count=0, flags=0, pos=None, endpos=None,
  concurrent=None, **kwargs):
    """Return a 2-tuple containing (new_string, number). new_string is the string
    obtained by replacing the leftmost (or rightmost with a reverse pattern)
    non-overlapping occurrences of the pattern in the source string by the
    replacement format. number is the number of substitutions that were made. format
    can be either a string or a callable; if a string, it's treated as a format
    string; if a callable, it's passed the match object and must return a
    replacement string to be used."""
    pat = _compile(pattern, flags, kwargs)
    return pat.subfn(format, string, count, pos, endpos, concurrent)
def split(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
    """Split the source string by the occurrences of the pattern, returning a
    list containing the resulting substrings. If capturing parentheses are used
    in pattern, then the text of all groups in the pattern are also returned as
    part of the resulting list. If maxsplit is nonzero, at most maxsplit splits
    occur, and the remainder of the string is returned as the final element of
    the list."""
    pat = _compile(pattern, flags, kwargs)
    return pat.split(string, maxsplit, concurrent)
def splititer(pattern, string, maxsplit=0, flags=0, concurrent=None, **kwargs):
    "Return an iterator yielding the parts of a split string."
    pat = _compile(pattern, flags, kwargs)
    return pat.splititer(string, maxsplit, concurrent)
def findall(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
  concurrent=None, **kwargs):
    """Return a list of all matches in the string. The matches may be overlapped
    if overlapped is True. If one or more groups are present in the pattern,
    return a list of groups; this will be a list of tuples if the pattern has
    more than one group. Empty matches are included in the result."""
    pat = _compile(pattern, flags, kwargs)
    return pat.findall(string, pos, endpos, overlapped, concurrent)
def finditer(pattern, string, flags=0, pos=None, endpos=None, overlapped=False,
  partial=False, concurrent=None, **kwargs):
    """Return an iterator over all matches in the string. The matches may be
    overlapped if overlapped is True. For each match, the iterator returns a
    match object. Empty matches are included in the result."""
    pat = _compile(pattern, flags, kwargs)
    return pat.finditer(string, pos, endpos, overlapped, concurrent, partial)
def compile(pattern, flags=0, **kwargs):
    "Compile a regular expression pattern, returning a pattern object."
    # `kwargs` supplies any named lists (\L<name>) used by the pattern.
    return _compile(pattern, flags, kwargs)
def purge():
    """Clear the regular expression cache.

    Empties the compiled-pattern cache and the locale-sensitivity record.
    """
    # Mutate the caches under the same lock _compile uses when shrinking
    # them, so a concurrent compile can't interleave with the clearing.
    with _cache_lock:
        _cache.clear()
        _locale_sensitive.clear()
def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object."
    # NOTE(review): unlike compile(), no **kwargs are accepted here, so
    # template patterns can't use named lists — confirm that's intended.
    return _compile(pattern, flags | TEMPLATE)
def escape(pattern, special_only=False):
    """Escape all non-alphanumeric characters or special characters in pattern.

    With special_only=True, only regex metacharacters (and NUL) are escaped;
    otherwise every character outside the alphanumeric set is escaped.
    Accepts and returns either str or bytes, matching the input type.
    """
    if isinstance(pattern, str):
        chunks = []
        for ch in pattern:
            if ch == "\x00":
                # NUL is always written as an octal escape.
                chunks.append("\\000")
            elif (ch in _METACHARS) if special_only else (ch not in _ALNUM):
                chunks.append("\\" + ch)
            else:
                chunks.append(ch)

        return "".join(chunks)
    else:
        out = bytearray()
        for b in pattern:
            if b == 0:
                # NUL is always written as an octal escape.
                out.extend(b"\\000")
            elif (chr(b) in _METACHARS) if special_only else (chr(b) not in
              _ALNUM):
                out.extend(b"\\")
                out.append(b)
            else:
                out.append(b)

        return bytes(out)
# --------------------------------------------------------------------
# Internals.
import _regex_core
import _regex
from threading import RLock as _RLock
from locale import getlocale as _getlocale
from _regex_core import *
from _regex_core import (_ALL_VERSIONS, _ALL_ENCODINGS, _FirstSetError,
_UnscopedFlagSet, _check_group_features, _compile_firstset,
_compile_replacement, _flatten_code, _fold_case, _get_required_string,
_parse_pattern, _shrink_cache)
from _regex_core import (ALNUM as _ALNUM, Info as _Info, OP as _OP, Source as
_Source, Fuzzy as _Fuzzy)
# Version 0 is the old behaviour, compatible with the original 're' module.
# Version 1 is the new behaviour, which differs slightly.
DEFAULT_VERSION = VERSION0
_METACHARS = frozenset("()[]{}?*+|^$\\.")
_regex_core.DEFAULT_VERSION = DEFAULT_VERSION
# Caches for the patterns and replacements.
_cache = {}
_cache_lock = _RLock()
_named_args = {}
_replacement_cache = {}
_locale_sensitive = {}
# Maximum size of the cache.
_MAXCACHE = 500
_MAXREPCACHE = 500
def _compile(pattern, flags=0, kwargs={}):
    """Compiles a regular expression to a PatternObject.

    Results are memoised in the module-level `_cache`, keyed by the pattern,
    its type, the flags, the named lists supplied, the default version and
    the locale; compiles with the DEBUG flag bypass the cache.
    """
    # We won't bother to cache the pattern if we're debugging.
    debugging = (flags & DEBUG) != 0

    # What locale is this pattern using?
    locale_key = (type(pattern), pattern)
    if _locale_sensitive.get(locale_key, True) or (flags & LOCALE) != 0:
        # This pattern is, or might be, locale-sensitive.
        pattern_locale = _getlocale()[1]
    else:
        # This pattern is definitely not locale-sensitive.
        pattern_locale = None

    if not debugging:
        try:
            # Do we know what keyword arguments are needed?
            args_key = pattern, type(pattern), flags
            args_needed = _named_args[args_key]

            # Are we being provided with its required keyword arguments?
            args_supplied = set()
            if args_needed:
                for k, v in args_needed:
                    try:
                        args_supplied.add((k, frozenset(kwargs[k])))
                    except KeyError:
                        raise error("missing named list: {!r}".format(k))

            args_supplied = frozenset(args_supplied)

            # Have we already seen this regular expression and named list?
            pattern_key = (pattern, type(pattern), flags, args_supplied,
              DEFAULT_VERSION, pattern_locale)
            return _cache[pattern_key]
        except KeyError:
            # It's a new pattern, or new named list for a known pattern.
            pass

    # Guess the encoding from the class of the pattern string.
    if isinstance(pattern, str):
        guess_encoding = UNICODE
    elif isinstance(pattern, bytes):
        guess_encoding = ASCII
    elif isinstance(pattern, _pattern_type):
        if flags:
            raise ValueError("cannot process flags argument with a compiled pattern")

        # An already-compiled pattern is returned unchanged.
        return pattern
    else:
        raise TypeError("first argument must be a string or compiled pattern")

    # Set the default version in the core code in case it has been changed.
    _regex_core.DEFAULT_VERSION = DEFAULT_VERSION

    caught_exception = None
    global_flags = flags

    # Parsing may discover global (unscoped) flags part-way through, in
    # which case it's restarted with those flags applied from the start.
    while True:
        try:
            source = _Source(pattern)
            info = _Info(global_flags, source.char_type, kwargs)
            info.guess_encoding = guess_encoding
            source.ignore_space = bool(info.flags & VERBOSE)
            parsed = _parse_pattern(source, info)
            break
        except _UnscopedFlagSet:
            # Remember the global flags for the next attempt.
            global_flags = info.global_flags
        except error as e:
            caught_exception = e

        if caught_exception:
            raise error(caught_exception.msg, caught_exception.pattern,
              caught_exception.pos)

    if not source.at_end():
        raise error("trailing characters in pattern", pattern, source.pos)

    # Check the global flags for conflicts.
    version = (info.flags & _ALL_VERSIONS) or DEFAULT_VERSION
    if version not in (0, VERSION0, VERSION1):
        raise ValueError("VERSION0 and VERSION1 flags are mutually incompatible")

    if (info.flags & _ALL_ENCODINGS) not in (0, ASCII, LOCALE, UNICODE):
        raise ValueError("ASCII, LOCALE and UNICODE flags are mutually incompatible")

    if isinstance(pattern, bytes) and (info.flags & UNICODE):
        raise ValueError("cannot use UNICODE flag with a bytes pattern")

    if not (info.flags & _ALL_ENCODINGS):
        # No encoding flag was given; infer it from the pattern's type.
        if isinstance(pattern, str):
            info.flags |= UNICODE
        else:
            info.flags |= ASCII

    reverse = bool(info.flags & REVERSE)
    fuzzy = isinstance(parsed, _Fuzzy)

    # Remember whether this pattern has an inline locale flag.
    _locale_sensitive[locale_key] = info.inline_locale

    # Should we print the parsed pattern?
    if flags & DEBUG:
        parsed.dump(indent=0, reverse=reverse)

    # Fix the group references.
    parsed.fix_groups(pattern, reverse, False)

    # Optimise the parsed pattern.
    parsed = parsed.optimise(info)
    parsed = parsed.pack_characters(info)

    # Get the required string.
    req_offset, req_chars, req_flags = _get_required_string(parsed, info.flags)

    # Build the named lists.
    named_lists = {}
    named_list_indexes = [None] * len(info.named_lists_used)
    args_needed = set()
    for key, index in info.named_lists_used.items():
        name, case_flags = key
        values = frozenset(kwargs[name])
        if case_flags:
            items = frozenset(_fold_case(info, v) for v in values)
        else:
            items = values
        named_lists[name] = values
        named_list_indexes[index] = items
        args_needed.add((name, values))

    # Check the features of the groups.
    _check_group_features(info, parsed)

    # Compile the parsed pattern. The result is a list of tuples.
    code = parsed.compile(reverse)

    # Is there a group call to the pattern as a whole?
    key = (0, reverse, fuzzy)
    ref = info.call_refs.get(key)
    if ref is not None:
        code = [(_OP.CALL_REF, ref)] + code + [(_OP.END, )]

    # Add the final 'success' opcode.
    code += [(_OP.SUCCESS, )]

    # Compile the additional copies of the groups that we need.
    for group, rev, fuz in info.additional_groups:
        code += group.compile(rev, fuz)

    # Flatten the code into a list of ints.
    code = _flatten_code(code)

    if not parsed.has_simple_start():
        # Get the first set, if possible.
        try:
            fs_code = _compile_firstset(info, parsed.get_firstset(reverse))
            fs_code = _flatten_code(fs_code)
            code = fs_code + code
        except _FirstSetError:
            pass

    # The named capture groups.
    index_group = dict((v, n) for n, v in info.group_index.items())

    # Create the PatternObject.
    #
    # Local flags like IGNORECASE affect the code generation, but aren't needed
    # by the PatternObject itself. Conversely, global flags like LOCALE _don't_
    # affect the code generation but _are_ needed by the PatternObject.
    compiled_pattern = _regex.compile(pattern, info.flags | version, code,
      info.group_index, index_group, named_lists, named_list_indexes,
      req_offset, req_chars, req_flags, info.group_count)

    # Do we need to reduce the size of the cache?
    if len(_cache) >= _MAXCACHE:
        with _cache_lock:
            _shrink_cache(_cache, _named_args, _locale_sensitive, _MAXCACHE)

    if not debugging:
        if (info.flags & LOCALE) == 0:
            pattern_locale = None

        args_needed = frozenset(args_needed)

        # Store this regular expression and named list.
        pattern_key = (pattern, type(pattern), flags, args_needed,
          DEFAULT_VERSION, pattern_locale)
        _cache[pattern_key] = compiled_pattern

        # Store what keyword arguments are needed.
        _named_args[args_key] = args_needed

    return compiled_pattern
def _compile_replacement_helper(pattern, template):
    """Compiles a replacement template.

    Called by the _regex module; results are memoised per
    (pattern, flags, template).
    """
    # Have we seen this before?
    key = pattern.pattern, pattern.flags, template
    cached = _replacement_cache.get(key)
    if cached is not None:
        return cached

    if len(_replacement_cache) >= _MAXREPCACHE:
        _replacement_cache.clear()

    is_unicode = isinstance(template, str)
    if is_unicode:
        make_string = lambda char_codes: "".join(chr(c) for c in char_codes)
    else:
        make_string = bytes

    source = _Source(template)
    compiled = []
    literal = []
    while True:
        ch = source.get()
        if not ch:
            break

        if ch != "\\":
            literal.append(ord(ch))
            continue

        # '_compile_replacement' will return either an int group reference
        # or a string literal. It returns items (plural) in order to handle
        # a 2-character literal (an invalid escape sequence).
        is_group, items = _compile_replacement(source, pattern, is_unicode)
        if is_group:
            # It's a group, so first flush the pending literal.
            if literal:
                compiled.append(make_string(literal))
                literal = []
            compiled.extend(items)
        else:
            literal.extend(items)

    # Flush the remaining literal.
    if literal:
        compiled.append(make_string(literal))

    _replacement_cache[key] = compiled

    return compiled
# We define _pattern_type here after all the support objects have been defined.
_pattern_type = type(_compile("", 0, {}))
# We'll define an alias for the 'compile' function so that the repr of a
# pattern object is eval-able.
Regex = compile
# Register myself for pickling.
import copyreg as _copy_reg
def _pickle(p):
    # Pickle a pattern as a call to _compile with its source and flags;
    # the pattern is recompiled (not serialized) on unpickling.
    return _compile, (p.pattern, p.flags)
_copy_reg.pickle(_pattern_type, _pickle, _compile)
| Python |
#!/usr/bin/env python
"""Distutils build script for the 'regex' package.

Builds the pure-Python modules plus the _regex C extension from the
major-version-specific source directory (Python2/ or Python3/).
"""
import os
import sys
from distutils.core import setup, Extension

MAJOR, MINOR = sys.version_info[:2]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Sources live in a per-major-version directory, e.g. 'Python3'.
PKG_BASE = 'Python%i' % MAJOR
DOCS_DIR = os.path.join(BASE_DIR, 'docs')

# Read the long description up front with a context manager so the file
# handle is closed promptly instead of leaking from an open() call embedded
# in the setup() arguments.
with open(os.path.join(DOCS_DIR, 'Features.rst')) as _features:
    LONG_DESCRIPTION = _features.read()

setup(
    name='regex',
    version='2014.12.24',
    description='Alternative regular expression module, to replace re.',
    long_description=LONG_DESCRIPTION,
    # PyPI does spam protection on email addresses, no need to do it here
    author='Matthew Barnett',
    author_email='regex@mrabarnett.plus.com',
    maintainer='Matthew Barnett',
    maintainer_email='regex@mrabarnett.plus.com',
    url='https://code.google.com/p/mrab-regex-hg/',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Python Software Foundation License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Text Processing',
        'Topic :: Text Processing :: General',
    ],
    license='Python Software Foundation License',
    py_modules=['regex', '_regex_core', 'test_regex'],
    package_dir={'': PKG_BASE},
    ext_modules=[Extension('_regex', [os.path.join(PKG_BASE, '_regex.c'),
                                      os.path.join(PKG_BASE, '_regex_unicode.c')])],
)
| Python |
#!/usr/bin/python
# Copyright 2011 Google, Inc. All Rights Reserved.
# simple script to walk source tree looking for third-party licenses
# dumps resulting html page to stdout
import os, re, mimetypes, sys
# read source directories to scan from command line
SOURCE = sys.argv[1:]
# regex to find /* */ style comment blocks (DOTALL so a block may span
# lines; non-greedy so adjacent comments aren't merged into one match)
COMMENT_BLOCK = re.compile(r"(/\*.+?\*/)", re.MULTILINE | re.DOTALL)
# regex used to detect if comment block is a license
COMMENT_LICENSE = re.compile(r"(license)", re.IGNORECASE)
COMMENT_COPYRIGHT = re.compile(r"(copyright)", re.IGNORECASE)
# mime types whose files are never scanned for comment blocks
EXCLUDE_TYPES = [
    "application/xml",
    "image/png",
]
# list of known licenses; keys are derived by stripping all whitespace and
# forcing to lowercase to help combine multiple files that have same license.
# maps derived key -> License instance
KNOWN_LICENSES = {}
class License:
    """One distinct license text plus the files it was found to cover."""

    def __init__(self, license_text):
        self.license_text = license_text
        # filenames covered by this license, in first-seen order
        self.filenames = []

    def add_file(self, filename):
        """Record filename as covered by this license, skipping duplicates."""
        if filename in self.filenames:
            return
        self.filenames.append(filename)
# Key derivation: drop every non-word character so trivially reformatted
# copies of the same license collapse onto a single entry.
LICENSE_KEY = re.compile(r"[^\w]")

def find_license(license_text):
    """Return the canonical License object for license_text, creating it if new."""
    # TODO(alice): a lot these licenses are almost identical Apache licenses.
    # Most of them differ in origin/modifications. Consider combining similar
    # licenses.
    key = LICENSE_KEY.sub("", license_text).lower()
    existing = KNOWN_LICENSES.get(key)
    if existing is None:
        existing = License(license_text)
        KNOWN_LICENSES[key] = existing
    return existing
def discover_license(exact_path, filename):
    """Scan one file for license text and register it in KNOWN_LICENSES.

    Files named '*LICENSE' are treated as a whole-file license applying to
    the filename prefix; any other (non-excluded) file is searched for
    /* */ comment blocks mentioning both 'license' and 'copyright'.
    """
    # when filename ends with LICENSE, assume applies to filename prefixed
    if filename.endswith("LICENSE"):
        with open(exact_path) as file:
            license_text = file.read()
        target_filename = filename[:-len("LICENSE")]
        if target_filename.endswith("."):
            target_filename = target_filename[:-1]
        find_license(license_text).add_file(target_filename)
        return None
    # Skip binary/structured types we never expect license comments in.
    # BUG FIX: guess_type() returns a (type, encoding) tuple, so the old
    # test 'mimetype in EXCLUDE_TYPES' compared a tuple against strings and
    # never excluded anything; unpack the content type first.
    mimetype, _encoding = mimetypes.guess_type(filename)
    if mimetype in EXCLUDE_TYPES:
        return None
    # try searching for license blocks in raw file
    with open(exact_path) as file:
        raw_file = file.read()
    # include comments that have both "license" and "copyright" in the text
    for comment in COMMENT_BLOCK.finditer(raw_file):
        comment = comment.group(1)
        if COMMENT_LICENSE.search(comment) is None:
            continue
        if COMMENT_COPYRIGHT.search(comment) is None:
            continue
        find_license(comment).add_file(filename)
# Walk every requested source tree and inspect each file for license text.
for source in SOURCE:
    for root, dirs, files in os.walk(source):
        for name in files:
            discover_license(os.path.join(root, name), name)
# Emit the HTML report: one section per distinct license text, listing the
# covered files followed by the license itself.
# NOTE(review): filenames and license text are not HTML-escaped before
# being interpolated into the page -- confirm inputs are trusted.
print "<html><head><style> body { font-family: sans-serif; } pre { background-color: #eeeeee; padding: 1em; white-space: pre-wrap; } </style></head><body>"
for license in KNOWN_LICENSES.values():
    print "<h3>Notices for files:</h3><ul>"
    filenames = license.filenames
    filenames.sort()
    for filename in filenames:
        print "<li>%s</li>" % (filename)
    print "</ul>"
    print "<pre>%s</pre>" % license.license_text
print "</body></html>"
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This script will try to create the minig indexes for all the users
# with active mail_perms in the OBM database.
#
# Depends: python-psycopg2 for postgresql or python-mysqldb for mysql.
import ConfigParser;
import os;
import sys;
import httplib;
import urllib;
# Read /etc/obm/obm_conf.ini & fetches login, domain & passwords from
# the P_UserObm table.
def fetch_user_passwords():
    """Return (login, domain, password) DB rows for every active mail user."""
    print "INFO: Reading /etc/obm/obm_conf.ini..."
    config = ConfigParser.ConfigParser();
    config.readfp(open("/etc/obm/obm_conf.ini"));
    dbtype = config.get("global", "dbtype").strip();
    host = config.get("global", "host").strip();
    db = config.get("global", "db").strip();
    user = config.get("global", "user").strip();
    # the password value is stored quoted in the ini file; strip the quotes
    password = config.get("global", "password").strip(" \"");
    # NOTE(review): this echoes the database password to stdout.
    print "INFO: type: '"+dbtype+"' host: '"+host+"' db: '"+db+"' user: '"+user+"' password: '"+password+"'";
    ds = None;
    # Load the DB-API driver matching the configured database type.
    if dbtype == "PGSQL":
        import psycopg2 as dbapi2;
        print "INFO: psycopg2 drived loaded."
        ds = dbapi2.connect(host=host, database=db, user=user, password=password);
    elif dbtype == 'MYSQL':
        import MySQLdb as dbapi2;
        print "INFO: MySQLdb driver loaded."
        ds = dbapi2.connect(host=host, db=db, user=user, passwd=password);
    else:
        print "ERROR: Unrecognised dbtype: "+dbtype;
        exit(1);
    cur = ds.cursor();
    # Only active (non-archived) users with mail permissions and a PLAIN
    # password, from real (non-global) domains.
    cur.execute("""
SELECT userobm_login, domain_name, userobm_password
FROM P_UserObm
INNER JOIN P_Domain ON userobm_domain_id=domain_id
WHERE
userobm_password_type='PLAIN' AND
userobm_mail_perms=1 AND
userobm_archive=0 AND
NOT domain_global
ORDER BY domain_name, userobm_login
""");
    rows = cur.fetchall();
    cur.close();
    return rows;
# login on the minig backend using the given tuple with login, domain
# & password
def init_minig_index(host, port, row):
    """POST one user's credentials to the minig backend's firstIndexing action."""
    print "INFO: init index for "+row[0]+"@"+row[1]+" on "+host+":"+str(port);
    params = urllib.urlencode({ "login": row[0], "domain": row[1], "password": row[2] });
    headers = { "Content-type": "application/x-www-form-urlencoded" };
    con = httplib.HTTPConnection(host, port);
    try:
        con.request("POST", "/firstIndexing.do", params, headers);
        response = con.getresponse();
        print "INFO:", response.status, response.reason
        # Drain the body so the connection closes cleanly.
        data = response.read();
    except Exception, e:
        # Best-effort: report and continue with the remaining users.
        print "ERROR:", e
    con.close();
def usage():
    """Print command-line usage for this script."""
    print """usage: ./minig_init_index.py <backend_host> <backend_port>
example: ./minig_init_index.py localhost 8081""";

if __name__ == "__main__":
    # Expect exactly two arguments: backend host and port.
    if len(sys.argv) != 3:
        usage();
        exit(1);
    rows = fetch_user_passwords();
    # Index every user, reporting progress after each one.
    for i in range (len(rows)):
        init_minig_index(sys.argv[1], int(sys.argv[2]), rows[i]);
        print "INFO: progress: "+str(i+1)+"/"+str(len(rows));
| Python |
#!/usr/bin/env python
"""
tesshelper.py -- Utility operations to compare, report stats, and copy
public headers for tesseract 3.0x VS2008 Project
$RCSfile: tesshelper.py,v $ $Revision: 7ca575b377aa $ $Date: 2012/03/07 17:26:31 $
"""
r"""
Requires:
python 2.7 or greater: activestate.com
http://www.activestate.com/activepython/downloads
because using the new argparse module and new literal set syntax (s={1, 2}) .
General Notes:
--------------
Format for a .vcproj file entry:
<File
RelativePath="..\src\allheaders.h"
>
</File>
"""
epilogStr = r"""
Examples:
Assume that tesshelper.py is in c:\buildfolder\tesseract-3.02\vs2008,
which is also the current directory. Then,
python tesshelper .. compare
will compare c:\buildfolder\tesseract-3.02 "library" directories to the
libtesseract Project
(c:\buildfolder\tesseract-3.02\vs2008\libtesseract\libtesseract.vcproj).
python tesshelper .. report
will display summary stats for c:\buildfolder\tesseract-3.02 "library"
directories and the libtesseract Project.
python tesshelper .. copy ..\..\include
will copy all "public" libtesseract header files to
c:\buildfolder\include.
python tesshelper .. clean
will clean the vs2008 folder of all build directories, and .user, .suo,
.ncb, and other temp files.
"""
# imports of python standard library modules
# See Python Documentation | Library Reference for details
import collections
import glob
import argparse
import os
import re
import shutil
import sys
# ====================================================================
VERSION = "1.0 %s" % "$Date: 2012/03/07 17:26:31 $".split()[1]
# location of the libtesseract VS2008 project, relative to the tesseract root
PROJ_SUBDIR = r"vs2008\libtesseract"
PROJFILE = "libtesseract.vcproj"
# output files produced by the 'compare' command (ready-to-paste vcproj items)
NEWHEADERS_FILENAME = "newheaders.txt"
NEWSOURCES_FILENAME = "newsources.txt"
# template for one <File> element in a .vcproj; %s receives the file path
fileNodeTemplate = \
''' <File
RelativePath="..\..\%s"
>
</File>
'''
# ====================================================================
def getProjectfiles(libTessDir, libProjectFile, nTrimChars):
    """Return sets of all, c, h, and resources files in libtesseract Project"""
    #extract filenames of header & source files from the .vcproj
    projectCFiles = set()
    projectHFiles = set()
    projectRFiles = set()
    projectFilesSet = set()
    f = open(libProjectFile, "r")
    data = f.read()
    f.close()
    # every file in the project appears as a RelativePath="..." attribute
    projectFiles = re.findall(r'(?i)RelativePath="(\.[^"]+)"', data)
    for projectFile in projectFiles:
        root, ext = os.path.splitext(projectFile.lower())
        if ext == ".c" or ext == ".cpp":
            projectCFiles.add(projectFile)
        elif ext == ".h":
            projectHFiles.add(projectFile)
        elif ext == ".rc":
            projectRFiles.add(projectFile)
        else:
            print "unknown file type: %s" % projectFile
        # normalize to a lowercased path relative to the tesseract root
        # (nTrimChars strips the absolute root prefix)
        relativePath = os.path.join(libTessDir, projectFile)
        relativePath = os.path.abspath(relativePath)
        relativePath = relativePath[nTrimChars:].lower()
        projectFilesSet.add(relativePath)
    return projectFilesSet, projectHFiles, projectCFiles, projectRFiles
def getTessLibFiles(tessDir, nTrimChars):
    """Return the set of library source paths under tessDir.

    Paths are lowercased and made relative to the tesseract root by
    dropping the first nTrimChars characters of the absolute path.
    """
    libDirs = [
        "api",
        "ccmain",
        "ccstruct",
        "ccutil",
        "classify",
        "cube",
        "cutil",
        "dict",
        r"neural_networks\runtime",
        "opencl",
        "textord",
        "viewer",
        "wordrec",
        #"training",
        r"vs2008\port",
        r"vs2008\libtesseract",
        ]
    # Gather every .c/.cpp/.h/.rc file from the "library" directories.
    tessFiles = set()
    for subDir in libDirs:
        dirPath = os.path.join(tessDir, subDir)
        for pattern in ("*.c", "*.cpp", "*.h", "*.rc"):
            for match in glob.glob(os.path.join(dirPath, pattern)):
                tessFiles.add(os.path.abspath(match)[nTrimChars:].lower())
    return tessFiles
# ====================================================================
def tessCompare(tessDir):
    '''Compare libtesseract Project files and actual "sub-library" files.'''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    # chars to strip so paths are reported relative to the tesseract root
    nTrimChars = len(tessAbsDir)+1
    print 'Comparing VS2008 Project "%s" with\n "%s"' % (libProjectFile,
        tessAbsDir)
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    # files present on disk but missing from the project
    extraFiles = tessFiles - projectFilesSet
    print "%2d Extra files (in %s but not in Project)" % (len(extraFiles),
        tessAbsDir)
    headerFiles = []
    sourceFiles = []
    sortedList = list(extraFiles)
    sortedList.sort()
    for filename in sortedList:
        root, ext = os.path.splitext(filename.lower())
        if ext == ".h":
            headerFiles.append(filename)
        else:
            sourceFiles.append(filename)
        print " %s " % filename
    print
    # write ready-to-paste <File> project items for the missing files
    print "%2d new header file items written to %s" % (len(headerFiles),
        NEWHEADERS_FILENAME)
    headerFiles.sort()
    with open(NEWHEADERS_FILENAME, "w") as f:
        for filename in headerFiles:
            f.write(fileNodeTemplate % filename)
    print "%2d new source file items written to %s" % (len(sourceFiles),
        NEWSOURCES_FILENAME)
    sourceFiles.sort()
    with open(NEWSOURCES_FILENAME, "w") as f:
        for filename in sourceFiles:
            f.write(fileNodeTemplate % filename)
    print
    # files listed in the project that no longer exist on disk
    deadFiles = projectFilesSet - tessFiles
    print "%2d Dead files (in Project but not in %s" % (len(deadFiles),
        tessAbsDir)
    sortedList = list(deadFiles)
    sortedList.sort()
    for filename in sortedList:
        print " %s " % filename
# ====================================================================
def tessReport(tessDir):
    """Report summary stats on "sub-library" files and libtesseract Project file."""
    vs2008Dir = os.path.join(tessDir, "vs2008")
    libTessDir = os.path.join(vs2008Dir, "libtesseract")
    libProjectFile = os.path.join(libTessDir,"libtesseract.vcproj")
    tessAbsDir = os.path.abspath(tessDir)
    nTrimChars = len(tessAbsDir)+1
    projectFilesSet, projectHFiles, projectCFiles, projectRFiles = \
        getProjectfiles(libTessDir, libProjectFile, nTrimChars)
    tessFiles = getTessLibFiles(tessDir, nTrimChars)
    print 'Summary stats for "%s" library directories' % tessAbsDir
    # tally file counts by extension for each directory
    folderCounters = {}
    for tessFile in tessFiles:
        tessFile = tessFile.lower()
        folder, head = os.path.split(tessFile)
        file, ext = os.path.splitext(head)
        typeCounter = folderCounters.setdefault(folder, collections.Counter())
        typeCounter[ext[1:]] += 1
    # NOTE: relies on Python 2 dict.keys() returning a sortable list
    folders = folderCounters.keys()
    folders.sort()
    totalFiles = 0
    totalH = 0
    totalCPP = 0
    # NOTE(review): totalOther is never updated or printed
    totalOther = 0
    print
    print " total h cpp"
    print " ----- --- ---"
    for folder in folders:
        counters = folderCounters[folder]
        nHFiles = counters['h']
        nCPPFiles = counters['cpp']
        total = nHFiles + nCPPFiles
        totalFiles += total
        totalH += nHFiles
        totalCPP += nCPPFiles
        print " %5d %3d %3d %s" % (total, nHFiles, nCPPFiles, folder)
    print " ----- --- ---"
    print " %5d %3d %3d" % (totalFiles, totalH, totalCPP)
    print
    print 'Summary stats for VS2008 Project "%s"' % libProjectFile
    print " %5d %s" %(len(projectHFiles), "Header files")
    print " %5d %s" % (len(projectCFiles), "Source files")
    print " %5d %s" % (len(projectRFiles), "Resource files")
    print " -----"
    print " %5d" % (len(projectHFiles) + len(projectCFiles) + len(projectRFiles), )
# ====================================================================
def copyIncludes(fileSet, description, tessDir, includeDir):
"""Copy set of files to specified include dir."""
print
print 'Copying libtesseract "%s" headers to %s' % (description, includeDir)
print
sortedList = list(fileSet)
sortedList.sort()
count = 0
errList = []
for includeFile in sortedList:
filepath = os.path.join(tessDir, includeFile)
if os.path.isfile(filepath):
shutil.copy2(filepath, includeDir)
print "Copied: %s" % includeFile
count += 1
else:
print '***Error: "%s" doesn\'t exist"' % filepath
errList.append(filepath)
print '%d header files successfully copied to "%s"' % (count, includeDir)
if len(errList):
print "The following %d files were not copied:"
for filepath in errList:
print " %s" % filepath
def tessCopy(tessDir, includeDir):
    '''Copy all "public" libtesseract Project header files to include directory.
    Preserves directory hierarchy.'''
    # Headers required by users of the public BaseAPI.
    baseIncludeSet = {
        r"api\baseapi.h",
        r"api\capi.h",
        r"api\apitypes.h",
        r"ccstruct\publictypes.h",
        r"ccmain\thresholder.h",
        r"ccutil\host.h",
        r"ccutil\basedir.h",
        r"ccutil\tesscallback.h",
        r"ccutil\unichar.h",
        r"ccutil\platform.h",
        }
    # Transitive includes pulled in by strngs.h.
    strngIncludeSet = {
        r"ccutil\strngs.h",
        r"ccutil\memry.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\errcode.h",
        r"ccutil\fileerr.h",
        #r"ccutil\genericvector.h",
        }
    # Headers needed by the result-iterator API.
    resultIteratorIncludeSet = {
        r"ccmain\ltrresultiterator.h",
        r"ccmain\pageiterator.h",
        r"ccmain\resultiterator.h",
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        r"ccutil\params.h",
        r"ccutil\unicharmap.h",
        r"ccutil\unicharset.h",
        }
    # NOTE(review): the next two sets are defined but not part of the
    # copied fileSet below (see the commented-out union).
    genericVectorIncludeSet = {
        r"ccutil\genericvector.h",
        r"ccutil\tesscallback.h",
        r"ccutil\errcode.h",
        r"ccutil\host.h",
        r"ccutil\helpers.h",
        r"ccutil\ndminx.h",
        }
    blobsIncludeSet = {
        r"ccstruct\blobs.h",
        r"ccstruct\rect.h",
        r"ccstruct\points.h",
        r"ccstruct\ipoints.h",
        r"ccutil\elst.h",
        r"ccutil\host.h",
        r"ccutil\serialis.h",
        r"ccutil\lsterr.h",
        r"ccutil\ndminx.h",
        r"ccutil\tprintf.h",
        r"ccutil\params.h",
        r"viewer\scrollview.h",
        r"ccstruct\vecfuncs.h",
        }
    # Non-header build support files copied to includeDir itself.
    extraFilesSet = {
        #r"vs2008\include\stdint.h",
        r"vs2008\include\leptonica_versionnumbers.vsprops",
        r"vs2008\include\tesseract_versionnumbers.vsprops",
        }
    # Public headers go into an includeDir/tesseract subdirectory.
    tessIncludeDir = os.path.join(includeDir, "tesseract")
    if os.path.isfile(tessIncludeDir):
        print 'Aborting: "%s" is a file not a directory.' % tessIncludeDir
        return
    if not os.path.exists(tessIncludeDir):
        os.mkdir(tessIncludeDir)
    #fileSet = baseIncludeSet | strngIncludeSet | genericVectorIncludeSet | blobsIncludeSet
    fileSet = baseIncludeSet | strngIncludeSet | resultIteratorIncludeSet
    copyIncludes(fileSet, "public", tessDir, tessIncludeDir)
    copyIncludes(extraFilesSet, "extra", tessDir, includeDir)
# ====================================================================
def tessClean(tessDir):
    '''Clean vs2008 folder of all build directories and certain temp files.'''
    vs2008Dir = os.path.join(tessDir, "vs2008")
    vs2008AbsDir = os.path.abspath(vs2008Dir)
    # Double confirmation: first that the user wants to clean at all...
    answer = raw_input(
        'Are you sure you want to clean the\n "%s" folder (Yes/No) [No]? ' %
        vs2008AbsDir)
    if answer.lower() not in ("yes",):
        return
    # ...then whether to actually delete or just list (default: list only).
    answer = raw_input('Only list the items to be deleted (Yes/No) [Yes]? ')
    answer = answer.strip()
    listOnly = answer.lower() not in ("no",)
    for rootDir, dirs, files in os.walk(vs2008AbsDir):
        # Remove build output directories (and prune them from the walk).
        for buildDir in ("LIB_Release", "LIB_Debug", "DLL_Release", "DLL_Debug"):
            if buildDir in dirs:
                dirs.remove(buildDir)
                absBuildDir = os.path.join(rootDir, buildDir)
                if listOnly:
                    print "Would remove: %s" % absBuildDir
                else:
                    print "Removing: %s" % absBuildDir
                    shutil.rmtree(absBuildDir)
        if rootDir == vs2008AbsDir:
            # At the top level, everything except the solution, this script
            # and the readme is treated as removable.
            for file in files:
                if file.lower() not in ("tesseract.sln",
                                        "tesshelper.py",
                                        "readme.txt"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
        else:
            # Below the top level, remove VS per-user/editor temp files.
            for file in files:
                root, ext = os.path.splitext(file)
                if ext.lower() in (".suo",
                                   ".ncb",
                                   ".user",
                                   ) or (
                    len(ext)>0 and ext[-1] == "~"):
                    absPath = os.path.join(rootDir, file)
                    if listOnly:
                        print "Would remove: %s" % absPath
                    else:
                        print "Removing: %s" % absPath
                        os.remove(absPath)
# ====================================================================
def validateTessDir(tessDir):
    """argparse type-checker: tessDir must exist and hold the VS2008 project file."""
    if not os.path.isdir(tessDir):
        raise argparse.ArgumentTypeError(
            'Directory "%s" doesn\'t exist.' % tessDir)
    projFile = os.path.join(tessDir, PROJ_SUBDIR, PROJFILE)
    if os.path.isfile(projFile):
        return tessDir
    raise argparse.ArgumentTypeError(
        'Project file "%s" doesn\'t exist.' % projFile)
def validateDir(dir):
    """argparse type-checker: dir must exist and its basename must be 'include'."""
    if not os.path.isdir(dir):
        raise argparse.ArgumentTypeError('Directory "%s" doesn\'t exist.' % dir)
    tail = os.path.split(os.path.abspath(dir))[1]
    if tail.lower() != "include":
        raise argparse.ArgumentTypeError(
            'Include directory "%s" must be named "include".' % tail)
    return dir
def main ():
    """Parse the command line and dispatch to the selected subcommand."""
    parser = argparse.ArgumentParser(
        epilog=epilogStr,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + VERSION)
    parser.add_argument('tessDir', type=validateTessDir,
                        help="tesseract installation directory")
    subparsers = parser.add_subparsers(
        dest="subparser_name",
        title="Commands")

    compare_parser = subparsers.add_parser(
        'compare', help="compare libtesseract Project with tessDir")
    compare_parser.set_defaults(func=tessCompare)

    report_parser = subparsers.add_parser(
        'report', help="report libtesseract summary stats")
    report_parser.set_defaults(func=tessReport)

    copy_parser = subparsers.add_parser(
        'copy', help="copy public libtesseract header files to includeDir")
    copy_parser.add_argument('includeDir', type=validateDir,
                             help="Directory to copy header files to.")
    copy_parser.set_defaults(func=tessCopy)

    clean_parser = subparsers.add_parser(
        'clean', help="clean vs2008 folder of build folders and .user files")
    clean_parser.set_defaults(func=tessClean)

    # Kludge: argparse cannot declare a default subcommand, so fall back
    # to 'compare' when only tessDir was supplied.
    if len(sys.argv) == 2:
        sys.argv.append("compare")
    args = parser.parse_args()

    # Dispatch; only the 'copy' command takes a second argument.
    if args.func == tessCopy:
        args.func(args.tessDir, args.includeDir)
    else:
        args.func(args.tessDir)

if __name__ == '__main__' :
    main()
| Python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Zdenko Podobný
# Author: Zdenko Podobný
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Simple python demo script of tesseract-ocr 3.02 c-api
"""
import os
import sys
import ctypes
# Demo variables
lang = "eng"
filename = "../phototest.tif"
libpath = "/usr/local/lib64/"
libpath_w = "../vs2008/DLL_Release/"
TESSDATA_PREFIX = os.environ.get('TESSDATA_PREFIX')
if not TESSDATA_PREFIX:
    TESSDATA_PREFIX = "../"
# Pick the platform-specific library name; on Windows also extend PATH so
# DLLs next to libtesseract can be found by the loader.
if sys.platform == "win32":
    libname = libpath_w + "libtesseract302.dll"
    libname_alt = "libtesseract302.dll"
    os.environ["PATH"] += os.pathsep + libpath_w
else:
    libname = libpath + "libtesseract.so.3.0.2"
    libname_alt = "libtesseract.so.3"
# Try the explicit path first, then fall back to the loader's search path.
try:
    tesseract = ctypes.cdll.LoadLibrary(libname)
except:
    try:
        tesseract = ctypes.cdll.LoadLibrary(libname_alt)
    except WindowsError, err:
        print("Trying to load '%s'..." % libname)
        print("Trying to load '%s'..." % libname_alt)
        print(err)
        exit(1)
tesseract.TessVersion.restype = ctypes.c_char_p
tesseract_version = tesseract.TessVersion()[:4]
# We need to check library version because libtesseract.so.3 is symlink
# and can point to other version than 3.02
if float(tesseract_version) < 3.02:
    print("Found tesseract-ocr library version %s." % tesseract_version)
    print("C-API is present only in version 3.02!")
    exit(2)
# NOTE(review): TessBaseAPICreate's restype defaults to c_int, which can
# truncate the returned pointer on 64-bit platforms -- confirm before reuse.
api = tesseract.TessBaseAPICreate()
rc = tesseract.TessBaseAPIInit3(api, TESSDATA_PREFIX, lang);
if (rc):
    tesseract.TessBaseAPIDelete(api)
    print("Could not initialize tesseract.\n")
    exit(3)
# OCR the demo image and print the recognized text.
text_out = tesseract.TessBaseAPIProcessPages(api, filename, None , 0);
result_text = ctypes.string_at(text_out)
print result_text
| Python |
'''
Created on Feb 3, 2013
@author: bawey
'''
import cpoo_tools as tools
import cv
import cv2
import numpy as np
import sys
import tesseract
from cpoo_tools import show_wait
# Load the input image (path from argv) and collapse it to one channel.
image = cv.LoadImage( sys.argv[1] )
image = tools.split_channels( image )
# Scale to a fixed 640px height, preserving the aspect ratio.
resized = cv.CreateImage( ( ( int )( image.width * ( 640.0 / image.height ) ), 640 ), image.depth, image.nChannels )
cv.Resize( image, resized )
image = resized
# Median-blur to suppress noise (round-trips through a numpy array).
image = tools.array2cv( cv2.medianBlur( tools.cv2array( image ), 3 ) )
tools.show_wait( image, "cpoo" )
# Find candidate text regions via an energy map, then cluster and filter them.
output = tools.text_energy_map( image )
tools.show_wait( output, "cpoo" )
regions = tools.grow_regions( output, 255, 0 )
regions = tools.cluster_regions( image, regions )
regions = tools.kill_the_losers( image, regions )
# OCR each surviving region, restricting tesseract to timestamp characters.
api = tesseract.TessBaseAPI()
api.Init( ".", "eng", tesseract.OEM_TESSERACT_ONLY )
api.SetVariable( "tessedit_char_whitelist", " 0123456789.:\/\\PM" )
api.SetPageSegMode( tesseract.PSM_SINGLE_LINE )
for region in regions:
    roi_image = tools.extract( image, region )
    # Upscale the region 2x before recognition.
    result = cv.CreateImage( ( 2 * roi_image.width, 2 * roi_image.height ), roi_image.depth, roi_image.nChannels )
    cv.Resize( roi_image, result )
    tools.show_wait( result, "final result" )
    cv.SaveImage("result.png", result)
    tesseract.SetCvImage( result, api )
    text = api.GetUTF8Text()
    print "scanned text: " + text
| Python |
import cv2
import cv
import sys
import cpoo_tools as tools
import numpy as np
# Load the image (path from argv) and collapse it to one channel.
image = cv.LoadImage( sys.argv[1] )
# tools.grow_region(image)
# TODO: set radius dynamically
img = cv.CreateImage( cv.GetSize( image ), image.depth, image.nChannels )
img = tools.split_channels( image )
# Gaussian-blur (via a numpy round-trip) before measuring row entropy.
img = tools.array2cv( cv2.GaussianBlur( tools.cv2array( img ), ( 5, 5 ), 5 ) )
# Row-wise entropy of the right half of the image.
entropies = tools.entropy( img[:, img.width / 2:] )
# Overwrite the first/last rows with their neighbours to damp border
# artifacts.  NOTE(review): the two ends use mirrored off-by-one indices --
# confirm this is intentional.
for i in range( 10, -1, -1 ):
    entropies[len( entropies ) - 1 - i] = entropies[len( entropies ) - 2 - i]
    entropies[i] = entropies[i + 1]
# print entropies
# The clipping band is bounded by the steepest entropy rise and fall.
diffs = tools.derivative_of_vector( entropies, 1 )
ind_min = diffs.index( min( diffs ) )
ind_max = diffs.index( max( diffs ) )
print "Clipping zone: " + str( ind_max ) + "-" + str( ind_min ) + ", values: " + str( diffs[ind_max] ) + ", " + str( diffs[ind_min] )
# print diffs
clipped = tools.cropImage( image, 0, min( ind_max, ind_min ), image.width, max( ind_max, ind_min ) )
cv.ShowImage( "clipped", clipped )
cv.WaitKey()
# NOTE(review): 'regions' is allocated but the grow_regions result is
# discarded -- this tail looks unfinished.
regions = cv.CreateImage( cv.GetSize( clipped ), clipped.depth, clipped.nChannels )
tools.grow_regions( clipped )
# cv.WaitKey
| Python |
"""OCR in Python using the Tesseract engine from Google
http://code.google.com/p/pytesser/
by Michael J.T. O'Kelly
V 0.0.1, 3/10/07"""
import Image
import subprocess
import util
import errors
# Configuration for the external tesseract process and its scratch files.
tesseract_exe_name = 'tesseract' # Name of executable to be called at command line
scratch_image_name = "temp.bmp" # This file must be .bmp or other Tesseract-compatible format
scratch_text_name_root = "temp" # Leave out the .txt extension
cleanup_scratch_flag = True # Temporary files cleaned up after OCR operation
def call_tesseract(input_filename, output_filename):
    """Run the external tesseract executable on input_filename.

    Tesseract writes its result to output_filename + '.txt'.  On a nonzero
    exit status, delegate to errors.check_for_errors(), which inspects the
    run and raises an appropriate exception.
    """
    command = [tesseract_exe_name, input_filename, output_filename]
    process = subprocess.Popen(command)
    if process.wait() != 0:
        errors.check_for_errors()
def image_to_string(im, cleanup = cleanup_scratch_flag):
    """OCR a PIL image by writing it to a scratch file and running tesseract.

    When cleanup is true, the scratch image/text files are removed
    afterwards -- even if the OCR step fails.
    """
    try:
        util.image_to_scratch(im, scratch_image_name)
        call_tesseract(scratch_image_name, scratch_text_name_root)
        recognized = util.retrieve_text(scratch_text_name_root)
    finally:
        if cleanup:
            util.perform_cleanup(scratch_image_name, scratch_text_name_root)
    return recognized
def image_file_to_string(filename, cleanup = cleanup_scratch_flag, graceful_errors=True):
    """Run tesseract directly on filename and return the recognized text.

    If the file's format is not tesseract-compatible and graceful_errors is
    true, fall back to loading it with PIL and converting through a scratch
    file; otherwise the original error propagates.  Scratch files are
    removed when cleanup is true.
    """
    try:
        try:
            call_tesseract(filename, scratch_text_name_root)
            recognized = util.retrieve_text(scratch_text_name_root)
        except errors.Tesser_General_Exception:
            if not graceful_errors:
                raise
            recognized = image_to_string(Image.open(filename), cleanup)
    finally:
        if cleanup:
            util.perform_cleanup(scratch_image_name, scratch_text_name_root)
    return recognized
if __name__=='__main__':
    # Smoke-test / demo when run as a script.
    im = Image.open('phototest.tif')
    text = image_to_string(im)
    print text
    try:
        # Expected to fail: fnord.tif is an incompatible file type.
        text = image_file_to_string('fnord.tif', graceful_errors=False)
    except errors.Tesser_General_Exception, value:
        print "fnord.tif is incompatible filetype. Try graceful_errors=True"
        print value
        # Retry with the graceful fallback path.
        text = image_file_to_string('fnord.tif', graceful_errors=True)
        print "fnord.tif contents:", text
    text = image_file_to_string('fonts_test.png', graceful_errors=True)
    print text
| Python |
import cv2
import cv
import sys
import cpoo_tools as tools
import pylab
import numpy as np
# Load the source image (path from the command line).
img = cv.LoadImage(sys.argv[1])
#img = cv.LoadImage("meat/rgb.png")
# eig_image = cv.CreateMat(img.rows, img.cols, cv2.CV_32FC1)
# temp_image = cv.CreateMat(img.rows, img.cols, cv2.CV_32FC1)
# for (x,y) in cv.GoodFeaturesToTrack(img, eig_image, temp_image, 100, 0.04, 1.0, useHarris = True):
# print "good feature at", x,y
#dst = cv.CreateImage(cv.GetSize(img), cv.IPL_DEPTH_16S, img.channels)
#laplace = cv.Laplace(img, dst)
#cv.ShowImage('sth', laplace)
#cv.WaitKey()
print "height: "+str(img.height)
print "width: "+str(img.width)
print type(img)
# Allocate one-channel images; only 'red' is actually filled below.
red = cv.CreateImage(cv.GetSize(img), img.depth, 1)
blue = cv.CreateImage(cv.GetSize(img), img.depth, 1)
green = cv.CreateImage(cv.GetSize(img), img.depth, 1)
print type(red)
print "reddepth: "+str(red.depth)
#print img.rows
#print img.cols
#print img[100,100][1]
#red = img
# Copy the red channel pixel by pixel (channel index 2 in BGR order).
for y in range(0,img.height):
    for x in range(0,img.width):
        #rED = pImg[i*img->widthStep + j*img->nChannels + 2];
        #print str(x)+", "+str(y)+"value: "+str(img[y,x][2])
        # blue[y,x] = img[y,x][0];
        # green[y,x] = img[y,x][1];
        red[y,x] = img[y,x][2];
scores = list()
THRESH = 10
OPTIMAL_HEIGHT = img.height / 20
redunblurred = red
# Blur the red channel; keep the unblurred copy for the final crop.
red = cv2.GaussianBlur(tools.cv2array(red),(5,5),OPTIMAL_HEIGHT/2)
red = tools.array2cv(red)
print "optimal height: "+str(OPTIMAL_HEIGHT)
weight_centers = dict();
# NOTE(review): the entire per-row scoring pass below is commented out, so
# 'scores' and 'weight_centers' are never populated -- the later loops that
# read them cannot work as written.
#for y in range(0,img.height):
    #score=0
    #values=set()
    #last_pos = dict();
    #first_pos = dict();
    #local_scores = dict();
    #map(value, last_position)
    #map(value, score)
    #if last occurence long ago - zero the score
    #score= max score for row!
    #for x in range(0, img.width):
    # color = red[y,x]
    # if color in values:
    # if y!=0 and x!= 0 and abs(red[y,x-1] - red[y,x])>THRESH:
    # if x-first_pos[color]>img.width:
    # #print "rejection criterion triggered"
    # local_scores[color]=0
    # else:
    # local_scores[color]=local_scores[color]+1#red.width/(x-last_pos[color])
    # if y in weight_centers.keys():
    # print "this is sth for dictionary"
    # weight_centers[y][0]+=x
    # weight_centers[y][1]+=1
    # else:
    # weight_centers[y]=[x,1]
    # else:
    #first encounter of given color within a line
    # values.add(red[y,x])
    # local_scores[color] = 0
    # first_pos[color]=x
    # last_pos[red[y,x]]=x
    #score = sum(local_scores.values())
    #print "row: "+str(y)+" score: "+str(score)+" values.size="+str(len(values))
    #scores.append(score)
# hmm, remove the frame results
# NOTE(review): 'scores' is empty here (population is commented out above),
# so this smoothing loop raises IndexError on its first iteration.
for i in range(OPTIMAL_HEIGHT, -1, -1):
    scores[i]=scores[i+1]
    scores[len(scores)-1-i]=scores[len(scores)-2-i]
print scores
# First差erences of the row scores bound the datestamp band.
diffs = list()
global_min=0
global_max=0
min_sum = 0
max_sum = 0
diffs.append(0)
for i in range(1,len(scores)):
    diffs.append(scores[i]-scores[i-1])
# if i>1:
# positive_sum = max(diffs[i],0)+max(diffs[i-1],0)+max(diffs[i-2],0)
# negative_sum = min(diffs[i],0)+min(diffs[i-1],0)+min(diffs[i-2],0)
# if positive_sum > max_sum:
# max_sum = positive_sum
# global_max=i
# elif negative_sum < min_sum:
# min_sum = negative_sum
# global_min=i
# print str(i)+" row: "+str(diffs[i])
#for i in range(0, len(scores)-1):
# if diffs[i] * diffs[i+1] >0:
# diffs[i]=diffs[i]+diffs[i+1]
#for i in range(0,len(diffs)):
# if diffs[i]>diffs[global_max]:
# global_max=i
# elif diffs[i]<diffs[global_min]:
# global_min=i
print "weight_centers"
avg_center=0
counter=0;
# NOTE(review): weight_centers is never filled, so 'counter' stays 0 and
# the division below raises ZeroDivisionError.
for key in weight_centers.keys():
    if key> global_max and key<global_min:
        counter +=1
        avg_center+=weight_centers[key][0]/weight_centers[key][1]
avg_center/=counter
print "Mass center (X) for datestamp: "+str(avg_center)
print "min: "+str(global_min)+"("+str(min_sum)+"), max: "+str(global_max)+" ("+str(diffs[global_min])+")"
# Crop the unblurred red channel to the detected datestamp band.
clipped = cv.GetSubRect(redunblurred, (avg_center, global_max, img.width-avg_center, global_min-global_max))
cv.ShowImage("clipping zone", clipped)
#cv.WaitKey()
#cv.ShowImage("green",green)
#cv.WaitKey()
#cv.ShowImage("blue",blue)
#cv.WaitKey()
#cv.ShowImage("orignal",img)
#thresh = cv2.adaptiveThreshold(np.asarray(clipped),255,1,1,11,2)
#cv.SaveImage("clipped.png", clipped)
#cv.SaveImage("thresh.png", cv.fromarray(thresh))
#cv.ShowImage("clipped", cv.fromarray(clipped))
cv.WaitKey()
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.