| docstring | function | __index_level_0__ |
|---|---|---|
Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
|
def diff_bisectSplit(self, text1, text2, x, y, deadline):
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)
return diffs + diffsb
| 368,591
|
Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
|
def diff_linesToChars(self, text1, text2):
lineArray = [] # e.g. lineArray[4] == "Hello\n"
lineHash = {} # e.g. lineHash["Hello\n"] == 4
# "\x00" is a valid character, but various debuggers don't like it.
# So we'll insert a junk entry to avoid generating a null character.
lineArray.append('')
def diff_linesToCharsMunge(text):
chars = []
# Walk the text, pulling out a substring for each line.
# text.split('\n') would temporarily double our memory footprint.
# Modifying text would create many large strings to garbage collect.
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = text.find('\n', lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
if line in lineHash:
chars.append(chr(lineHash[line]))
else:
if len(lineArray) == maxLines:
# Bail out at 1114111 because chr(1114112) throws.
line = text[lineStart:]
lineEnd = len(text)
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(chr(len(lineArray) - 1))
lineStart = lineEnd + 1
return "".join(chars)
# Allocate 2/3rds of the space for text1, the rest for text2.
maxLines = 666666
chars1 = diff_linesToCharsMunge(text1)
maxLines = 1114111
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
| 368,592
|
Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
|
def diff_charsToLines(self, diffs, lineArray):
for i in range(len(diffs)):
text = []
for char in diffs[i][1]:
text.append(lineArray[ord(char)])
diffs[i] = (diffs[i][0], "".join(text))
| 368,593
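The two helpers above are the core of line-mode diffing: hash each unique line to one character, diff the short hash strings, then rehydrate. A minimal sketch, assuming the PyPI `diff-match-patch` package:

```python
from diff_match_patch import diff_match_patch  # pip install diff-match-patch

dmp = diff_match_patch()
text1 = "alpha\nbeta\ngamma\n"
text2 = "alpha\nbeta\ndelta\n"
# Encode each unique line as one character, diff, then decode.
chars1, chars2, line_array = dmp.diff_linesToChars(text1, text2)
diffs = dmp.diff_main(chars1, chars2, False)
dmp.diff_charsToLines(diffs, line_array)
# diffs == [(0, "alpha\nbeta\n"), (-1, "gamma\n"), (1, "delta\n")]
```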
|
Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
|
def diff_commonPrefix(self, text1, text2):
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
| 368,594
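For illustration, the binary search above agrees with a character-by-character scan; a quick sketch with the same `diff-match-patch` package assumption:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.diff_commonPrefix("1234abcdef", "1234xyz")  # -> 4 ("1234" is shared)
dmp.diff_commonPrefix("abc", "xyz")             # -> 0 (the quick null check fires)
```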
|
Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
|
def diff_commonSuffix(self, text1, text2):
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: https://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len(text1) - pointerend] ==
text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
| 368,595
|
Determine if the suffix of one string is the prefix of another.
Args:
text1 First string.
text2 Second string.
Returns:
The number of characters common to the end of the first
string and the start of the second string.
|
def diff_commonOverlap(self, text1, text2):
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
# Eliminate the null case.
if text1_length == 0 or text2_length == 0:
return 0
# Truncate the longer string.
if text1_length > text2_length:
text1 = text1[-text2_length:]
elif text1_length < text2_length:
text2 = text2[:text1_length]
text_length = min(text1_length, text2_length)
# Quick check for the worst case.
if text1 == text2:
return text_length
# Start by looking for a single character match
# and increase length until no match is found.
# Performance analysis: https://neil.fraser.name/news/2010/11/04/
best = 0
length = 1
while True:
pattern = text1[-length:]
found = text2.find(pattern)
if found == -1:
return best
length += found
if found == 0 or text1[-length:] == text2[:length]:
best = length
length += 1
| 368,596
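The loop grows the candidate overlap, using `str.find` to skip ahead past impossible lengths. For example (same package assumption):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.diff_commonOverlap("123456xxx", "xxxabcd")  # -> 3 ("xxx" ends one, starts the other)
dmp.diff_commonOverlap("123456", "abcd")        # -> 0 (no overlap)
```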
|
Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
|
def diff_halfMatch(self, text1, text2):
if self.Diff_Timeout <= 0:
# Don't risk returning a non-optimal diff if we have unlimited time.
return None
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
return None # Pointless.
def diff_halfMatchI(longtext, shorttext, i):
seed = longtext[i:i + len(longtext) // 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] +
shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len(longtext):
return (best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b, best_common)
else:
return None
# First check if the second quarter is the seed for a half-match.
hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
# Check again based on the third quarter.
hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if len(text1) > len(text2):
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
else:
(text2_a, text2_b, text1_a, text1_b, mid_common) = hm
return (text1_a, text1_b, text2_a, text2_b, mid_common)
| 368,597
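Since this speedup is gated on `Diff_Timeout`, a half-match is only attempted when a time limit is set. A sketch mirroring the library's own test values:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.Diff_Timeout = 1  # half-match is only attempted under a time limit
dmp.diff_halfMatch("1234567890", "a345678z")
# -> ("12", "90", "a", "z", "345678"): prefix/suffix of each text plus the common middle
dmp.diff_halfMatch("1234567890", "abcdef")  # -> None (no shared half)
```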
|
Reduce the number of edits by eliminating semantically trivial
equalities.
Args:
diffs: Array of diff tuples.
|
def diff_cleanupSemantic(self, diffs):
changes = False
equalities = [] # Stack of indices where equalities are found.
lastEquality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
# Number of chars that changed prior to the equality.
length_insertions1, length_deletions1 = 0, 0
# Number of chars that changed after the equality.
length_insertions2, length_deletions2 = 0, 0
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
equalities.append(pointer)
length_insertions1, length_insertions2 = length_insertions2, 0
length_deletions1, length_deletions2 = length_deletions2, 0
lastEquality = diffs[pointer][1]
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_INSERT:
length_insertions2 += len(diffs[pointer][1])
else:
length_deletions2 += len(diffs[pointer][1])
# Eliminate an equality that is smaller or equal to the edits on both
# sides of it.
if (lastEquality and (len(lastEquality) <=
max(length_insertions1, length_deletions1)) and
(len(lastEquality) <= max(length_insertions2, length_deletions2))):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
# Throw away the previous equality (it needs to be reevaluated).
if len(equalities):
equalities.pop()
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
# Reset the counters.
length_insertions1, length_deletions1 = 0, 0
length_insertions2, length_deletions2 = 0, 0
lastEquality = None
changes = True
pointer += 1
# Normalize the diff.
if changes:
self.diff_cleanupMerge(diffs)
self.diff_cleanupSemanticLossless(diffs)
# Find any overlaps between deletions and insertions.
# e.g: <del>abcxxx</del><ins>xxxdef</ins>
# -> <del>abc</del>xxx<ins>def</ins>
# e.g: <del>xxxabc</del><ins>defxxx</ins>
# -> <ins>def</ins>xxx<del>abc</del>
# Only extract an overlap if it is as big as the edit ahead or behind it.
pointer = 1
while pointer < len(diffs):
if (diffs[pointer - 1][0] == self.DIFF_DELETE and
diffs[pointer][0] == self.DIFF_INSERT):
deletion = diffs[pointer - 1][1]
insertion = diffs[pointer][1]
overlap_length1 = self.diff_commonOverlap(deletion, insertion)
overlap_length2 = self.diff_commonOverlap(insertion, deletion)
if overlap_length1 >= overlap_length2:
if (overlap_length1 >= len(deletion) / 2.0 or
overlap_length1 >= len(insertion) / 2.0):
# Overlap found. Insert an equality and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL,
insertion[:overlap_length1]))
diffs[pointer - 1] = (self.DIFF_DELETE,
deletion[:len(deletion) - overlap_length1])
diffs[pointer + 1] = (self.DIFF_INSERT,
insertion[overlap_length1:])
pointer += 1
else:
if (overlap_length2 >= len(deletion) / 2.0 or
overlap_length2 >= len(insertion) / 2.0):
# Reverse overlap found.
# Insert an equality and swap and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
diffs[pointer - 1] = (self.DIFF_INSERT,
insertion[:len(insertion) - overlap_length2])
diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
pointer += 1
pointer += 1
pointer += 1
| 368,598
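Two effects of the pass above, shown on hand-built diff tuples (the library encodes delete/equal/insert as -1/0/1):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
# A one-character equality flanked by edits is absorbed into the edits.
diffs = [(-1, "a"), (0, "b"), (-1, "c")]
dmp.diff_cleanupSemantic(diffs)  # mutates in place
assert diffs == [(-1, "abc"), (1, "b")]

# An overlap between a deletion and an insertion is extracted as an equality.
diffs = [(-1, "abcxxx"), (1, "xxxdef")]
dmp.diff_cleanupSemantic(diffs)
assert diffs == [(-1, "abc"), (0, "xxx"), (1, "def")]
```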
|
Look for single edits surrounded on both sides by equalities
which can be shifted sideways to align the edit to a word boundary.
e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
Args:
diffs: Array of diff tuples.
|
def diff_cleanupSemanticLossless(self, diffs):
def diff_cleanupSemanticScore(one, two):
if not one or not two:
# Edges are the best.
return 6
# Each port of this function behaves slightly differently due to
# subtle differences in each language's definition of things like
# 'whitespace'. Since this function's purpose is largely cosmetic,
# the choice has been made to use each language's native features
# rather than force total conformity.
char1 = one[-1]
char2 = two[0]
nonAlphaNumeric1 = not char1.isalnum()
nonAlphaNumeric2 = not char2.isalnum()
whitespace1 = nonAlphaNumeric1 and char1.isspace()
whitespace2 = nonAlphaNumeric2 and char2.isspace()
lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)
if blankLine1 or blankLine2:
# Five points for blank lines.
return 5
elif lineBreak1 or lineBreak2:
# Four points for line breaks.
return 4
elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
# Three points for end of sentences.
return 3
elif whitespace1 or whitespace2:
# Two points for whitespace.
return 2
elif nonAlphaNumeric1 or nonAlphaNumeric2:
# One point for non-alphanumeric.
return 1
return 0
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
equality1 = diffs[pointer - 1][1]
edit = diffs[pointer][1]
equality2 = diffs[pointer + 1][1]
# First, shift the edit as far left as possible.
commonOffset = self.diff_commonSuffix(equality1, edit)
if commonOffset:
commonString = edit[-commonOffset:]
equality1 = equality1[:-commonOffset]
edit = commonString + edit[:-commonOffset]
equality2 = commonString + equality2
# Second, step character by character right, looking for the best fit.
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
bestScore = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
while edit and equality2 and edit[0] == equality2[0]:
equality1 += edit[0]
edit = edit[1:] + equality2[0]
equality2 = equality2[1:]
score = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
# The >= encourages trailing rather than leading whitespace on edits.
if score >= bestScore:
bestScore = score
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
if diffs[pointer - 1][1] != bestEquality1:
# We have an improvement, save it back to the diff.
if bestEquality1:
diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
else:
del diffs[pointer - 1]
pointer -= 1
diffs[pointer] = (diffs[pointer][0], bestEdit)
if bestEquality2:
diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
else:
del diffs[pointer + 1]
pointer -= 1
pointer += 1
| 368,599
|
Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
|
def diff_cleanupEfficiency(self, diffs):
changes = False
equalities = [] # Stack of indices where equalities are found.
lastEquality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
pre_ins = False # Is there an insertion operation before the last equality.
pre_del = False # Is there a deletion operation before the last equality.
post_ins = False # Is there an insertion operation after the last equality.
post_del = False # Is there a deletion operation after the last equality.
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < self.Diff_EditCost and
(post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
lastEquality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
lastEquality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
# <del>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if lastEquality and ((pre_ins and pre_del and post_ins and post_del) or
((len(lastEquality) < self.Diff_EditCost / 2) and
(pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
equalities.pop() # Throw away the equality we just deleted.
lastEquality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if len(equalities):
equalities.pop() # Throw away the previous equality.
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
self.diff_cleanupMerge(diffs)
| 368,600
|
Reorder and merge like edit sections. Merge equalities.
Any edit section can move as long as it doesn't cross an equality.
Args:
diffs: Array of diff tuples.
|
def diff_cleanupMerge(self, diffs):
diffs.append((self.DIFF_EQUAL, '')) # Add a dummy entry at the end.
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete + count_insert > 1:
if count_delete != 0 and count_insert != 0:
# Factor out any common prefixes.
commonlength = self.diff_commonPrefix(text_insert, text_delete)
if commonlength != 0:
x = pointer - count_delete - count_insert - 1
if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
diffs[x] = (diffs[x][0], diffs[x][1] +
text_insert[:commonlength])
else:
diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
pointer += 1
text_insert = text_insert[commonlength:]
text_delete = text_delete[commonlength:]
# Factor out any common suffixes.
commonlength = self.diff_commonSuffix(text_insert, text_delete)
if commonlength != 0:
diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
diffs[pointer][1])
text_insert = text_insert[:-commonlength]
text_delete = text_delete[:-commonlength]
# Delete the offending records and add the merged ones.
new_ops = []
if len(text_delete) != 0:
new_ops.append((self.DIFF_DELETE, text_delete))
if len(text_insert) != 0:
new_ops.append((self.DIFF_INSERT, text_insert))
pointer -= count_delete + count_insert
diffs[pointer : pointer + count_delete + count_insert] = new_ops
pointer += len(new_ops) + 1
elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
# Merge this equality with the previous one.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer][1])
del diffs[pointer]
else:
pointer += 1
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
if diffs[-1][1] == '':
diffs.pop() # Remove the dummy entry at the end.
# Second pass: look for single edits surrounded on both sides by equalities
# which can be shifted sideways to eliminate an equality.
# e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
changes = False
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
# Shift the edit over the previous equality.
if diffs[pointer - 1][1] != "":
diffs[pointer] = (diffs[pointer][0],
diffs[pointer - 1][1] +
diffs[pointer][1][:-len(diffs[pointer - 1][1])])
diffs[pointer + 1] = (diffs[pointer + 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
del diffs[pointer - 1]
changes = True
elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
# Shift the edit over the next equality.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
diffs[pointer] = (diffs[pointer][0],
diffs[pointer][1][len(diffs[pointer + 1][1]):] +
diffs[pointer + 1][1])
del diffs[pointer + 1]
changes = True
pointer += 1
# If shifts were made, the diff needs reordering and another shift sweep.
if changes:
self.diff_cleanupMerge(diffs)
| 368,601
|
loc is a location in text1, compute and return the equivalent location
in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
Args:
diffs: Array of diff tuples.
loc: Location within text1.
Returns:
Location within text2.
|
def diff_xIndex(self, diffs, loc):
chars1 = 0
chars2 = 0
last_chars1 = 0
last_chars2 = 0
for x in range(len(diffs)):
(op, text) = diffs[x]
if op != self.DIFF_INSERT: # Equality or deletion.
chars1 += len(text)
if op != self.DIFF_DELETE: # Equality or insertion.
chars2 += len(text)
if chars1 > loc: # Overshot the location.
break
last_chars1 = chars1
last_chars2 = chars2
if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
# The location was deleted.
return last_chars2
# Add the remaining character length.
return last_chars2 + (loc - last_chars1)
| 368,602
|
Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
|
def diff_prettyHtml(self, diffs):
html = []
for (op, data) in diffs:
text = (data.replace("&", "&amp;").replace("<", "&lt;")
.replace(">", "&gt;").replace("\n", "&para;<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html)
| 368,603
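With the entity escaping restored above, the output looks like this (same tuple encoding as before):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = [(0, "a\n"), (-1, "<B>b</B>"), (1, "c&d")]
dmp.diff_prettyHtml(diffs)
# -> '<span>a&para;<br></span>'
#    '<del style="background:#ffe6e6;">&lt;B&gt;b&lt;/B&gt;</del>'
#    '<ins style="background:#e6ffe6;">c&amp;d</ins>'
```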
|
Compute and return the source text (all equalities and deletions).
Args:
diffs: Array of diff tuples.
Returns:
Source text.
|
def diff_text1(self, diffs):
text = []
for (op, data) in diffs:
if op != self.DIFF_INSERT:
text.append(data)
return "".join(text)
| 368,604
|
Compute and return the destination text (all equalities and insertions).
Args:
diffs: Array of diff tuples.
Returns:
Destination text.
|
def diff_text2(self, diffs):
text = []
for (op, data) in diffs:
if op != self.DIFF_DELETE:
text.append(data)
return "".join(text)
| 368,605
|
Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
|
def diff_levenshtein(self, diffs):
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
| 368,606
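A worked example: each delete/insert run between equalities contributes max(insertions, deletions), since a paired deletion and insertion counts as one substitution per character:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = [(-1, "abc"), (1, "1234"), (0, "xyz")]
dmp.diff_levenshtein(diffs)  # -> 4 == max(3, 4): three substitutions plus one insertion
```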
|
Given the original text1, and an encoded string which describes the
operations required to transform text1 into text2, compute the full diff.
Args:
text1: Source string for the diff.
delta: Delta text.
Returns:
Array of diff tuples.
Raises:
ValueError: If invalid input.
|
def diff_fromDelta(self, text1, delta):
diffs = []
pointer = 0 # Cursor in text1
tokens = delta.split("\t")
for token in tokens:
if token == "":
# Blank tokens are ok (from a trailing \t).
continue
# Each token begins with a one character parameter which specifies the
# operation of this token (delete, insert, equality).
param = token[1:]
if token[0] == "+":
param = urllib.parse.unquote(param)
diffs.append((self.DIFF_INSERT, param))
elif token[0] == "-" or token[0] == "=":
try:
n = int(param)
except ValueError:
raise ValueError("Invalid number in diff_fromDelta: " + param)
if n < 0:
raise ValueError("Negative number in diff_fromDelta: " + param)
text = text1[pointer : pointer + n]
pointer += n
if token[0] == "=":
diffs.append((self.DIFF_EQUAL, text))
else:
diffs.append((self.DIFF_DELETE, text))
else:
# Anything else is an error.
raise ValueError("Invalid diff operation in diff_fromDelta: " +
token[0])
if pointer != len(text1):
raise ValueError(
"Delta length (%d) does not equal source text length (%d)." %
(pointer, len(text1)))
return diffs
| 368,607
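A sketch of the delta grammar in action: `=n` copies n source characters, `-n` deletes n, and `+text` inserts percent-decoded text:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.diff_fromDelta("jumped", "=4\t-2\t+s")
# -> [(0, "jump"), (-1, "ed"), (1, "s")]
```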
|
Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
|
def match_main(self, text, pattern, loc):
# Check for null inputs.
if text is None or pattern is None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
# Shortcut (potentially not guaranteed by the algorithm)
return 0
elif not text:
# Nothing to match.
return -1
elif text[loc:loc + len(pattern)] == pattern:
# Perfect match at the perfect spot! (Includes case of null pattern)
return loc
else:
# Do a fuzzy compare.
match = self.match_bitap(text, pattern, loc)
return match
| 368,608
|
Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
|
def match_bitap(self, text, pattern, loc):
# Python doesn't have a maxint limit, so ignore this check.
#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
# raise ValueError("Pattern too long for this application.")
# Initialise the alphabet.
s = self.match_alphabet(pattern)
def match_bitapScore(e, x):
accuracy = float(e) / len(pattern)
proximity = abs(loc - x)
if not self.Match_Distance:
# Dodge divide by zero error.
return proximity and 1.0 or accuracy
return accuracy + (proximity / float(self.Match_Distance))
# Highest score beyond which we give up.
score_threshold = self.Match_Threshold
# Is there a nearby exact match? (speedup)
best_loc = text.find(pattern, loc)
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# What about in the other direction? (speedup)
best_loc = text.rfind(pattern, loc + len(pattern))
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# Initialise the bit arrays.
matchmask = 1 << (len(pattern) - 1)
best_loc = -1
bin_max = len(pattern) + len(text)
# Empty initialization added to appease pychecker.
last_rd = None
for d in range(len(pattern)):
# Scan for the best match each iteration allows for one more error.
# Run a binary search to determine how far from 'loc' we can stray at
# this error level.
bin_min = 0
bin_mid = bin_max
while bin_min < bin_mid:
if match_bitapScore(d, loc + bin_mid) <= score_threshold:
bin_min = bin_mid
else:
bin_max = bin_mid
bin_mid = (bin_max - bin_min) // 2 + bin_min
# Use the result from this iteration as the maximum for the next.
bin_max = bin_mid
start = max(1, loc - bin_mid + 1)
finish = min(loc + bin_mid, len(text)) + len(pattern)
rd = [0] * (finish + 2)
rd[finish + 1] = (1 << d) - 1
for j in range(finish, start - 1, -1):
if len(text) <= j - 1:
# Out of range.
charMatch = 0
else:
charMatch = s.get(text[j - 1], 0)
if d == 0: # First pass: exact match.
rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
else: # Subsequent passes: fuzzy match.
rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
if rd[j] & matchmask:
score = match_bitapScore(d, j - 1)
# This match will almost certainly be better than any existing match.
# But check anyway.
if score <= score_threshold:
# Told you so.
score_threshold = score
best_loc = j - 1
if best_loc > loc:
# When passing loc, don't exceed our current distance from loc.
start = max(1, 2 * loc - best_loc)
else:
# Already passed loc, downhill from here on in.
break
# No hope for a (better) match at greater error levels.
if match_bitapScore(d + 1, loc) > score_threshold:
break
last_rd = rd
return best_loc
| 368,609
|
Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
|
def match_alphabet(self, pattern):
s = {}
for char in pattern:
s[char] = 0
for i in range(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
| 368,610
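The mask for each character has one bit set per position it occupies in the pattern, with the most significant bit for the first character:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
dmp.match_alphabet("abc")  # -> {'a': 4, 'b': 2, 'c': 1}, i.e. 0b100, 0b010, 0b001
```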
|
Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
|
def patch_addContext(self, patch, text):
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix)
| 368,611
|
Given an array of patches, return another array that is identical.
Args:
patches: Array of Patch objects.
Returns:
Array of Patch objects.
|
def patch_deepCopy(self, patches):
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
# No need to deep copy the tuples since they are immutable.
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
patchesCopy.append(patchCopy)
return patchesCopy
| 368,613
|
Merge a set of patches onto the text. Return a patched text, as well
as a list of true/false values indicating which patches were applied.
Args:
patches: Array of Patch objects.
text: Old text.
Returns:
Two element Array, containing the new text and an array of boolean values.
|
def patch_apply(self, patches, text):
if not patches:
return (text, [])
# Deep copy the patches so that no changes are made to originals.
patches = self.patch_deepCopy(patches)
nullPadding = self.patch_addPadding(patches)
text = nullPadding + text + nullPadding
self.patch_splitMax(patches)
# delta keeps track of the offset between the expected and actual location
# of the previous patch. If there are patches expected at positions 10 and
# 20, but the first patch was found at 12, delta is 2 and the second patch
# has an effective expected position of 22.
delta = 0
results = []
for patch in patches:
expected_loc = patch.start2 + delta
text1 = self.diff_text1(patch.diffs)
end_loc = -1
if len(text1) > self.Match_MaxBits:
# patch_splitMax will only provide an oversized pattern in the case of
# a monster delete.
start_loc = self.match_main(text, text1[:self.Match_MaxBits],
expected_loc)
if start_loc != -1:
end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
expected_loc + len(text1) - self.Match_MaxBits)
if end_loc == -1 or start_loc >= end_loc:
# Can't find valid trailing context. Drop this patch.
start_loc = -1
else:
start_loc = self.match_main(text, text1, expected_loc)
if start_loc == -1:
# No match found. :(
results.append(False)
# Subtract the delta for this failed patch from subsequent patches.
delta -= patch.length2 - patch.length1
else:
# Found a match. :)
results.append(True)
delta = start_loc - expected_loc
if end_loc == -1:
text2 = text[start_loc : start_loc + len(text1)]
else:
text2 = text[start_loc : end_loc + self.Match_MaxBits]
if text1 == text2:
# Perfect match, just shove the replacement text in.
text = (text[:start_loc] + self.diff_text2(patch.diffs) +
text[start_loc + len(text1):])
else:
# Imperfect match.
# Run a diff to get a framework of equivalent indices.
diffs = self.diff_main(text1, text2, False)
if (len(text1) > self.Match_MaxBits and
self.diff_levenshtein(diffs) / float(len(text1)) >
self.Patch_DeleteThreshold):
# The end points match, but the content is unacceptably bad.
results[-1] = False
else:
self.diff_cleanupSemanticLossless(diffs)
index1 = 0
for (op, data) in patch.diffs:
if op != self.DIFF_EQUAL:
index2 = self.diff_xIndex(diffs, index1)
if op == self.DIFF_INSERT: # Insertion
text = text[:start_loc + index2] + data + text[start_loc +
index2:]
elif op == self.DIFF_DELETE: # Deletion
text = text[:start_loc + index2] + text[start_loc +
self.diff_xIndex(diffs, index1 + len(data)):]
if op != self.DIFF_DELETE:
index1 += len(data)
# Strip the padding off.
text = text[len(nullPadding):-len(nullPadding)]
return (text, results)
| 368,614
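An end-to-end sketch of applying a patch, using `patch_fromText` (defined later in this section) to build the patch list:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
patches = dmp.patch_fromText("@@ -1,5 +1,5 @@\n-j\n+J\n umps\n")
dmp.patch_apply(patches, "jumps over")
# -> ("Jumps over", [True])
```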
|
Add some padding on text start and end so that edges can match
something. Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
Returns:
The padding string added to each side.
|
def patch_addPadding(self, patches):
paddingLength = self.Patch_Margin
nullPadding = ""
for x in range(1, paddingLength + 1):
nullPadding += chr(x)
# Bump all the patches forward.
for patch in patches:
patch.start1 += paddingLength
patch.start2 += paddingLength
# Add some padding on start of first diff.
patch = patches[0]
diffs = patch.diffs
if not diffs or diffs[0][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
patch.start1 -= paddingLength # Should be 0.
patch.start2 -= paddingLength # Should be 0.
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[0][1]):
# Grow first equality.
extraLength = paddingLength - len(diffs[0][1])
newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
diffs[0] = (diffs[0][0], newText)
patch.start1 -= extraLength
patch.start2 -= extraLength
patch.length1 += extraLength
patch.length2 += extraLength
# Add some padding on end of last diff.
patch = patches[-1]
diffs = patch.diffs
if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.append((self.DIFF_EQUAL, nullPadding))
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[-1][1]):
# Grow last equality.
extraLength = paddingLength - len(diffs[-1][1])
newText = diffs[-1][1] + nullPadding[:extraLength]
diffs[-1] = (diffs[-1][0], newText)
patch.length1 += extraLength
patch.length2 += extraLength
return nullPadding
| 368,615
|
Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
|
def patch_splitMax(self, patches):
patch_size = self.Match_MaxBits
if patch_size == 0:
# Python has the option of not splitting strings due to its ability
# to handle integers of arbitrary precision.
return
for x in range(len(patches)):
if patches[x].length1 <= patch_size:
continue
bigpatch = patches[x]
# Remove the big old patch.
del patches[x]
x -= 1
start1 = bigpatch.start1
start2 = bigpatch.start2
precontext = ''
while len(bigpatch.diffs) != 0:
# Create one of several smaller patches.
patch = patch_obj()
empty = True
patch.start1 = start1 - len(precontext)
patch.start2 = start2 - len(precontext)
if precontext:
patch.length1 = patch.length2 = len(precontext)
patch.diffs.append((self.DIFF_EQUAL, precontext))
while (len(bigpatch.diffs) != 0 and
patch.length1 < patch_size - self.Patch_Margin):
(diff_type, diff_text) = bigpatch.diffs[0]
if diff_type == self.DIFF_INSERT:
# Insertions are harmless.
patch.length2 += len(diff_text)
start2 += len(diff_text)
patch.diffs.append(bigpatch.diffs.pop(0))
empty = False
elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
patch.diffs[0][0] == self.DIFF_EQUAL and
len(diff_text) > 2 * patch_size):
# This is a large deletion. Let it pass in one chunk.
patch.length1 += len(diff_text)
start1 += len(diff_text)
empty = False
patch.diffs.append((diff_type, diff_text))
del bigpatch.diffs[0]
else:
# Deletion or equality. Only take as much as we can stomach.
diff_text = diff_text[:patch_size - patch.length1 -
self.Patch_Margin]
patch.length1 += len(diff_text)
start1 += len(diff_text)
if diff_type == self.DIFF_EQUAL:
patch.length2 += len(diff_text)
start2 += len(diff_text)
else:
empty = False
patch.diffs.append((diff_type, diff_text))
if diff_text == bigpatch.diffs[0][1]:
del bigpatch.diffs[0]
else:
bigpatch.diffs[0] = (bigpatch.diffs[0][0],
bigpatch.diffs[0][1][len(diff_text):])
# Compute the head context for the next patch.
precontext = self.diff_text2(patch.diffs)
precontext = precontext[-self.Patch_Margin:]
# Append the end context for this patch.
postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
if postcontext:
patch.length1 += len(postcontext)
patch.length2 += len(postcontext)
if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
postcontext)
else:
patch.diffs.append((self.DIFF_EQUAL, postcontext))
if not empty:
x += 1
patches.insert(x, patch)
| 368,616
|
Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
|
def patch_toText(self, patches):
text = []
for patch in patches:
text.append(str(patch))
return "".join(text)
| 368,617
|
Crush the diff into an encoded string which describes the operations
required to transform text1 into text2.
E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
Operations are tab-separated. Inserted text is escaped using %xx notation.
Args:
diffs: Array of diff tuples.
Returns:
Delta text.
|
def diff_toDelta(self, diffs):
text = []
for (op, data) in diffs:
if op == self.DIFF_INSERT:
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# "))
elif op == self.DIFF_DELETE:
text.append("-%d" % len(data))
elif op == self.DIFF_EQUAL:
text.append("=%d" % len(data))
return "\t".join(text)
| 368,619
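`diff_toDelta` and `diff_fromDelta` (above) are inverses over a fixed source text:

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = [(0, "jump"), (-1, "ed"), (1, "s")]
delta = dmp.diff_toDelta(diffs)  # -> "=4\t-2\t+s"
assert dmp.diff_fromDelta("jumped", delta) == diffs
```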
|
Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
|
def match_alphabet(self, pattern):
s = {}
for char in pattern:
s[char] = 0
for i in range(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
| 368,620
|
Parse a textual representation of patches and return a list of patch
objects.
Args:
textline: Text representation of patches.
Returns:
Array of Patch objects.
Raises:
ValueError: If invalid input.
|
def patch_fromText(self, textline):
patches = []
if not textline:
return patches
text = textline.split('\n')
while len(text) != 0:
m = re.match(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
if not m:
raise ValueError("Invalid patch string: " + text[0])
patch = patch_obj()
patches.append(patch)
patch.start1 = int(m.group(1))
if m.group(2) == '':
patch.start1 -= 1
patch.length1 = 1
elif m.group(2) == '0':
patch.length1 = 0
else:
patch.start1 -= 1
patch.length1 = int(m.group(2))
patch.start2 = int(m.group(3))
if m.group(4) == '':
patch.start2 -= 1
patch.length2 = 1
elif m.group(4) == '0':
patch.length2 = 0
else:
patch.start2 -= 1
patch.length2 = int(m.group(4))
del text[0]
while len(text) != 0:
if text[0]:
sign = text[0][0]
else:
sign = ''
line = urllib.parse.unquote(text[0][1:])
if sign == '+':
# Insertion.
patch.diffs.append((self.DIFF_INSERT, line))
elif sign == '-':
# Deletion.
patch.diffs.append((self.DIFF_DELETE, line))
elif sign == ' ':
# Minor equality.
patch.diffs.append((self.DIFF_EQUAL, line))
elif sign == '@':
# Start of next patch.
break
elif sign == '':
# Blank line? Whatever.
pass
else:
# WTF?
raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
del text[0]
return patches
| 368,621
|
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
|
def compute(self, x_arr, y_arr):
y_arr += 1e-08
return np.sum(x_arr * np.log(x_arr / y_arr), axis=-1)
| 368,727
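The method above is a Kullback-Leibler divergence KL(x || y) summed over the last axis, with 1e-08 added to y to avoid division by zero (note it mutates `y_arr` in place). A standalone sketch of the same computation:

```python
import numpy as np

x_arr = np.array([[0.5, 0.5]])
y_arr = np.array([[0.9, 0.1]])
kl = np.sum(x_arr * np.log(x_arr / (y_arr + 1e-08)), axis=-1)
# kl ~= [0.511]: 0.5*log(0.5/0.9) + 0.5*log(0.5/0.1)
```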
|
Generate N-gram pairs.
Args:
token_list: The list of tokens.
n: N of N-gram.
Returns:
zip of Tuple(Training N-gram data, Target N-gram data)
|
def generate_ngram_data_set(self, token_list, n=2):
n_gram_tuple_zip = self.generate_tuple_zip(token_list, n)
n_gram_tuple_list = [n_gram_tuple for n_gram_tuple in n_gram_tuple_zip]
n_gram_data_set = self.generate_tuple_zip(n_gram_tuple_list, 2)
return n_gram_data_set
| 368,728
|
Generate Skip-gram pairs.
Args:
token_list: The list of tokens.
Returns:
zip of Tuple(Training N-gram data, Target N-gram data)
|
def generate_skip_gram_data_set(self, token_list):
n_gram_tuple_zip = self.generate_tuple_zip(token_list, 3)
skip_gram_list = []
for pre, point, post in n_gram_tuple_zip:
skip_gram_list.append((point, pre))
skip_gram_list.append((point, post))
return iter(skip_gram_list)
| 368,729
|
Generate the N-gram.
Args:
token_list: The list of tokens.
n: N of N-gram.
Returns:
zip of Tuple(N-gram)
|
def generate_tuple_zip(self, token_list, n=2):
return zip(*[token_list[i:] for i in range(n)])
| 368,730
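`generate_tuple_zip` is a standard sliding-window trick: zipping n staggered copies of the token list yields consecutive n-grams. A standalone equivalent:

```python
token_list = ["a", "b", "c", "d"]
n = 2
list(zip(*[token_list[i:] for i in range(n)]))
# -> [('a', 'b'), ('b', 'c'), ('c', 'd')]
```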
|
Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string.
|
def url_to_text(self, url):
path, headers = urllib.request.urlretrieve(url)
return self.path_to_text(path)
| 368,733
|
Transform local PDF file to string.
Args:
path: path to PDF file.
Returns:
string.
|
def path_to_text(self, path):
rsrcmgr = PDFResourceManager()
retstr = StringIO()
codec = 'utf-8'
laparams = LAParams()
device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
fp = open(path, 'rb')
interpreter = PDFPageInterpreter(rsrcmgr, device)
password = ""
maxpages = 0
caching = True
pagenos = set()
pages_data = PDFPage.get_pages(
fp,
pagenos,
maxpages=maxpages,
password=password,
caching=caching,
check_extractable=True
)
for page in pages_data:
interpreter.process_page(page)
text = retstr.getvalue()
text = text.replace("\n", "")
fp.close()
device.close()
retstr.close()
return text
| 368,734
|
Divide string into sentence list.
Args:
data: string.
counter: recursive counter.
Returns:
List of sentences.
|
def listup_sentence(self, data, counter=0):
delimiter = self.delimiter_list[counter]
sentence_list = []
[sentence_list.append(sentence + delimiter) for sentence in data.split(delimiter) if sentence != ""]
if counter + 1 < len(self.delimiter_list):
sentence_list_r = []
[sentence_list_r.extend(self.listup_sentence(sentence, counter+1)) for sentence in sentence_list]
sentence_list = sentence_list_r
return sentence_list
| 368,736
|
Entry Point.
Args:
url: PDF url.
|
def Main(url):
# The object of Web-scraping.
web_scrape = WebScraping()
# Set the object of reading PDF files.
web_scrape.readable_web_pdf = WebPDFReading()
# Execute Web-scraping.
document = web_scrape.scrape(url)
# The object of automatic summarization.
auto_abstractor = AutoAbstractor()
# Set tokenizer. This is a Japanese tokenizer based on MeCab.
auto_abstractor.tokenizable_doc = MeCabTokenizer()
# Object of abstracting and filtering document.
abstractable_doc = TopNRankAbstractor()
# Execute summarization.
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Output summarized sentence.
[print(sentence) for sentence in result_dict["summarize_result"]]
| 368,737
|
Initialization
Args:
default_alpha: Prior alpha parameter of the Beta distribution.
default_beta: Prior beta parameter of the Beta distribution.
|
def __init__(self, default_alpha=1, default_beta=1):
if isinstance(default_alpha, int) is False:
if isinstance(default_alpha, float) is False:
raise TypeError()
if isinstance(default_beta, int) is False:
if isinstance(default_beta, float) is False:
raise TypeError()
if default_alpha <= 0:
raise ValueError()
if default_beta <= 0:
raise ValueError()
self.__success = 0
self.__failure = 0
self.__default_alpha = default_alpha
self.__default_beta = default_beta
| 368,738
|
Observation data.
Args:
success: The number of success.
failure: The number of failure.
|
def observe(self, success, failure):
if isinstance(success, int) is False:
if isinstance(success, float) is False:
raise TypeError()
if isinstance(failure, int) is False:
if isinstance(failure, float) is False:
raise TypeError()
if success <= 0:
raise ValueError()
if failure <= 0:
raise ValueError()
self.__success += success
self.__failure += failure
| 368,739
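Together, `__init__` and `observe` maintain the success/failure counts behind a Beta(alpha + successes, beta + failures) posterior, as used in Thompson sampling. A hypothetical usage sketch (the class name is illustrative, not taken from the source):

```python
arm = BetaDistribution(default_alpha=1, default_beta=1)  # hypothetical class name
arm.observe(success=7, failure=3)
# Internal counts now back a Beta(1 + 7, 1 + 3) posterior over the arm's success rate.
```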
|
Concrete method.
Args:
state_key: The key of state. This value is a point in the map.
Returns:
[(x, y)]
|
def extract_possible_actions(self, state_key):
if state_key in self.__state_action_list_dict:
return self.__state_action_list_dict[state_key]
else:
action_list = []
for k in self.__state_action_list_dict.keys():
if len([s for s in state_key if s in k]) > 0:
action_list.extend(self.__state_action_list_dict[k])
return action_list
| 368,747
|
Compute the reward value.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Reward value.
|
def observe_reward_value(self, state_key, action_key):
reward_value = 0.0
if state_key in self.__state_action_list_dict:
if action_key in self.__state_action_list_dict[state_key]:
reward_value = 1.0
return reward_value
| 368,748
|
Initialize.
Args:
token_list: The list of all tokens.
|
def __init__(self, token_list):
self.__token_arr = np.array(list(set(token_list)))
| 368,749
|
Create matrix of sentences.
Args:
token_list: The list of tokens.
Returns:
2-D `np.ndarray` of sentences.
Each row means one hot vectors of one sentence.
|
def convert_tokens_into_matrix(self, token_list):
return np.array(self.vectorize(token_list)).astype(np.float32)
| 368,750
|
Tokenize vector.
Args:
vector_list: The list of vector of one token.
Returns:
token
|
def tokenize(self, vector_list):
vector_arr = np.array(vector_list)
if vector_arr.ndim == 1:
key_arr = vector_arr.argmax()
else:
key_arr = vector_arr.argmax(axis=-1)
return self.__token_arr[key_arr]
| 368,751
|
Vectorize token list.
Args:
token_list: The list of tokens.
Returns:
[vector of token, vector of token, vector of token, ...]
|
def vectorize(self, token_list):
vector_list = [self.__collection.tf_idf(token, self.__collection) for token in token_list]
return vector_list
| 368,753
|
Move in the feature map.
Args:
current_pos: The now position.
Returns:
The next position.
|
def __move(self, current_pos):
if self.__move_range is not None:
next_pos = np.random.randint(current_pos - self.__move_range, current_pos + self.__move_range)
if next_pos < 0:
next_pos = 0
elif next_pos >= self.var_arr.shape[0] - 1:
next_pos = self.var_arr.shape[0] - 1
return next_pos
else:
next_pos = np.random.randint(self.var_arr.shape[0] - 1)
return next_pos
| 368,755
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr):
deconvolution_layer_list = self.__deconvolution_layer_list[::-1]
for i in range(len(deconvolution_layer_list)):
try:
grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr)
except:
self.__logger.debug("Error raised in Convolution layer " + str(i + 1))
raise
self.__optimize_deconvolution_layer(self.__learning_rate, 1)
layerable_cnn_list = self.__convolutional_auto_encoder.layerable_cnn_list[::-1]
for i in range(len(layerable_cnn_list)):
try:
grad_arr = layerable_cnn_list[i].back_propagate(grad_arr)
except:
self.__logger.debug(
"Delta computation raised an error in CNN layer " + str(len(layerable_cnn_list) - i)
)
raise
self.__convolutional_auto_encoder.optimize(self.__learning_rate, 1)
return grad_arr
| 368,759
|
Back propagation for Deconvolution layer.
Args:
learning_rate: Learning rate.
epoch: Current epoch.
|
def __optimize_deconvolution_layer(self, learning_rate, epoch):
params_list = []
grads_list = []
for i in range(len(self.__deconvolution_layer_list)):
if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0:
params_list.append(self.__deconvolution_layer_list[i].graph.weight_arr)
grads_list.append(self.__deconvolution_layer_list[i].delta_weight_arr)
for i in range(len(self.__deconvolution_layer_list)):
if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0:
params_list.append(self.__deconvolution_layer_list[i].graph.bias_arr)
grads_list.append(self.__deconvolution_layer_list[i].delta_bias_arr)
params_list = self.__opt_params.optimize(
params_list,
grads_list,
learning_rate
)
i = 0
for i in range(len(self.__deconvolution_layer_list)):
if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0:
self.__deconvolution_layer_list[i].graph.weight_arr = params_list.pop(0)
if ((epoch + 1) % self.__attenuate_epoch == 0):
self.__deconvolution_layer_list[i].graph.weight_arr = self.__opt_params.constrain_weight(
self.__deconvolution_layer_list[i].graph.weight_arr
)
for i in range(len(self.__deconvolution_layer_list)):
if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0:
self.__deconvolution_layer_list[i].graph.bias_arr = params_list.pop(0)
for i in range(len(self.__deconvolution_layer_list)):
if self.__deconvolution_layer_list[i].delta_weight_arr.shape[0] > 0:
if self.__deconvolution_layer_list[i].delta_bias_arr.shape[0] > 0:
self.__deconvolution_layer_list[i].reset_delta()
| 368,760
|
Execute Web-Scraping.
The target dom objects are in self.__dom_object_list.
Args:
url: Web site url.
Returns:
The result as a string.
@TODO(chimera0): check URLs format.
|
def scrape(self, url):
if isinstance(url, str) is False:
raise TypeError("The type of url must be str.")
if self.readable_web_pdf is not None and self.readable_web_pdf.is_pdf_url(url) is True:
web_data = self.readable_web_pdf.url_to_text(url)
else:
web_data = ""
req = urllib.request.Request(url=url)
with urllib.request.urlopen(req) as f:
web = f.read().decode('utf-8')
dom = pq(web)
[dom(remove_object).remove() for remove_object in self.__remove_object_list]
for dom_object in self.__dom_object_list:
web_data += dom(dom_object).text()
sleep(1)
return web_data
| 368,764
|
Extract MIDI file.
Args:
file_path: File path of MIDI.
is_drum: Extract drum data or not.
Returns:
pd.DataFrame(columns=["program", "start", "end", "pitch", "velocity", "duration"])
|
def extract(self, file_path, is_drum=False):
midi_data = pretty_midi.PrettyMIDI(file_path)
note_tuple_list = []
for instrument in midi_data.instruments:
if (is_drum is False and instrument.is_drum is False) or (is_drum is True and instrument.is_drum is True):
for note in instrument.notes:
note_tuple_list.append((instrument.program, note.start, note.end, note.pitch, note.velocity))
note_df = pd.DataFrame(note_tuple_list, columns=["program", "start", "end", "pitch", "velocity"])
note_df = note_df.sort_values(by=["program", "start", "end"])
note_df["duration"] = note_df.end - note_df.start
return note_df
| 368,765
|
Save MIDI file.
Args:
file_path: File path of MIDI.
note_df: `pd.DataFrame` of note data.
|
def save(self, file_path, note_df):
chord = pretty_midi.PrettyMIDI()
for program in note_df.program.drop_duplicates().values.tolist():
df = note_df[note_df.program == program]
midi_obj = pretty_midi.Instrument(program=program)
for i in range(df.shape[0]):
note = pretty_midi.Note(
velocity=int(df.iloc[i, :]["velocity"]),
pitch=int(df.iloc[i, :]["pitch"]),
start=float(df.iloc[i, :]["start"]),
end=float(df.iloc[i, :]["end"])
)
# Add it to our cello instrument
midi_obj.notes.append(note)
# Add the cello instrument to the PrettyMIDI object
chord.instruments.append(midi_obj)
# Write out the MIDI data
chord.write(file_path)
| 368,766
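A hedged round trip through the two pretty_midi-backed methods above (the owning class name is assumed for illustration):

```python
controller = MidiController()  # hypothetical name for the class defining extract/save
note_df = controller.extract("input.mid")
note_df["pitch"] = note_df["pitch"] + 12  # transpose every note up an octave
controller.save("output.mid", note_df)
```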
|
Init.
Args:
greedy_q_learning: is-a `GreedyQLearning`.
init_state_key: First state key.
|
def __init__(
self,
greedy_q_learning,
init_state_key
):
if isinstance(greedy_q_learning, GreedyQLearning):
self.__greedy_q_learning = greedy_q_learning
else:
raise TypeError()
self.__init_state_key = init_state_key
| 368,767
|
Compute cost.
Args:
x: `np.ndarray` of explanatory variables.
Returns:
cost
|
def compute(self, x):
q_learning = copy(self.__greedy_q_learning)
q_learning.epsilon_greedy_rate = x[0]
q_learning.alpha_value = x[1]
q_learning.gamma_value = x[2]
if self.__init_state_key is not None:
q_learning.learn(state_key=self.__init_state_key, limit=int(x[3]))
else:
q_learning.learn(limit=x[3])
q_sum = q_learning.q_df.q_value.sum()
if q_sum != 0:
cost = q_learning.q_df.shape[0] / q_sum
else:
cost = q_learning.q_df.shape[0] / 1e-4
return cost
| 368,768
|
Entry Point.
Args:
url: target url.
|
def Main(url):
# The object of Web-Scraping.
web_scrape = WebScraping()
# Execute Web-Scraping.
document = web_scrape.scrape(url)
# The object of automatic summarization with N-gram.
auto_abstractor = NgramAutoAbstractor()
# n-gram object
auto_abstractor.n_gram = Ngram()
# n of n-gram
auto_abstractor.n = 3
# Set tokenizer. This is a Japanese tokenizer based on MeCab.
auto_abstractor.tokenizable_doc = MeCabTokenizer()
# Object of abstracting and filtering document.
abstractable_doc = TopNRankAbstractor()
# Execute summarization.
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Output 3 summarized sentences.
limit = 3
i = 1
for sentence in result_dict["summarize_result"]:
print(sentence)
if i >= limit:
break
i += 1
| 368,769
|
Execute summarization.
Args:
document: The target document.
Abstractor: The object of AbstractableDoc.
similarity_filter: The object of SimilarityFilter.
Returns:
dict data.
- "summarize_result": The list of summarized sentences.,
- "scoring_data": The list of scores.
|
def summarize(self, document, Abstractor, similarity_filter=None):
if isinstance(document, str) is False:
raise TypeError("The type of document must be str.")
if isinstance(Abstractor, AbstractableDoc) is False:
raise TypeError("The type of Abstractor must be AbstractableDoc.")
if isinstance(similarity_filter, SimilarityFilter) is False and similarity_filter is not None:
raise TypeError("The type of similarity_filter must be SimilarityFilter.")
normalized_sentences = self.listup_sentence(document)
# for filtering similar sentences.
if similarity_filter is not None:
normalized_sentences = similarity_filter.similar_filter_r(normalized_sentences)
self.tokenize(document)
words = self.token
fdist = nltk.FreqDist(words)
top_n_words = [w[0] for w in fdist.most_common(self.target_n)]
scored_list = self.__closely_associated_score(normalized_sentences, top_n_words)
filtered_list = Abstractor.filter(scored_list)
result_list = [normalized_sentences[idx] for (idx, score) in filtered_list]
result_dict = {
"summarize_result": result_list,
"scoring_data": filtered_list
}
return result_dict
| 368,776
|
Scoring the sentence with closely associations.
Args:
normalized_sentences: The list of sentences.
top_n_words: Important sentences.
Returns:
The list of scores.
|
def __closely_associated_score(self, normalized_sentences, top_n_words):
scores_list = []
sentence_idx = -1
for sentence in normalized_sentences:
self.tokenize(sentence)
sentence = self.token
sentence_idx += 1
word_idx = []
for w in top_n_words:
try:
word_idx.append(sentence.index(w))
except ValueError:
pass
word_idx.sort()
if len(word_idx) == 0:
continue
clusters = []
cluster = [word_idx[0]]
i = 1
while i < len(word_idx):
if word_idx[i] - word_idx[i - 1] < self.cluster_threshold:
cluster.append(word_idx[i])
else:
clusters.append(cluster[:])
cluster = [word_idx[i]]
i += 1
clusters.append(cluster)
max_cluster_score = 0
for c in clusters:
significant_words_in_cluster = len(c)
total_words_in_cluster = c[-1] - c[0] + 1
score = 1.0 * significant_words_in_cluster \
* significant_words_in_cluster / total_words_in_cluster
if score > max_cluster_score:
max_cluster_score = score
scores_list.append((sentence_idx, max_cluster_score))
return scores_list
| 368,777
|
Init.
Args:
mu: `float` or `array_like of floats`.
Mean (`centre`) of the distribution.
sigma: `float` or `array_like of floats`.
Standard deviation (spread or `width`) of the distribution.
output_shape: Output shape.
the shape is `(batch size, d1, d2, d3, ...)`.
|
def __init__(self, mu, sigma, output_shape):
self.__mu = mu
self.__sigma = sigma
self.__output_shape = output_shape
| 368,778
|
Multi-Agent Learning.
Override.
Args:
initial_state_key: Initial state.
limit: Limit of the number of learning.
game_n: The number of games.
|
def learn(self, initial_state_key, limit=1000, game_n=1):
end_flag = False
state_key_list = [None] * len(self.q_learning_list)
action_key_list = [None] * len(self.q_learning_list)
next_action_key_list = [None] * len(self.q_learning_list)
for game in range(game_n):
state_key = initial_state_key
self.t = 1
while self.t <= limit:
for i in range(len(self.q_learning_list)):
state_key_list[i] = state_key
if game + 1 == game_n:
self.state_key_list.append((i, tuple(state_key_list)))
self.q_learning_list[i].t = self.t
next_action_list = self.q_learning_list[i].extract_possible_actions((i, tuple(state_key_list)))
if len(next_action_list):
action_key = self.q_learning_list[i].select_action(
state_key=(i, tuple(state_key_list)),
next_action_list=next_action_list
)
action_key_list[i] = action_key
reward_value = self.q_learning_list[i].observe_reward_value(
(i, tuple(state_key_list)),
(i, tuple(action_key_list))
)
# Check the end flag.
if self.q_learning_list[i].check_the_end_flag((i, tuple(state_key_list))) is True:
end_flag = True
# Max-Q-Value in next action time.
next_next_action_list = self.q_learning_list[i].extract_possible_actions(
(i, tuple(action_key_list))
)
if len(next_next_action_list):
next_action_key = self.q_learning_list[i].predict_next_action(
(i, tuple(action_key_list)),
next_next_action_list
)
next_action_key_list[i] = next_action_key
next_max_q = self.q_learning_list[i].extract_q_df(
(i, tuple(action_key_list)),
next_action_key
)
# Update Q-Value.
self.q_learning_list[i].update_q(
state_key=(i, tuple(state_key_list)),
action_key=(i, tuple(action_key_list)),
reward_value=reward_value,
next_max_q=next_max_q
)
# Update State.
state_key = self.q_learning_list[i].update_state(
state_key=(i, tuple(state_key_list)),
action_key=(i, tuple(action_key_list))
)
state_key_list[i] = state_key
# Episode.
self.t += 1
self.q_learning_list[i].t = self.t
if end_flag is True:
break
| 368,780
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr, fix_opt_flag=False):
channel = grad_arr.shape[1] // 2
grad_arr = self.__deconvolution_model.learn(grad_arr[:, :channel], fix_opt_flag=fix_opt_flag)
delta_arr = self.__cnn.back_propagation(grad_arr)
if fix_opt_flag is False:
self.__cnn.optimize(self.__learning_rate, 1)
return delta_arr
| 368,783
|
Init.
Args:
deconvolution_layer_list: `list` of `DeconvolutionLayer`.
opt_params: is-a `OptParams`. If `None`, this value will be `Adam`.
learning_rate: Learning rate.
verbose_mode: Verbose mode or not.
|
def __init__(
self,
deconvolution_layer_list,
opt_params=None,
learning_rate=1e-05,
verbose_mode=False
):
for deconvolution_layer in deconvolution_layer_list:
if isinstance(deconvolution_layer, DeconvolutionLayer) is False:
raise TypeError()
if opt_params is None:
opt_params = Adam()
opt_params.dropout_rate = 0.0
if isinstance(opt_params, OptParams) is False:
raise TypeError()
logger = getLogger("pydbm")
handler = StreamHandler()
if verbose_mode is True:
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
else:
handler.setLevel(ERROR)
logger.setLevel(ERROR)
logger.addHandler(handler)
self.__deconvolution_layer_list = deconvolution_layer_list
self.__learning_rate = learning_rate
self.__attenuate_epoch = 50
self.__opt_params = opt_params
self.__logger = logger
| 368,784
|
Draws samples from the `fake` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
|
def inference(self, observed_arr):
for i in range(len(self.__deconvolution_layer_list)):
try:
observed_arr = self.__deconvolution_layer_list[i].forward_propagate(observed_arr)
except:
self.__logger.debug("Error raised in Deconvolution layer " + str(i + 1))
raise
return observed_arr
| 368,785
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr, fix_opt_flag=False):
deconvolution_layer_list = self.__deconvolution_layer_list[::-1]
for i in range(len(deconvolution_layer_list)):
try:
grad_arr = deconvolution_layer_list[i].back_propagate(grad_arr)
except:
            self.__logger.debug("Error raised in Deconvolution layer " + str(i + 1))
raise
if fix_opt_flag is False:
self.__optimize(self.__learning_rate, 1)
return grad_arr
| 368,786
|
Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
|
def inference(self, observed_arr):
self.__pred_arr = self.__lstm_model.inference(observed_arr)
return self.__pred_arr
| 368,788
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr, fix_opt_flag=False):
if grad_arr.ndim > 3:
grad_arr = grad_arr.reshape((
grad_arr.shape[0],
grad_arr.shape[1],
-1
))
delta_arr, grads_list = self.__lstm_model.back_propagation(self.__pred_arr, grad_arr)
if fix_opt_flag is False:
self.__lstm_model.optimize(
grads_list,
self.__learning_rate,
1
)
return delta_arr
| 368,789
|
Init.
Args:
low: Lower boundary of the output interval.
All values generated will be greater than or equal to low.
high: Upper boundary of the output interval.
All values generated will be less than high.
output_shape: Output shape.
The shape is `(batch size, d1, d2, d3, ...)`.
|
def __init__(self, low, high, output_shape):
self.__low = low
self.__high = high
self.__output_shape = output_shape
| 368,790
|
Remove duplicated elements.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Tuple of the unique token sets `(x, y)`.
|
def unique(self, token_list_x, token_list_y):
    x = set(token_list_x)
    y = set(token_list_y)
    return (x, y)
| 368,797
|
Count the number of tokens in `token_list`.
Args:
token_list: The list of tokens.
Returns:
{token: the numbers}
|
def count(self, token_list):
token_dict = {}
for token in token_list:
if token in token_dict:
token_dict[token] += 1
else:
token_dict[token] = 1
return token_dict
| 368,798
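This counting is equivalent to `collections.Counter` from the standard library, which can serve as a drop-in check:

from collections import Counter

token_list = ["a", "b", "a", "c", "a"]
print(dict(Counter(token_list)))  # {'a': 3, 'b': 1, 'c': 1}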
|
Filter mutually similar sentences.
Args:
sentence_list: The list of sentences.
Returns:
The list of filtered sentences.
|
def similar_filter_r(self, sentence_list):
result_list = []
recursive_list = []
try:
self.nlp_base.tokenize(sentence_list[0])
subject_token = self.nlp_base.token
result_list.append(sentence_list[0])
if len(sentence_list) > 1:
for i in range(len(sentence_list)):
if i > 0:
self.nlp_base.tokenize(sentence_list[i])
object_token = self.nlp_base.token
similarity = self.calculate(subject_token, object_token)
if similarity <= self.similarity_limit:
recursive_list.append(sentence_list[i])
if len(recursive_list) > 0:
result_list.extend(self.similar_filter_r(recursive_list))
except IndexError:
result_list = sentence_list
return result_list
| 368,799
|
Calculate similarity with the Jaccard coefficient.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
|
def calculate(self, token_list_x, token_list_y):
x, y = self.unique(token_list_x, token_list_y)
try:
result = len(x & y) / len(x | y)
except ZeroDivisionError:
result = 0.0
return result
| 368,803
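As a worked example, two token sets sharing two of four distinct tokens yield a Jaccard coefficient of 0.5:

x = {"deep", "learning", "model"}
y = {"deep", "model", "inference"}
print(len(x & y) / len(x | y))  # 2 / 4 = 0.5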
|
Learn Q-Values.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
real_q_arr: `np.ndarray` of real Q-Values.
|
def learn_q(self, predicted_q_arr, real_q_arr):
loss = self.__computable_loss.compute_loss(predicted_q_arr, real_q_arr)
delta_arr = self.__computable_loss.compute_delta(predicted_q_arr, real_q_arr)
delta_arr = self.__cnn.back_propagation(delta_arr)
self.__cnn.optimize(self.__learning_rate, 1)
self.__loss_list.append(loss)
| 368,807
|
Play a beat under the conditions specified by the arguments.
Args:
frequencys: Tuple of (left frequency (Hz), right frequency (Hz)).
play_time: Playing time (seconds).
sample_rate: Sample rate.
volume: Volume.
Returns:
void
def play_beat(
self,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
    # Base object of the dependent library (PyAudio).
audio = pyaudio.PyAudio()
    # Audio stream.
stream = audio.open(
format=pyaudio.paFloat32,
channels=2,
rate=sample_rate,
output=1
)
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
self.write_stream(stream, left_chunk, right_chunk, volume)
stream.stop_stream()
stream.close()
audio.terminate()
| 368,811
|
Save a beat, generated under the conditions specified by the arguments, to a wav file.
Args:
output_file_name: Path of the output wav file.
frequencys: Tuple of (left frequency (Hz), right frequency (Hz)).
play_time: Playing time (seconds).
sample_rate: Sample rate.
volume: Volume.
Returns:
void
def save_beat(
self,
output_file_name,
frequencys,
play_time,
sample_rate=44100,
volume=0.01
):
left_frequency, right_frequency = frequencys
left_chunk = self.__create_chunk(left_frequency, play_time, sample_rate)
right_chunk = self.__create_chunk(right_frequency, play_time, sample_rate)
frame_list = self.read_stream(left_chunk, right_chunk, volume)
wf = wave.open(output_file_name, 'wb')
wf.setparams((2, 2, sample_rate, 0, 'NONE', 'not compressed'))
wf.writeframes(b''.join(frame_list))
wf.close()
| 368,812
|
Generate a chunk.
Args:
frequency: Frequency (Hz).
play_time: Playing time (seconds).
sample_rate: Sample rate.
Returns:
`numpy` array of the chunk.
def __create_chunk(self, frequency, play_time, sample_rate):
chunks = []
wave_form = self.wave_form.create(frequency, play_time, sample_rate)
chunks.append(wave_form)
chunk = numpy.concatenate(chunks)
return chunk
| 368,813
|
Init.
Args:
image_true_sampler: is-a `ImageTrueSampler`.
|
def __init__(self, image_true_sampler):
if isinstance(image_true_sampler, ImageTrueSampler) is False:
raise TypeError()
self.__image_true_sampler = image_true_sampler
| 368,814
|
Add condition.
Args:
observed_arr: `np.ndarray` of samples.
Returns:
`np.ndarray` of samples.
|
def add_condition(self, observed_arr):
condition_arr = self.__image_true_sampler.draw()
return np.concatenate((observed_arr, condition_arr), axis=1)
| 368,816
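Concatenating along `axis=1` stacks the sampled condition onto the channel dimension, leaving batch and spatial shapes intact. A shape-only sketch with assumed dimensions:

import numpy as np

observed_arr = np.zeros((16, 3, 32, 32))   # (batch, channel, height, width)
condition_arr = np.zeros((16, 3, 32, 32))  # stands in for self.__image_true_sampler.draw()
print(np.concatenate((observed_arr, condition_arr), axis=1).shape)  # (16, 6, 32, 32)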
|
Generate a sound waveform.
Args:
frequency: Frequency (Hz).
play_time: Playing time (seconds).
sample_rate: Sample rate.
Returns:
Array containing the waveform elements.
def create(self, frequency, play_time, sample_rate):
length = int(play_time * sample_rate)
factor = float(frequency) * (math.pi * 2) / sample_rate
return numpy.sin(numpy.arange(length) * factor)
| 368,817
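The phase advances by `2πf / sample_rate` per sample, so one full period spans `sample_rate / frequency` samples. A quick numeric check with assumed values (440 Hz for one second at 44.1 kHz):

import math
import numpy

frequency, play_time, sample_rate = 440.0, 1.0, 44100
factor = float(frequency) * (math.pi * 2) / sample_rate
wave_form = numpy.sin(numpy.arange(int(play_time * sample_rate)) * factor)
print(wave_form.shape[0], sample_rate / frequency)  # 44100 samples, ~100.2 samples per period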
|
Init.
Args:
function_approximator: is-a `FunctionApproximator`.
map_size: Size of map.
memory_num: The number of step of agent's memory.
repeating_penalty: The value of penalty in the case that agent revisit.
|
def __init__(self, function_approximator, map_size=(10, 10), memory_num=4, repeating_penalty=0.5):
self.__map_arr = self.__create_map(map_size)
self.__agent_pos = self.START_POS
self.__reward_list = []
    self.__route_memory_list = []
    # Long-term memory referenced by `observe_reward_value` and `update_state`.
    self.__route_long_memory_list = []
self.__memory_num = memory_num
self.__repeating_penalty = repeating_penalty
super().__init__(function_approximator)
| 368,818
|
Inference.
Args:
state_arr: `np.ndarray` of state.
limit: The maximum number of inference steps.
Returns:
The list of `(x, y, q_value)` tuples representing an optimal route.
|
def inference(self, state_arr, limit=1000):
agent_x, agent_y = np.where(state_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_list = [(agent_x, agent_y, 0.0)]
self.t = 1
while self.t <= limit:
next_action_arr = self.extract_possible_actions(state_arr)
next_q_arr = self.function_approximator.inference_q(next_action_arr)
action_arr, q = self.select_action(next_action_arr, next_q_arr)
agent_x, agent_y = np.where(action_arr[0] == 1)
agent_x, agent_y = agent_x[0], agent_y[0]
result_list.append((agent_x, agent_y, q[0]))
# Update State.
state_arr = self.update_state(state_arr, action_arr)
        # Episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
return result_list
| 368,819
|
Compute the reward value.
Args:
state_arr: `np.ndarray` of state.
action_arr: `np.ndarray` of action.
Returns:
Reward value.
|
def observe_reward_value(self, state_arr, action_arr):
if self.__check_goal_flag(action_arr) is True:
return 1.0
else:
x, y = np.where(action_arr[-1] == 1)
x, y = x[0], y[0]
goal_x, goal_y = self.__goal_pos
if x == goal_x and y == goal_y:
distance = 0.0
else:
            distance = np.sqrt((x - goal_x) ** 2 + (y - goal_y) ** 2)
if (x, y) in self.__route_long_memory_list:
repeating_penalty = self.__repeating_penalty
else:
repeating_penalty = 0.0
return 1.0 - distance - repeating_penalty
| 368,820
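The reward is 1.0 at the goal and otherwise shrinks with the Euclidean distance to the goal, minus a penalty for revisited cells, so it can go well below zero far from the goal. A worked example with assumed coordinates:

import numpy as np

x, y = 2, 3                # agent position after the action
goal_x, goal_y = 5, 7      # assumed goal position
distance = np.sqrt((x - goal_x) ** 2 + (y - goal_y) ** 2)  # 5.0
repeating_penalty = 0.5    # the cell was already visited
print(1.0 - distance - repeating_penalty)  # -4.5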
|
Update state.
Override.
Args:
state_arr: `np.ndarray` of state in `self.t`.
action_arr: `np.ndarray` of action in `self.t`.
Returns:
`np.ndarray` of state in `self.t+1`.
|
def update_state(self, state_arr, action_arr):
x, y = np.where(action_arr[-1] == 1)
self.__agent_pos = (x[0], y[0])
self.__route_memory_list.append((x[0], y[0]))
self.__route_long_memory_list.append((x[0], y[0]))
self.__route_long_memory_list = list(set(self.__route_long_memory_list))
while len(self.__route_memory_list) > self.__memory_num:
self.__route_memory_list = self.__route_memory_list[1:]
return self.extract_now_state()
| 368,822
|
Draws samples from the `true` distribution.
Args:
observed_arr: `np.ndarray` of observed data points.
Returns:
`np.ndarray` of inferenced.
|
def inference(self, observed_arr):
if observed_arr.ndim < 4:
# Add rank for channel.
observed_arr = np.expand_dims(observed_arr, axis=1)
self.__add_channel_flag = True
else:
self.__add_channel_flag = False
return super().inference(observed_arr)
| 368,826
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
fix_opt_flag: If `False`, no optimization in this model will be done.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr, fix_opt_flag=False):
delta_arr = super().learn(grad_arr, fix_opt_flag)
if self.__add_channel_flag is True:
return delta_arr[:, 0]
else:
return delta_arr
| 368,827
|
Calculate similarity with the so-called Cosine similarity of Tf-Idf vectors.
Concrete method.
Args:
token_list_x: [token, token, token, ...]
token_list_y: [token, token, token, ...]
Returns:
Similarity.
|
def calculate(self, token_list_x, token_list_y):
if len(token_list_x) == 0 or len(token_list_y) == 0:
return 0.0
    document_list = token_list_x.copy()
    document_list.extend(token_list_y)
document_list = list(set(document_list))
tfidf_vectorizer = TfidfVectorizer(document_list)
vector_list_x = tfidf_vectorizer.vectorize(token_list_x)
vector_list_y = tfidf_vectorizer.vectorize(token_list_y)
    if len(vector_list_x) > len(vector_list_y):
        vector_list_y.extend([0.0] * (len(vector_list_x) - len(vector_list_y)))
    elif len(vector_list_y) > len(vector_list_x):
        vector_list_x.extend([0.0] * (len(vector_list_y) - len(vector_list_x)))
dot_prod = np.dot(vector_list_x, vector_list_y)
norm_x = np.linalg.norm(vector_list_x)
norm_y = np.linalg.norm(vector_list_y)
try:
result = dot_prod / (norm_x * norm_y)
        # `np.isnan` returns `numpy.bool_`, so avoid the `is True` identity check.
        if np.isnan(result):
            return 0.0
        else:
            return result
except ZeroDivisionError:
return 0.0
| 368,828
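Note that the `TfidfVectorizer` here is the library's own vectorizer, not scikit-learn's. The cosine itself is just the dot product over the product of norms; a self-contained check on assumed toy vectors:

import numpy as np

vector_x = np.array([0.2, 0.5, 0.0, 0.1])
vector_y = np.array([0.1, 0.4, 0.3, 0.0])
cosine = np.dot(vector_x, vector_y) / (np.linalg.norm(vector_x) * np.linalg.norm(vector_y))
print(round(float(cosine), 4))  # 0.7877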
|
Init.
Args:
params_arr: The parameters.
cost_functionable: is-a `CostFunctionable`.
|
def __init__(self, params_arr, cost_functionable):
self.__params_arr = params_arr
if isinstance(cost_functionable, CostFunctionable):
self.__cost_functionable = cost_functionable
else:
raise TypeError
| 368,829
|
Compute distance.
Args:
x: Data point.
y: Data point.
Returns:
Distance.
|
def compute(self, x, y):
if x in self.__memo_dict:
x_v = self.__memo_dict[x]
else:
x_v = self.__cost_functionable.compute(self.__params_arr[x, :])
self.__memo_dict.setdefault(x, x_v)
if y in self.__memo_dict:
y_v = self.__memo_dict[y]
else:
y_v = self.__cost_functionable.compute(self.__params_arr[y, :])
self.__memo_dict.setdefault(y, y_v)
return abs(x_v - y_v)
| 368,830
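Because each index maps to a fixed parameter row, the memo dictionary caches the cost per index, and repeated comparisons avoid recomputation. A sketch with a hypothetical quadratic cost standing in for `CostFunctionable`:

import numpy as np

class QuadraticCost:
    # Hypothetical cost: squared L2 norm of a parameter row.
    def compute(self, params_arr):
        return float(np.sum(params_arr ** 2))

params_arr = np.array([[1.0, 2.0], [3.0, 4.0]])
cost = QuadraticCost()
memo_dict = {}
for idx in (0, 1, 0):  # the second lookup of row 0 hits the cache
    if idx not in memo_dict:
        memo_dict[idx] = cost.compute(params_arr[idx, :])
print(abs(memo_dict[0] - memo_dict[1]))  # |5.0 - 25.0| = 20.0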
|
Initialize map of maze and setup reward value.
Args:
map_arr: Map. the 2d- `np.ndarray`.
start_point_label: Label of start point.
end_point_label: Label of end point.
wall_label: Label of wall.
agent_label: Label of agent.
|
def initialize(self, map_arr, start_point_label="S", end_point_label="G", wall_label="#", agent_label="@"):
np.set_printoptions(threshold=np.inf)
self.__agent_label = agent_label
self.__map_arr = map_arr
self.__start_point_label = start_point_label
start_arr_tuple = np.where(self.__map_arr == self.__start_point_label)
x_arr, y_arr = start_arr_tuple
self.__start_point_tuple = (x_arr[0], y_arr[0])
    self.__end_point_label = end_point_label
    end_arr_tuple = np.where(self.__map_arr == self.__end_point_label)
x_arr, y_arr = end_arr_tuple
self.__end_point_tuple = (x_arr[0], y_arr[0])
self.__wall_label = wall_label
for x in range(self.__map_arr.shape[1]):
for y in range(self.__map_arr.shape[0]):
if (x, y) == self.__start_point_tuple or (x, y) == self.__end_point_tuple:
continue
arr_value = self.__map_arr[y][x]
if arr_value == self.__wall_label:
continue
self.save_r_df((x, y), float(arr_value))
| 368,832
|
Concrete method.
Args:
state_key: The key of state. This value is a point `(x, y)` on the map.
Returns:
[(x, y)]
|
def extract_possible_actions(self, state_key):
x, y = state_key
if self.__map_arr[y][x] == self.__wall_label:
raise ValueError("It is the wall. (x, y)=(%d, %d)" % (x, y))
around_map = [(x, y-1), (x, y+1), (x-1, y), (x+1, y)]
    possible_actions_list = [
        (_x, _y) for _x, _y in around_map
        if self.__map_arr[_y][_x] != self.__wall_label and self.__map_arr[_y][_x] != self.__start_point_label
    ]
    return possible_actions_list
| 368,833
|
Compute the reward value.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Reward value.
|
def observe_reward_value(self, state_key, action_key):
x, y = state_key
if self.__map_arr[y][x] == self.__end_point_label:
return 100.0
elif self.__map_arr[y][x] == self.__start_point_label:
return 0.0
elif self.__map_arr[y][x] == self.__wall_label:
raise ValueError("It is the wall. (x, y)=(%d, %d)" % (x, y))
else:
reward_value = float(self.__map_arr[y][x])
self.save_r_df(state_key, reward_value)
return reward_value
| 368,834
|
Check the end flag.
If this return value is `True`, the learning is end.
Args:
state_key: The key of state in `self.t`.
Returns:
bool
|
def check_the_end_flag(self, state_key):
    # The learning ends only when the agent reaches the end point.
x, y = state_key
end_point_tuple = np.where(self.__map_arr == self.__end_point_label)
end_point_x_arr, end_point_y_arr = end_point_tuple
if x == end_point_x_arr[0] and y == end_point_y_arr[0]:
return True
else:
return False
| 368,836
|
Inference route.
Args:
limit: The maximum number of inference steps.
Returns:
[(x_1, y_1, q_value_1), (x_2, y_2, q_value_2), ...]
|
def inference(self, limit=1000):
route_list = []
memory_list = []
state_key = self.__start_point_tuple
x, y = state_key
end_x, end_y = self.__end_point_tuple
for i in range(limit):
q_df = self.q_df[self.q_df.state_key == state_key]
if len(memory_list):
q_df = q_df[~q_df.action_key.isin(memory_list)]
if q_df.shape[0] > 1:
q_df = q_df.sort_values(by=["q_value"], ascending=False)
action_key = q_df.iloc[0, :]["action_key"]
q_value = q_df.iloc[0, :]["q_value"]
elif q_df.shape[0] == 1:
action_key = q_df.action_key.values[0]
q_value = q_df.q_value.values[0]
else:
action_key_list = self.extract_possible_actions(state_key)
action_key_list = [v for v in action_key_list if v not in memory_list]
q_value = 0.0
            if len(action_key_list):
                action_key = random.choice(action_key_list)
            else:
                # No unvisited action remains, so stop the inference.
                break
_q_df = q_df[q_df.action_key == action_key]
if _q_df.shape[0]:
q_value = _q_df.q_value.values[0]
state_key = self.update_state(
state_key=state_key,
action_key=action_key
)
x, y = state_key
route_list.append((x, y, q_value))
memory_list.append(state_key)
if self.check_the_end_flag(state_key) is True:
break
return route_list
| 368,839
|
Update this Discriminator by ascending its stochastic gradient.
Args:
grad_arr: `np.ndarray` of gradients.
Returns:
`np.ndarray` of delta or gradients.
|
def learn(self, grad_arr):
encoder_delta_arr, _, encoder_grads_list = self.__encoder_decoder_controller.encoder.hidden_back_propagate(
grad_arr[:, -1]
)
    # Pad with `None` placeholders so the list matches the layout `optimize` expects.
    encoder_grads_list.insert(0, None)
    encoder_grads_list.insert(0, None)
self.__encoder_decoder_controller.encoder.optimize(
encoder_grads_list,
self.__learning_rate,
1
)
return encoder_delta_arr
| 368,842
|
Init.
Args:
function_approximator: is-a `FunctionApproximator`.
|
def __init__(self, function_approximator):
if isinstance(function_approximator, FunctionApproximator):
self.__function_approximator = function_approximator
else:
raise TypeError()
self.t = 1
self.__q_logs_arr = np.array([])
| 368,844
|
Learning and searching the optimal solution.
Args:
state_arr: `np.ndarray` of initial state.
limit: The maximum number of iterative updates based on value iteration algorithms.
|
def learn(self, state_arr, limit=1000):
while self.t <= limit:
# Draw samples of next possible actions from any distribution.
next_action_arr = self.extract_possible_actions(state_arr)
# Inference Q-Values.
predicted_q_arr = self.__function_approximator.inference_q(next_action_arr)
# Set `np.ndarray` of rewards and next Q-Values.
reward_value_arr = np.empty((next_action_arr.shape[0], 1))
next_max_q_arr = np.empty((next_action_arr.shape[0], 1))
for i in range(reward_value_arr.shape[0]):
# Observe reward values.
reward_value_arr[i] = self.observe_reward_value(state_arr, next_action_arr[i])
# Inference the Max-Q-Value in next action time.
next_next_action_arr = self.extract_possible_actions(next_action_arr[i])
next_max_q_arr[i] = self.__function_approximator.inference_q(next_next_action_arr).max()
# Select action.
action_arr, predicted_q = self.select_action(next_action_arr, predicted_q_arr)
# Update real Q-Values.
real_q_arr = self.update_q(
predicted_q_arr,
reward_value_arr,
next_max_q_arr
)
        # The real Q-Value corresponding to the selected predicted Q-Value.
real_q = real_q_arr[np.where(predicted_q_arr == predicted_q)[0][0]]
if self.__q_logs_arr.shape[0] > 0:
self.__q_logs_arr = np.r_[
self.__q_logs_arr,
np.array([predicted_q, real_q]).reshape(1, 2)
]
else:
self.__q_logs_arr = np.array([predicted_q, real_q]).reshape(1, 2)
# Learn Q-Values.
self.learn_q(predicted_q_arr, real_q_arr)
# Update State.
state_arr = self.update_state(state_arr, action_arr)
        # Episode.
self.t += 1
# Check.
end_flag = self.check_the_end_flag(state_arr)
if end_flag is True:
break
| 368,845
|
Update Q.
Args:
predicted_q_arr: `np.ndarray` of predicted Q-Values.
reward_value_arr: `np.ndarray` of reward values.
next_max_q_arr: `np.ndarray` of maximum Q-Values in next time step.
Returns:
`np.ndarray` of real Q-Values.
|
def update_q(self, predicted_q_arr, reward_value_arr, next_max_q_arr):
# Update Q-Value.
return predicted_q_arr + (self.alpha_value * (reward_value_arr + (self.gamma_value * next_max_q_arr) - predicted_q_arr))
| 368,846
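This is the standard temporal-difference target, Q ← Q + α(r + γ max Q′ − Q), applied elementwise. A worked numeric example with assumed hyperparameters:

import numpy as np

alpha_value, gamma_value = 0.1, 0.9
predicted_q_arr = np.array([[0.5]])
reward_value_arr = np.array([[1.0]])
next_max_q_arr = np.array([[0.8]])
real_q_arr = predicted_q_arr + alpha_value * (
    reward_value_arr + gamma_value * next_max_q_arr - predicted_q_arr
)
print(real_q_arr)  # [[0.622]]: 0.5 + 0.1 * (1.0 + 0.72 - 0.5)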
|
Compute distance.
Args:
x_arr: `np.ndarray` of vectors.
y_arr: `np.ndarray` of vectors.
Returns:
`np.ndarray` of distances.
|
def compute(self, x_arr, y_arr):
x_arr = x_arr / np.linalg.norm(x_arr, ord=1)
y_arr = y_arr / np.linalg.norm(y_arr, ord=1)
mixture_arr = 0.5 * (x_arr + y_arr)
return 0.5 * (super().compute(x_arr, mixture_arr) + super().compute(y_arr, mixture_arr))
| 368,852
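The composition is the Jensen-Shannon divergence, JS(P, Q) = ½KL(P‖M) + ½KL(Q‖M) with mixture M = ½(P + Q), assuming the parent's `compute` is a KL divergence. A standalone sketch with an explicit KL, assuming strictly positive inputs:

import numpy as np

def kl(p_arr, q_arr):
    # Kullback-Leibler divergence of normalized distributions.
    return np.sum(p_arr * np.log(p_arr / q_arr))

def js(x_arr, y_arr):
    p_arr = x_arr / np.linalg.norm(x_arr, ord=1)
    q_arr = y_arr / np.linalg.norm(y_arr, ord=1)
    m_arr = 0.5 * (p_arr + q_arr)
    return 0.5 * (kl(p_arr, m_arr) + kl(q_arr, m_arr))

# Symmetric by construction.
a, b = np.array([0.2, 0.8]), np.array([0.6, 0.4])
print(np.isclose(js(a, b), js(b, a)))  # True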
|
Filtering with top-n ranking.
Args:
scored_list: The list of scoring.
Returns:
The list of filtered results.
|
def filter(self, scored_list):
top_n_key = -1 * self.top_n
top_n_list = sorted(scored_list, key=lambda x: x[1])[top_n_key:]
result_list = sorted(top_n_list, key=lambda x: x[0])
return result_list
| 368,855
|
Tokenize sentence into n-grams.
Args:
data: A string to tokenize. The result is stored in `self.token` as [n-gram, n-gram, n-gram, ...].
|
def tokenize(self, data):
super().tokenize(data)
token_tuple_zip = self.n_gram.generate_tuple_zip(self.token, self.n)
    self.token = ["".join(token_tuple) for token_tuple in token_tuple_zip]
| 368,858
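For example, with `n=2` and an assumed `generate_tuple_zip` that yields overlapping windows (like `zip(token, token[1:])`), a token list becomes joined bigrams:

token = ["I", "am", "here"]
token_tuple_zip = zip(token, token[1:])  # assumed behavior of generate_tuple_zip
print(["".join(token_tuple) for token_tuple in token_tuple_zip])  # ['Iam', 'amhere']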
|
Extract Q-Value from `self.q_df`.
Args:
state_key: The key of state.
action_key: The key of action.
Returns:
Q-Value.
|
def extract_q_df(self, state_key, action_key):
q = 0.0
if self.q_df is None:
self.save_q_df(state_key, action_key, q)
return q
q_df = self.q_df[self.q_df.state_key == state_key]
q_df = q_df[q_df.action_key == action_key]
if q_df.shape[0]:
        q = float(q_df["q_value"].values[0])
else:
self.save_q_df(state_key, action_key, q)
return q
| 368,861
|
Insert or update Q-Value in `self.q_df`.
Args:
state_key: State.
action_key: Action.
q_value: Q-Value.
Exceptions:
TypeError: If the type of `q_value` is not float.
|
def save_q_df(self, state_key, action_key, q_value):
if isinstance(q_value, float) is False:
raise TypeError("The type of q_value must be float.")
new_q_df = pd.DataFrame([(state_key, action_key, q_value)], columns=["state_key", "action_key", "q_value"])
if self.q_df is not None:
self.q_df = pd.concat([new_q_df, self.q_df])
self.q_df = self.q_df.drop_duplicates(["state_key", "action_key"])
else:
self.q_df = new_q_df
| 368,862
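Because the new row is concatenated in front and `drop_duplicates` keeps the first occurrence, an existing `(state_key, action_key)` pair is overwritten rather than duplicated:

import pandas as pd

q_df = pd.DataFrame([("s1", "a1", 0.1)], columns=["state_key", "action_key", "q_value"])
new_q_df = pd.DataFrame([("s1", "a1", 0.9)], columns=["state_key", "action_key", "q_value"])
q_df = pd.concat([new_q_df, q_df]).drop_duplicates(["state_key", "action_key"])
print(q_df["q_value"].values)  # [0.9]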
|
Extract R-Value from `self.r_df`.
Args:
state_key: The key of state.
r_value: R-Value (Reward).
action_key: The key of action if it is necessary for the parameter of the value function.
Exceptions:
TypeError: If the type of `r_value` is not float.
Returns:
R-Value.
|
def extract_r_df(self, state_key, r_value, action_key=None):
if isinstance(r_value, float) is False:
raise TypeError("The type of r_value must be float.")
r = 0.0
if self.r_df is None:
self.save_r_df(state_key, r, action_key)
return r
r_df = self.r_df[self.r_df.state_key == state_key]
if action_key is not None:
r_df = r_df[r_df.action_key == action_key]
if r_df.shape[0]:
        r = float(r_df["r_value"].values[0])
else:
self.save_r_df(state_key, r, action_key)
return r
| 368,865