id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
178,838 |
def print_spiral(root):
    """Print the tree in spiral (zigzag) level order, values space-separated.

    Uses two stacks: one drained right-to-left while the other is filled,
    then vice versa, which alternates the direction per level.
    """
    current = [root]
    opposite = []
    while current or opposite:
        # Odd levels: pop from `current`, queue children right-first.
        while current:
            node = current.pop()
            print(node.data, end=' ')
            if node.right:
                opposite.append(node.right)
            if node.left:
                opposite.append(node.left)
        # Even levels: pop from `opposite`, queue children left-first.
        while opposite:
            node = opposite.pop()
            print(node.data, end=' ')
            if node.left:
                current.append(node.left)
            if node.right:
                current.append(node.right)
178,839 |
def convert(root):
    """Post-order pass that rewrites each internal node's value to the
    bitwise AND of its two children's values (leaves are untouched)."""
    if root is None:
        return
    convert(root.left)
    convert(root.right)
    # Only rewrite when both children exist.
    if root.left is not None and root.right is not None:
        root.val = root.left.val & root.right.val
178,840 | def is_leaf(root):
def sum_left(root):
    """Sum the values of every left child that is a leaf.

    Iterative in-order walk with an explicit stack. NOTE(review): relies on
    an external is_leaf() helper whose body is not visible in this snippet.
    """
    total = 0
    stack = []
    node = root
    while stack or node:
        if node is not None:
            stack.append(node)
            node = node.left
        else:
            node = stack.pop()
            if is_leaf(node.left):
                total += node.left.val
            node = node.right
    return total
178,841 |
def check_path(root, arr, n, index):
    """Return True iff some root-to-leaf path spells out arr[0:n] starting
    at position `index`.

    Fixed: the old leaf check read arr[index] before verifying index < n,
    raising IndexError whenever a path was longer than the array.
    """
    if root is None:
        # Only the empty path matches an empty sequence.
        return n == 0
    # Bounds check FIRST, then compare the value at this depth.
    if index >= n or root.val != arr[index]:
        return False
    if root.left is None and root.right is None:
        # Leaf: the path matches only if it consumed the array exactly.
        return index == n - 1
    return (check_path(root.left, arr, n, index + 1)
            or check_path(root.right, arr, n, index + 1))
178,842 |
def flip_tree(root):
    """Turn the tree upside down: the left-most node becomes the new root,
    each old left child's left points at its sibling and its right at its
    parent. Returns the new root."""
    # Empty tree or single leaf: nothing to flip.
    if root is None or (root.left is None and root.right is None):
        return root
    new_root = flip_tree(root.left)
    # Re-hang the current root under its (already flipped) left child.
    root.left.left = root.right
    root.left.right = root
    root.left = None
    root.right = None
    return new_root
178,843 |
def print_route(root, stack):
    """Print every root-to-leaf path, one per line.

    `stack` carries the values of the current path prefix; callers pass [].
    """
    if root is None:
        return
    stack.append(root.val)
    if root.left is None and root.right is None:
        # Reached a leaf: emit the whole path.
        for value in stack:
            print(value, end=' ')
        print()
    print_route(root.left, stack)
    print_route(root.right, stack)
    # Undo this node's contribution before returning to the parent.
    stack.pop()
178,844 |
def continuous(root):
    """True iff every parent/child pair in the tree differs by exactly 1.

    Cases: empty tree and lone leaf are trivially continuous; otherwise
    each existing child must differ from its parent by 1 and its own
    subtree must be continuous.
    """
    if root is None:
        return True
    if root.left is None and root.right is None:
        return True
    if root.left is None:
        return abs(root.val - root.right.val) == 1 and continuous(root.right)
    if root.right is None:
        return abs(root.val - root.left.val) == 1 and continuous(root.left)
    return (abs(root.val - root.right.val) == 1
            and abs(root.left.val - root.val) == 1
            and continuous(root.left)
            and continuous(root.right))
178,845 |
def find(root):
    """Largest value stored in any LEFT child anywhere in the tree.

    Returns the sentinel -999999999999 when the tree has no left children.
    """
    sentinel = -999999999999
    if not root:
        return sentinel
    candidate = root.left.val if root.left is not None else sentinel
    return max(find(root.left), candidate, find(root.right))
178,846 | class Node():
def __init__(self, val):
def __str__(self):
def remove_duplicates(head):
    """From a sorted linked list, drop every value that occurs more than
    once (keeping only values that appear exactly once). Returns the new
    head. Relies on the external Node class for the dummy sentinel."""
    dummy = Node(0)
    dummy.next = head
    prev = dummy
    curr = head
    while curr and curr.next:
        if curr.val == curr.next.val:
            # Skip the whole run of equal values.
            runner = curr
            while runner.next and runner.next.val == runner.val:
                runner = runner.next
            prev.next = runner.next
            curr = runner.next
        else:
            curr = curr.next
            prev = prev.next
    return dummy.next
def merge_two_lists(l1, l2):
    """Merge two sorted linked lists recursively; returns the merged head."""
    if not l1:
        return l2
    if not l2:
        return l1
    if l1.val < l2.val:
        l1.next = merge_two_lists(l1.next, l2)
        return l1
    l2.next = merge_two_lists(l1, l2.next)
    return l2


def merge_k_lists(lists):
    """Merge k sorted linked lists by recursive halving (divide and conquer)."""
    k = len(lists)
    if k == 0:
        return
    if k == 1:
        return lists[0]
    if k == 2:
        return merge_two_lists(lists[0], lists[1])
    mid = k // 2
    return merge_two_lists(merge_k_lists(lists[:mid]),
                           merge_k_lists(lists[mid:]))
178,848 |
def remove(head, n):
    """Delete the n-th node from the end of the list; returns the new head.

    Fixed: when n equals the list length (i.e. the head itself must go),
    the old version advanced `fast` past None and crashed; it could never
    remove the head. Two-pointer technique otherwise unchanged.
    """
    fast = head
    for _ in range(n):
        fast = fast.next
    if fast is None:
        # n == length: the target is the head node itself.
        return head.next
    slow = head
    while fast.next:
        fast = fast.next
        slow = slow.next
    # slow now precedes the node to delete.
    slow.next = slow.next.next
    return head
178,849 |
def print_list(head):
    """Print the list's values space-separated on one line (no newline)."""
    node = head
    while node is not None:
        print(node.val, end=" ")
        node = node.next
178,850 |
def arrange(head):
    """Rearrange the list so odd-position nodes come first (in order),
    followed by the even-position nodes (in order). Returns the head."""
    if not head:
        return None
    odd_tail = head
    even_head = head.next
    even_tail = even_head
    while even_tail and even_tail.next:
        # Pull the next odd node forward, then the next even node.
        odd_tail.next = even_tail.next
        odd_tail = odd_tail.next
        even_tail.next = odd_tail.next
        even_tail = even_tail.next
    # Append the even sub-list after the odd one.
    odd_tail.next = even_head
    return head
# NOTE(review): stray top-level demo code — `head` and `new_head` are not
# defined in this snippet; presumably they come from the surrounding script.
# First pass: print the original list, one value per line.
curr = head
while curr:
    print(curr.val)
    curr = curr.next
# Second pass: print the list starting at new_head (presumably the result
# of reverse() below — TODO confirm against the full script).
curr = new_head
while curr:
    print(curr.val)
    curr = curr.next
def reverse(head):
    """Reverse a singly linked list in place; returns the new head."""
    if not head:
        return None
    prev_node = None
    node = head
    while node is not None:
        # Detach, point backwards, advance.
        following = node.next
        node.next = prev_node
        prev_node = node
        node = following
    return prev_node
178,852 |
def delete_last_occurrence(head, val):
    """Delete the LAST node whose value equals `val`; returns the head.

    Fixed: when the last occurrence was the head node itself, the old
    version set head = None and discarded the entire list instead of just
    unlinking the head.
    """
    if not head:
        return None
    prev = None
    last_prev = None
    last_occ = None
    curr = head
    # Single pass: remember the most recent match and its predecessor.
    while curr is not None:
        if curr.val == val:
            last_prev = prev
            last_occ = curr
        prev = curr
        curr = curr.next
    if last_occ is not None:
        if last_prev is not None:
            last_prev.next = last_occ.next
        else:
            # The last occurrence is the head node: advance the head.
            head = last_occ.next
    return head
class Node():
    """Singly linked list node."""
    def __init__(self, val):
        self.val = val
        self.next = None


def push(head, data):
    """Append a new node holding `data` at the tail of the list.

    `head` must be an existing node (the list may not be empty).
    """
    tail = head
    while tail.next:
        tail = tail.next
    tail.next = Node(data)
# NOTE(review): stray blank-line prints left over from the surrounding
# demo script.
print()
print()
def print_list(head):
    # Walk the list and print each value followed by a space (no newline).
    curr = head
    while curr:
        print(curr.val, end=' ')
        curr = curr.next
class Node():
    """Singly linked list node holding one decimal digit."""
    def __init__(self, val):
        self.val = val
        self.next = None


def sum_numbers(head1, head2):
    """Add two numbers stored as linked lists, least-significant digit
    first; returns the head of a newly built result list."""
    carry = 0
    result_head = None
    tail = None
    node = None
    while head1 is not None or head2 is not None:
        d1 = head1.val if head1 is not None else 0
        d2 = head2.val if head2 is not None else 0
        total = carry + d1 + d2
        if total >= 10:
            carry = 1
            total -= 10
        else:
            carry = 0
        node = Node(total)
        if result_head is None:
            # First digit becomes the head of the result.
            result_head = node
        else:
            tail.next = node
        tail = node
        if head1 is not None:
            head1 = head1.next
        if head2 is not None:
            head2 = head2.next
    if carry > 0:
        # One extra digit overflowed past both inputs.
        node.next = Node(carry)
    return result_head
178,856 |
def detect_cycle(head):
    """Floyd's tortoise-and-hare: True iff the list contains a cycle."""
    if not head or not head.next:
        return False
    slow = head
    fast = head.next
    while slow != fast:
        # Hare ran off the end: no cycle.
        if fast is None or fast.next is None:
            return False
        slow = slow.next
        fast = fast.next.next
    return True
178,857 |
def merge(l1, l2):
    """Recursively merge two sorted linked lists; returns the merged head."""
    if not l1:
        return l2
    if not l2:
        return l1
    # Take the smaller head, merge the remainder behind it.
    if l1.val < l2.val:
        l1.next = merge(l1.next, l2)
        return l1
    l2.next = merge(l1, l2.next)
    return l2
178,858 |
def remove(head, val):
    """Remove every node holding `val`; returns the new head (may be None)."""
    # Strip matching nodes off the front first.
    while head and head.val == val:
        head = head.next
    if not head:
        return None
    node = head
    while node.next:
        if node.next.val == val:
            node.next = node.next.next
        else:
            node = node.next
    return head
class Node():
    """Singly linked list node."""
    def __init__(self, val):
        self.val = val
        self.next = None


def pair_swap(head):
    """Swap every two adjacent nodes (1->2->3->4 becomes 2->1->4->3);
    returns the new head. An odd trailing node is left in place.

    Fixed: the old pointer juggling dereferenced None on even-length
    lists (AttributeError) and corrupted links for lists of length >= 3;
    it also allocated a throwaway dummy Node.
    """
    if head is None or head.next is None:
        return head
    new_head = head.next
    prev = None
    curr = head
    while curr is not None and curr.next is not None:
        second = curr.next
        # Detach the pair and re-link it swapped.
        curr.next = second.next
        second.next = curr
        if prev is not None:
            # Stitch the previous (already swapped) pair to this one.
            prev.next = second
        prev = curr
        curr = curr.next
    return new_head
178,860 | from collections import defaultdict
class Node:
    """BFS search state: a reached integer plus its distance from the start."""
    def __init__(self, value, level):
        self.value = value  # the integer reached so far
        self.level = level  # number of operations used to reach it
def min_steps(x, y):
    """Minimum number of operations (double, or subtract one) to turn x
    into y, found by breadth-first search.

    Fixed/improved: `visited` is now a set (membership was O(n) on a
    list), the frontier is a deque (list.pop(0) was O(n)), states are
    marked visited when enqueued so duplicates never enter the queue,
    and plain (value, level) tuples replace the helper Node objects.
    """
    from collections import deque
    if x == y:
        return 0
    visited = {x}
    queue = deque([(x, 0)])
    while queue:
        value, level = queue.popleft()
        # Early exit: either child reaches the target one step from here.
        if value * 2 == y or value - 1 == y:
            return level + 1
        for child in (value * 2, value - 1):
            if child not in visited:
                visited.add(child)
                queue.append((child, level + 1))
178,861 |
def toggle_string_1(string):
    """Return the string with the case of every cased character swapped
    (delegates to str.swapcase; non-letters are left untouched)."""
    return string.swapcase()
178,862 |
def toggle_string_2(string):
    """Swap the case of each character manually; non-letters pass through."""
    pieces = []
    for ch in string:
        if ch.isupper():
            pieces.append(ch.lower())
        elif ch.islower():
            pieces.append(ch.upper())
        else:
            pieces.append(ch)
    return ''.join(pieces)
178,863 |
def roman_to_integer(input):
    """Convert a Roman-numeral string (e.g. 'XIV') to its integer value.

    Fixed two bugs: `range(input)` was called on the string itself
    (TypeError), and the final symbol was never added because the whole
    add/subtract step sat inside the `i + 1 < len(input)` guard.
    """
    romans = {'I': 1, 'V': 5, 'X': 10, 'L': 50, 'C': 100, 'D': 500, 'M': 1000}
    total = 0
    for i in range(len(input)):
        value = romans[input[i]]
        # A symbol smaller than its successor is subtractive (IV, IX, ...).
        if i + 1 < len(input) and romans[input[i + 1]] > value:
            total -= value
        else:
            total += value
    return total
178,864 |
def isInterleaving(string_A, string_B, string_C):
    """Return True iff string_C is an interleaving of string_A and string_B.

    Uses a rolling one-dimensional DP row: after processing prefix
    lengths (i, j), dp[j] says whether C[0:i+j] interleaves A[0:i] and
    B[0:j]. The row is updated in place, so the statement order below is
    significant.
    """
    #Check if the length of String C is equal to sum of lengths of String A and B
    #In other words, check if String C contains all characters of String A and B
    if(len(string_C) != len(string_A) + len(string_B)): return False
    #Create an empty array of length of String B
    dp = [None] * (len(string_B) + 1)
    for i in range(len(string_A) + 1):
        for j in range(len(string_B) + 1):
            if(i == 0 and j == 0):
                #The first value of array dp always holds True
                dp[j] = True
            elif(i == 0):
                # First row: C must match B character by character.
                dp[j] = dp[j - 1] and string_B[j - 1] == string_C[j - 1]
            elif(j == 0):
                # First column: dp[0] still holds the previous row's value.
                dp[j] = dp[j] and string_A[i - 1] == string_C[i - 1]
            else:
                # Take C[i+j-1] from A (dp[j], previous row) or B (dp[j-1]).
                dp[j] = ((dp[j] and string_A[i - 1] == string_C[i + j - 1]) or (dp[j - 1] and string_B[j - 1] == string_C[i + j - 1]))
    return dp[len(string_B)]
178,865 |
def is_one_away(str1, str2):
    """True iff the strings are at most one edit apart (insert, delete,
    or substitute a single character)."""
    len1, len2 = len(str1), len(str2)
    # More than one length difference can never be a single edit.
    if abs(len1 - len2) > 1:
        return False
    edits = 0
    i = j = 0
    while i < len1 and j < len2:
        if str1[i] == str2[j]:
            i += 1
            j += 1
            continue
        # Mismatch: only one edit is allowed in total.
        if edits:
            return False
        edits = 1
        if len1 > len2:
            i += 1          # skip the extra char in the longer string
        elif len2 > len1:
            j += 1
        else:
            i += 1          # same length: substitution, advance both
            j += 1
    # A leftover tail counts as one more edit (appending a character).
    return not ((i < len1 or j < len2) and edits)
178,866 |
The provided code snippet includes necessary dependencies for implementing the `is_rotate_string` function. Write a Python function `def is_rotate_string(A, B)` to solve the following problem:
Given two strings, A and B. A shift on A consists of taking string A and moving the leftmost character to the rightmost position. For example, if A = 'abcde', then it will be 'bcdea' after one shift on A. Return True if and only if A can become B after some number of shifts on A. :type A: str :type B: str :rtype: bool
Here is the function:
def is_rotate_string(A, B):
    """
    Given two strings, A and B.
    A shift on A consists of taking string A and moving the leftmost character to the rightmost position. For example, if A = 'abcde', then it will be 'bcdea' after one shift on A. Return True if and only if A can become B after some number of shifts on A.
    :type A: str
    :type B: str
    :rtype: bool
    """
    # Improvement: instead of materialising every rotation (O(n^2)),
    # use the classic identity: B is a rotation of A iff B occurs as a
    # substring of A + A (and the lengths match).
    if len(A) != len(B):
        return False
    return B in A + A
178,867 |
def are_anagrams(string1, string2):
    """True iff string2 is an anagram of string1.

    Fixed: the old XOR-of-codepoints check is not a valid anagram test —
    e.g. 'aa' vs 'bb' XORs to 0 and was wrongly reported as an anagram.
    Comparing sorted character sequences is correct.
    """
    # Different lengths can never be anagrams of each other.
    if len(string1) != len(string2):
        return False
    return sorted(string1) == sorted(string2)
178,868 | from collections import Counter
def unique_char_check(S):
    """True iff every character of S occurs exactly once."""
    # Counter has one entry per distinct character; equal lengths means
    # no character repeats.
    return len(Counter(S)) == len(S)
178,869 | def is_vowel(character):
if character.lower() in ['a', 'e', 'i', 'o', 'u']:
return True
else:
return False
def adjacent_pairs(string):
string=string.lower()
n=len(string)
count = 0
for i in range(0,n):
if ((is_vowel(string[i]) and is_vowel(string[i + 1]))):
count += 1
return count | null |
178,870 |
def finding_substrings(string):
    """Print the list of all contiguous substrings of `string`, then how
    many there are (n*(n+1)/2 for a string of length n)."""
    substrings = []
    total = 0
    for start in range(len(string)):
        for end in range(start + 1, len(string) + 1):
            substrings.append(string[start:end])
            total += 1
    # printing result
    print(substrings)
    print(total)
178,871 |
def reverse(s):
    """Reverse only the alphanumeric characters of the char-list `s`
    in place (other characters keep their positions); returns the
    joined string."""
    lo, hi = 0, len(s) - 1
    while lo < hi:
        if not s[lo].isalnum():
            lo += 1
        elif not s[hi].isalnum():
            hi -= 1
        else:
            # Both ends alphanumeric: swap and close in.
            s[lo], s[hi] = s[hi], s[lo]
            lo += 1
            hi -= 1
    return ''.join(s)
def long_Prefix_Suffix_Array(pattern, P, long_prefix_suffix):
    """Fill `long_prefix_suffix` with KMP failure-function values.

    long_prefix_suffix[i] = length of the longest proper prefix of
    pattern[0:i+1] that is also a suffix of it. P is len(pattern) and
    long_prefix_suffix must already be a list of length P.
    """
    # length of the previous longest prefix suffix
    l = 0
    long_prefix_suffix[0] = 0
    i = 1
    # the loop calculates long_prefix_suffix[i] for i = 1 to P-1
    while i < P:
        if pattern[i] == pattern[l]:
            l += 1
            long_prefix_suffix[i] = l
            i += 1
        else:
            if l != 0:
                # Fall back to the next-shorter border; note i is NOT
                # advanced here — we retry the same character.
                l = long_prefix_suffix[l - 1]
            else:
                long_prefix_suffix[i] = 0
                i += 1
def KMP_pattern_search(pattern, text):
    """Knuth-Morris-Pratt search: print the start index of every
    occurrence of `pattern` in `text`."""
    P = len(pattern)
    Q = len(text)
    # create long_prefix_suffix[] that will hold the longest prefix suffix
    # values for pattern
    long_prefix_suffix = [0] * P
    # index for pattern[]
    j = 0
    # preprocess the pattern (caluclate long_prefix_suffix[] array)
    long_Prefix_Suffix_Array(pattern, P, long_prefix_suffix)
    # index for text[]
    i = 0
    while i < Q:
        if pattern[j] == text[i]:
            i += 1
            j += 1
        if j == P:
            # Full match ending at i-1; report its start, then continue
            # from the longest border to find overlapping matches.
            print("Pattern found at index " + str(i-j))
            j = long_prefix_suffix[j - 1]
        # mismatch after j matches
        elif i < Q and pattern[j] != text[i]:
            if j != 0:
                # Reuse the already-matched border instead of restarting.
                j = long_prefix_suffix[j - 1]
            else:
                i += 1
# Module-level letter-frequency accumulator shared with check_permutations.
letter_counts = {}


def populate_letter_count(word1):
    """Accumulate the per-letter counts of `word1` into the module-level
    letter_counts dict (counts add onto whatever is already there)."""
    for letter in word1:
        letter_counts[letter] = letter_counts.get(letter, 0) + 1
def check_permutations(word1, word2):
    """True iff word2 is a permutation of word1.

    Fixed: the old version accumulated counts into the module-level
    letter_counts dict and never reset it, so leftover counts from a
    failed call corrupted later calls (e.g. after check_permutations
    ('aa','ab'), the residue made check_permutations('bb','ab') return
    True). Counting is now done in a local dict.
    """
    # Case 1: different lengths can never be permutations.
    if len(word1) != len(word2):
        return False
    counts = {}
    for letter in word1:
        counts[letter] = counts.get(letter, 0) + 1
    # Consume word2's letters against word1's counts.
    for letter in word2:
        if letter not in counts:
            return False
        counts[letter] -= 1
        if counts[letter] == 0:
            del counts[letter]
    return True
178,874 |
def check_permutation2(word1, word2):
    """True iff the two words are permutations of each other.

    Fixed: the comparison's result was computed but never returned, so
    the function always returned None.
    """
    return sorted(word1) == sorted(word2)
178,875 |
def inorder(root):
    """Iterative in-order traversal; prints values space-separated.

    Fixed: the final step did `root = root.right()`, calling the child
    attribute as if it were a method — TypeError on any node that has a
    right child.
    """
    if not root:
        return None
    stack = []
    while True:
        if root:
            # Descend left as far as possible.
            stack.append(root)
            root = root.left
        else:
            if not stack:
                break
            root = stack.pop()
            print(root.val, end=" ")
            root = root.right
178,876 | def min_value_node(root):
def delete(root, val):
    """Delete `val` from the BST rooted at `root`; returns the new
    subtree root.

    NOTE(review): relies on an external min_value_node() helper whose
    body is truncated in this snippet — for the two-children case below
    it must return the left-most NODE of a subtree (temp.val is read).
    """
    if not root:
        return root
    if val < root.val:
        root.left = delete(root.left, val)
    elif val > root.val:
        root.right = delete(root.right, val)
    else:
        # Root with one child or no child
        if root.left is None:
            temp = root.right
            root = None
            return temp
        elif root.right is None:
            temp = root.left
            root = None
            return temp
        # Two children: copy the in-order successor's value here, then
        # delete the successor from the right subtree.
        temp = min_value_node(root.right)
        root.val = temp.val
        root.right = delete(root.right, temp.val)
    return root
class Node():
    """Binary search tree node."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


def insert(root, val):
    """Iteratively insert `val` into the BST and return the tree's root.

    Fixed: inserting into an empty tree crashed (`parent` stayed None and
    parent.val raised AttributeError). The function now returns the root
    (the new node for an empty tree), which is backward-compatible — the
    old version returned None and callers relied on mutation only.
    """
    new_node = Node(val)
    if root is None:
        return new_node
    parent = None
    curr = root
    while curr:
        parent = curr
        # Duplicates go to the right, matching the original ordering.
        curr = curr.right if curr.val <= val else curr.left
    if parent.val <= val:
        parent.right = new_node
    else:
        parent.left = new_node
    return root
178,878 |
def inorder(root):
    """Iterative in-order traversal; prints values space-separated."""
    if not root:
        return None
    stack = []
    node = root
    while stack or node:
        if node is not None:
            # Push the whole left spine before visiting.
            stack.append(node)
            node = node.left
        else:
            node = stack.pop()
            print(node.val, end=" ")
            node = node.right
def height(root, ans):
    """Height of the subtree (counted in nodes); while recursing, track
    the best diameter seen so far in ans[0]."""
    if not root:
        return 0
    left_h = height(root.left, ans)
    right_h = height(root.right, ans)
    # The longest path THROUGH this node has left_h + right_h + 1 nodes.
    ans[0] = max(ans[0], left_h + right_h + 1)
    return max(left_h, right_h) + 1


def diameter(root):
    """Number of nodes on the longest path between any two nodes."""
    if not root:
        return 0
    best = [-99999999999]
    height(root, best)
    return best[0]
def find_largest(root):
    """Value of the right-most (largest) node of a BST."""
    node = root
    while node is not None:
        if node.right is None:
            return node.val
        node = node.right


def second_largest(root):
    """Second largest value in a BST with at least two nodes."""
    if not root or (not root.left and not root.right):
        return "BST should have atleast 2 nodes"
    node = root
    while node is not None:
        # Largest has no right subtree: answer is the max of its left one.
        if node.left is not None and node.right is None:
            return find_largest(node.left)
        # Node whose right child is the (leaf) maximum: node is the answer.
        if (node.right is not None
                and node.right.left is None
                and node.right.right is None):
            return node.val
        node = node.right
178,881 | class Node:
def __init__(self, val):
def insert(root, key):
    """Insert `key` into the BST rooted at `root`; returns the subtree
    root (a fresh node when root is None). Duplicate keys are ignored.

    NOTE(review): depends on a Node class whose definition is truncated
    in this snippet — its constructor is assumed to take the value.
    """
    if root == None:
        return Node(key)
    if key < root.val:
        root.left = insert(root.left, key)
    elif key > root.val:
        root.right = insert(root.right, key)
    return root
178,882 |
def lca(root, n1, n2):
    """Value of the lowest common ancestor of n1 and n2 in a BST
    (iterative descent); returns None for an empty tree."""
    node = root
    while node:
        if n1 < node.val and n2 < node.val:
            node = node.left
        elif n1 > node.val and n2 > node.val:
            node = node.right
        else:
            # The two targets split here (or one equals node.val).
            return node.val
    return node
178,883 |
def print_leaves(root):
    """Return the tree's leaf values, left to right, as a list.

    Fixed: the generator had no None guard and kept recursing below
    leaves, so dfs(None) dereferenced None.left (AttributeError) on any
    input; an empty tree now yields [].
    """
    def dfs(node):
        if node is None:
            return
        if not node.left and not node.right:
            yield node.val
            return
        yield from dfs(node.left)
        yield from dfs(node.right)
    return list(dfs(root))
178,884 | import collections
def mean(arr):
    """Arithmetic mean of a non-empty iterable of numbers."""
    m = 0
    for x in arr:
        m += x
    return m / len(arr)


def bfs(root):
    """Per-level averages of the tree's values, top level first.

    Fixed: the old code passed the queue of NODE objects straight to
    mean(), which tried to sum the nodes themselves (TypeError); it now
    averages the node values.
    """
    if not root:
        return
    queue = collections.deque([root])
    result = []
    while queue:
        next_queue = collections.deque()
        for node in queue:
            if node.left:
                next_queue.append(node.left)
            if node.right:
                next_queue.append(node.right)
        # Average the values of this level's nodes, not the nodes.
        result.append(mean([node.val for node in queue]))
        queue = next_queue
    return result
178,885 |
def inorder(root):
    """In-order traversal without recursion; prints values space-separated."""
    if not root:
        return None
    pending = []
    node = root
    # Keep adding left until there is none, then emit and turn right.
    while pending or node:
        while node is not None:
            pending.append(node)
            node = node.left
        node = pending.pop()
        print(node.val, end=" ")
        node = node.right
178,886 |
def postorder(root):
    """Iterative post-order traversal using a single stack; prints values
    space-separated."""
    if not root:
        return None
    stack = []
    curr = root
    while curr or stack:
        if curr:
            # Descend the left spine.
            stack.append(curr)
            curr = curr.left
        else:
            temp = stack[-1].right
            if not temp:
                # No right subtree: emit the node, then unwind every
                # ancestor whose right subtree has just been finished.
                temp = stack.pop()
                print(temp.val, end=" ")
                while stack and temp == stack[-1].right:
                    temp = stack.pop()
                    print(temp.val, end=" ")
            else:
                # Process the right subtree before emitting the parent.
                curr = temp
178,887 |
def preorder(root):
    """Iterative pre-order traversal; prints values space-separated."""
    if not root:
        return None
    stack = [root]
    while stack:
        node = stack.pop()
        print(node.val, end=" ")
        # Push right before left so the left child is popped first.
        for child in (node.right, node.left):
            if child:
                stack.append(child)
178,888 |
def range_sum_preorder(root, L, R):
    """Sum of all BST values within [L, R], pruning subtrees that the
    BST ordering proves cannot contain in-range values."""
    total = 0
    stack = [root]
    while stack:
        node = stack.pop()
        if node is None:
            continue
        if L <= node.val <= R:
            total += node.val
        # Left subtree can only matter if values below node.val reach L.
        if node.val > L:
            stack.append(node.left)
        if node.val < R:
            stack.append(node.right)
    return total
178,889 | import sys
def closest_element(root, k):
    """Value in the BST closest to k (exact match short-circuits);
    returns None for an empty tree."""
    if not root:
        return None
    best_gap = sys.maxsize
    best = None
    node = root
    while node:
        if node.val == k:
            return node.val
        gap = abs(node.val - k)
        if gap < best_gap:
            best_gap = gap
            best = node.val
        # Descend toward k using the BST ordering.
        node = node.left if node.val > k else node.right
    return best
178,890 |
def reverse_inorder(root, k):
    """k-th LARGEST value in a BST via reverse (right-first) in-order
    traversal; returns None for an empty tree."""
    if not root:
        return None
    seen = 0
    stack = []
    node = root
    while stack or node:
        if node is not None:
            stack.append(node)
            node = node.right
        else:
            node = stack.pop()
            seen += 1
            if seen == k:
                return node.val
            node = node.left
    return "not enough elements in BST"
178,891 |
def reverse_inorder(root):
    """Values of a BST in descending order, as a list; None when empty."""
    if not root:
        return None
    descending = []
    stack = []
    node = root
    while stack or node:
        if node is not None:
            # Right-first in-order visits largest values first.
            stack.append(node)
            node = node.right
        else:
            node = stack.pop()
            descending.append(node.val)
            node = node.left
    return descending
178,892 |
def inorder(root, k):
    """k-th SMALLEST value in a BST via in-order traversal; returns None
    for an empty tree."""
    if not root:
        return None
    seen = 0
    stack = []
    node = root
    while stack or node:
        if node is not None:
            stack.append(node)
            node = node.left
        else:
            node = stack.pop()
            seen += 1
            if seen == k:
                return node.val
            node = node.right
    return "tree not big enough"
178,893 |
def merge(t1, t2):
    """Merge tree t2 into tree t1 node-wise (overlapping nodes sum their
    values); returns the merged root.

    Fixed: the merged right subtree was assigned to t2.right instead of
    t1.right, so t1 (the returned tree) silently lost its right merge.
    """
    if not t1:
        return t2
    if not t2:
        return t1
    t1.val = t1.val + t2.val
    t1.left = merge(t1.left, t2.left)
    t1.right = merge(t1.right, t2.right)
    return t1
# NOTE(review): stray header line from the surrounding demo script.
print("After deletion")
def inorder(root):
    """Iterative in-order traversal of a counted BST; prints each value
    with its duplicate count, one per line."""
    if not root:
        return None
    stack = []
    while True:
        if root:
            stack.append(root)
            root = root.left
        else:
            if not stack:
                break
            root = stack.pop()
            # Nodes carry a `count` of duplicate insertions.
            print(root.val, "(", root.count, ")")
            root = root.right
class Node():
    """BST node that also tracks how many times its value was inserted."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
        self.count = 1  # number of duplicate insertions of val


def insert(root, val):
    """Insert val into the counted BST; duplicates bump the matching
    node's count. Returns the (possibly new) subtree root."""
    if root is None:
        return Node(val)
    if val == root.val:
        root.count += 1
    elif val < root.val:
        root.left = insert(root.left, val)
    else:
        root.right = insert(root.right, val)
    return root
def min_value_node(root):
    """Return the left-most (minimum) NODE of the subtree.

    Fixed: the loop previously ran until curr became None and then
    dereferenced curr.val — an AttributeError on every call; it also
    returned the bare value while the caller (delete) needs the node.
    """
    curr = root
    while curr.left is not None:
        curr = curr.left
    return curr


def delete(root, val):
    """Delete one occurrence of `val` from a counted BST; returns the
    new subtree root."""
    if not root:
        return None
    if val < root.val:
        root.left = delete(root.left, val)
    elif val > root.val:
        root.right = delete(root.right, val)
    else:
        if root.count > 1:
            # Duplicates stored in this node: just drop one occurrence.
            root.count -= 1
        else:
            # Zero or one child: splice the child (or None) in place.
            if not root.left:
                return root.right
            if not root.right:
                return root.left
            # Two children: copy the in-order successor's value here,
            # then delete that successor from the right subtree.
            temp = min_value_node(root.right)
            root.val = temp.val
            root.right = delete(root.right, temp.val)
    return root
178,897 |
def ceil(root, key):
    """Smallest value in the BST that is >= key, or -1 if none exists.

    Fixed: the final fallback returned `root.key`, but nodes store their
    value in `.val` — AttributeError whenever the left subtree had no
    ceiling candidate.
    """
    if not root:
        return -1
    if root.val == key:
        return root.val
    if root.val < key:
        # Everything here and to the left is too small.
        return ceil(root.right, key)
    # root.val > key: a tighter ceiling may exist on the left.
    val = ceil(root.left, key)
    return val if val >= key else root.val
178,898 |
def search(root, val):
    """Return the BST node holding `val`, or None if absent.

    Fixed: the branch directions were inverted — the old code descended
    LEFT when root.val < val (target larger), so it could only find
    values on a path it happened to stumble onto.
    """
    if root is None or root.val == val:
        return root
    if val < root.val:
        return search(root.left, val)
    return search(root.right, val)
178,899 |
def search_iterative(root, val):
    """True iff `val` occurs in the BST (iterative descent)."""
    node = root
    while node:
        if node.val == val:
            return True
        node = node.right if val > node.val else node.left
    return False
178,900 |
def trim(root, L, R):
    """Trim the BST so every remaining value lies in [L, R]; returns the
    new root (relative order of kept nodes is preserved)."""
    if root is None:
        return None
    # Out-of-range root: the answer lives entirely in one subtree.
    if root.val > R:
        return trim(root.left, L, R)
    if root.val < L:
        return trim(root.right, L, R)
    root.left = trim(root.left, L, R)
    root.right = trim(root.right, L, R)
    return root
class Node:
    """Binary search tree node."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None
# NOTE(review): stray demo line — `arr` is not defined here and
# sorted_array_to_bst is defined below this call; presumably this line
# belongs after the function in the original script.
root = sorted_array_to_bst(arr)
def sorted_array_to_bst(arr):
    """Build a height-balanced BST from a sorted sequence; returns its
    root (None for an empty input)."""
    if not arr:
        return None
    mid = len(arr) // 2
    # The middle element becomes the root; halves recurse on each side.
    node = Node(arr[mid])
    node.left = sorted_array_to_bst(arr[:mid])
    node.right = sorted_array_to_bst(arr[mid + 1:])
    return node
def store_inorder(root, inorder):
    """Append the tree's values to `inorder` via in-order traversal."""
    if root is None:
        return
    store_inorder(root.left, inorder)
    inorder.append(root.data)
    store_inorder(root.right, inorder)


def count_nodes(root):
    """Number of nodes in the tree."""
    if root is None:
        return 0
    return 1 + count_nodes(root.left) + count_nodes(root.right)


def array_to_bst(arr, root):
    """Overwrite the tree's values in in-order position, consuming `arr`
    from the front."""
    if root is None:
        return
    array_to_bst(arr, root.left)
    root.data = arr.pop(0)
    array_to_bst(arr, root.right)


def bt_to_bst(root):
    """Convert a binary tree to a BST in place, keeping its shape:
    collect values in order, sort them, write them back in order."""
    if root is None:
        return
    n = count_nodes(root)  # kept for parity with the original (unused)
    values = []
    store_inorder(root, values)
    values.sort()
    array_to_bst(values, root)
178,904 |
def inorder(root):
    """Recursive in-order traversal; prints one value per line."""
    if root is None:
        return
    inorder(root.left)
    print(root.val)
    inorder(root.right)
def postorder(root):
    """Recursive post-order traversal; prints one value per line."""
    if root:
        postorder(root.left)
        postorder(root.right)
        print(root.val)


def preorder(root):
    """Recursive pre-order traversal; prints one value per line.

    Fixed: the children were traversed with postorder(), producing a
    mixed order for any tree deeper than two levels.
    """
    if root:
        print(root.val)
        preorder(root.left)
        preorder(root.right)
class Node():
    """Binary search tree node."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


def insertion_recursive(root, val):
    """Insert `val` below `root` recursively; returns a fresh node only
    when called with root=None (otherwise mutates in place)."""
    if root is None:
        return Node(val)
    if val > root.val:
        if root.right is None:
            root.right = Node(val)
        else:
            insertion_recursive(root.right, val)
    else:
        # Equal values go to the left, matching the original ordering.
        if root.left is None:
            root.left = Node(val)
        else:
            insertion_recursive(root.left, val)
class Node():
    """Binary tree node."""
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None


def increasing_bst(root):
    """Rebuild the tree as a right-only chain of NEW nodes holding the
    in-order values; returns the chain's head."""
    def walk(node):
        if node is None:
            return
        yield from walk(node.left)
        yield node.val
        yield from walk(node.right)

    dummy = Node(None)
    tail = dummy
    for value in walk(root):
        tail.right = Node(value)
        tail = tail.right
    return dummy.right
def ll_to_bst_recur(head, n):
    """Build a height-balanced BST from the first n nodes of a linked list.

    NOTE(review): unimplemented stub — the author's TODO is still here,
    so this currently returns None for every input.
    """
    if n <= 0:
        return None
    # TODO: Fix me!
    # left = ll_to_bst_recur(
def linked_list_to_bst(head):
    """Convert a sorted linked list into a balanced BST.

    Counts the list's length, then delegates to ll_to_bst_recur (which
    is still a stub above, so the result is currently always None).
    """
    if not head:
        return None
    # One pass to measure the list.
    curr = head
    n = 0
    while curr:
        n += 1
        curr = curr.next
    return ll_to_bst_recur(head, n)
def are_identical(root1, root2):
    # NOTE(review): this block appears garbled/incomplete. The outer
    # function's own body seems to be missing — as written it only
    # defines `check` and implicitly returns None, while `check` calls
    # back into are_identical, which looks like unbounded recursion.
    # Presumably this was a subtree-containment check whose node-by-node
    # comparison body was lost in extraction — TODO recover the original.
    def check(root1, root2):
        if root1 == None:
            return True
        if root2 == None:
            return False
        if are_identical(root1, root2):
            return True
        return (check(root1.left, root2) or check(root1.right, root2))
178,911 | import sys
def check_BST(root, min, max):
    """True iff the tree is a BST whose values all lie in [min, max].

    Uses val-1 / val+1 when recursing, so values are assumed integral
    and duplicates are rejected.
    """
    if root is None:
        return True
    if not (min <= root.val <= max):
        return False
    return (check_BST(root.left, min, root.val - 1)
            and check_BST(root.right, root.val + 1, max))
178,912 |
def print_ancestor_recursive(root, key):
    """Find `key` in the tree and bubble an ancestor value back up.

    Returns True at the node holding key, the nearest ancestor's value
    while unwinding (so the top-level call yields the root's value when
    key exists below it), and False when key is absent.

    Fixed: the ancestor branch returned `root.data`, but every other
    line reads `root.val` — AttributeError whenever key was found below
    the root. NOTE: an ancestor value of 0 is falsy and would stop the
    bubbling — inherent to this return-value design.
    """
    if not root:
        return False
    if root.val == key:
        return True
    if print_ancestor_recursive(root.left, key) or print_ancestor_recursive(root.right, key):
        return root.val
    return False
178,913 |
def min_value(root):
    """Minimum value of a BST (its left-most node); None for empty tree."""
    if not root:
        return None
    node = root
    while node.left:
        node = node.left
    return node.val
178,914 |
def max_value(root):
    """Maximum value of a BST (its right-most node); None for empty tree."""
    if not root:
        return None
    node = root
    while node.right:
        node = node.right
    return node.val
178,915 | import collections
def bfs(root):
    """Level-order traversal that prints one value per line, visiting the
    RIGHT child before the left within each level."""
    if not root:
        return
    queue = collections.deque([root])
    while queue:
        node = queue.popleft()
        print(node.val)
        # Right first, matching the original enqueue order.
        if node.right:
            queue.append(node.right)
        if node.left:
            queue.append(node.left)
178,917 | from setuptools import find_packages, setup
# Path of the module that defines __version__, relative to the repo root.
version_file = 'mmseg/version.py'
def get_version():
    """Read __version__ out of version_file without importing the package."""
    with open(version_file, 'r') as f:
        # Executing version.py in this frame drops __version__ into locals().
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
178,918 | from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_requirements())"
Here is the function:
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=False): if True include version specs
    Returns:
        List[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import sys
    from os.path import exists
    import re
    require_fpath = fname
    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows '#egg='.
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info
    def parse_require_file(fpath):
        # Yield parsed info dicts for every non-comment, non-blank line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info
    def gen_packages_items():
        # Re-assemble each requirement string, optionally with its
        # version spec and platform marker.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item
    packages = list(gen_packages_items())
    return packages
178,919 | import os
import subprocess
import sys
# Path of the module that defines __version__, relative to this script's
# working directory (a tools/ copy of the setup.py helper).
version_file = '../mmseg/version.py'
def get_version():
    """Read __version__ out of version_file without importing the package."""
    with open(version_file, 'r') as f:
        # Executing version.py in this frame drops __version__ into locals().
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
178,923 | import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_root_logger
The provided code snippet includes necessary dependencies for implementing the `train_segmentor` function. Write a Python function `def train_segmentor(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None)` to solve the following problem:
Launch segmentor training.
Here is the function:
def train_segmentor(model,
                    dataset,
                    cfg,
                    distributed=False,
                    validate=False,
                    timestamp=None,
                    meta=None):
    """Launch segmentor training.

    Builds data loaders, wraps the model for (distributed) data-parallel
    execution, constructs the optimizer and runner from `cfg`, registers
    training/eval hooks, optionally resumes or loads a checkpoint, and
    finally runs the configured workflow. Blocks until training ends.
    """
    logger = get_root_logger(cfg.log_level)
    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed,
            drop_last=True) for ds in dataset
    ]
    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)
    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)
    if cfg.get('runner') is None:
        # Legacy configs: fall back to an iteration-based runner.
        cfg.runner = {'type': 'IterBasedRunner', 'max_iters': cfg.total_iters}
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)
    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            batch_processor=None,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))
    # register hooks
    runner.register_training_hooks(cfg.lr_config, cfg.optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    # an ugly walkaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # register eval hooks
    if validate:
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=1,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        # Iteration-based runners evaluate by iteration, not by epoch.
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))
    # resume_from takes precedence over load_from.
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
178,924 | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
The provided code snippet includes necessary dependencies for implementing the `init_segmentor` function. Write a Python function `def init_segmentor(config, checkpoint=None, device='cuda:0')` to solve the following problem:
Initialize a segmentor from config file. Args: config (str or :obj:`mmcv.Config`): Config file path or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. device (str, optional) CPU/CUDA device option. Default 'cuda:0'. Use 'cpu' for loading model on CPU. Returns: nn.Module: The constructed segmentor.
Here is the function:
def init_segmentor(config, checkpoint=None, device='cuda:0'):
    """Build a segmentor from a config and optionally load a checkpoint.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or an already
            loaded config object.
        checkpoint (str, optional): Checkpoint path. When None the model is
            returned with freshly initialized (unloaded) weights.
        device (str, optional): CPU/CUDA device option. Default 'cuda:0'.
            Use 'cpu' for loading model on CPU.

    Returns:
        nn.Module: The constructed segmentor, in eval mode on ``device``.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        'but got {}'.format(type(config)))
    # Inference only: drop pretrained-weight URL and training config.
    config.model.pretrained = None
    config.model.train_cfg = None
    model = build_segmentor(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        ckpt = load_checkpoint(model, checkpoint, map_location='cpu')
        # Class names / palette travel inside the checkpoint metadata.
        model.CLASSES = ckpt['meta']['CLASSES']
        model.PALETTE = ckpt['meta']['PALETTE']
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
178,925 | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
class LoadImage:
    """Minimal pipeline step that decodes ``results['img']`` into an array."""

    def __call__(self, results):
        """Load the image referenced (or already carried) by ``results``.

        Args:
            results (dict): A result dict whose ``'img'`` key holds either a
                filename or an already decoded image.

        Returns:
            dict: The same dict with ``img`` decoded and shape metadata set.
        """
        source = results['img']
        if isinstance(source, str):
            results['filename'] = source
            results['ori_filename'] = source
        else:
            # In-memory image: there is no filename to record.
            results['filename'] = None
            results['ori_filename'] = None
        decoded = mmcv.imread(source)
        results['img'] = decoded
        results['img_shape'] = decoded.shape
        results['ori_shape'] = decoded.shape
        return results
The provided code snippet includes necessary dependencies for implementing the `inference_segmentor` function. Write a Python function `def inference_segmentor(model, img)` to solve the following problem:
Inference image(s) with the segmentor. Args: model (nn.Module): The loaded segmentor. imgs (str/ndarray or list[str/ndarray]): Either image files or loaded images. Returns: (list[Tensor]): The segmentation result.
Here is the function:
def inference_segmentor(model, img):
    """Inference image(s) with the segmentor.

    Args:
        model (nn.Module): The loaded segmentor.
        img (str/ndarray): Either an image file path or a loaded image.

    Returns:
        (list[Tensor]): The segmentation result.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # run on the model's device
    # Swap the dataset's file loader for the simple in-memory LoadImage step.
    pipeline = Compose([LoadImage()] + cfg.data.test.pipeline[1:])
    sample = pipeline(dict(img=img))
    batch = collate([sample], samples_per_gpu=1)
    if next(model.parameters()).is_cuda:
        # Move the collated batch onto the model's GPU.
        batch = scatter(batch, [device])[0]
    else:
        # CPU path: unwrap the DataContainer metadata by hand.
        batch['img_metas'] = [m.data[0] for m in batch['img_metas']]
    with torch.no_grad():
        return model(return_loss=False, rescale=True, **batch)
178,926 | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
The provided code snippet includes necessary dependencies for implementing the `show_result_pyplot` function. Write a Python function `def show_result_pyplot(model, img, result, palette=None, fig_size=(15, 10))` to solve the following problem:
Visualize the segmentation results on the image. Args: model (nn.Module): The loaded segmentor. img (str or np.ndarray): Image filename or loaded image. result (list): The segmentation result. palette (list[list[int]]] | None): The palette of segmentation map. If None is given, random palette will be generated. Default: None fig_size (tuple): Figure size of the pyplot figure.
Here is the function:
def show_result_pyplot(model, img, result, palette=None, fig_size=(15, 10)):
    """Visualize the segmentation results on the image.

    Args:
        model (nn.Module): The loaded segmentor.
        img (str or np.ndarray): Image filename or loaded image.
        result (list): The segmentation result.
        palette (list[list[int]]] | None): The palette of segmentation
            map. If None is given, random palette will be generated.
            Default: None
        fig_size (tuple): Figure size of the pyplot figure.
    """
    if hasattr(model, 'module'):
        model = model.module  # unwrap (Distributed)DataParallel containers
    drawn = model.show_result(img, result, palette=palette, show=False)
    plt.figure(figsize=fig_size)
    # show_result draws in BGR (OpenCV order); pyplot expects RGB.
    plt.imshow(mmcv.bgr2rgb(drawn))
    plt.show()
178,927 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `cityscapes_classes` function. Write a Python function `def cityscapes_classes()` to solve the following problem:
Cityscapes class names for external use.
Here is the function:
def cityscapes_classes():
    """Cityscapes class names for external use."""
    # Index i is the label id for class i; keep the order fixed.
    names = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole',
             'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky',
             'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',
             'bicycle')
    return list(names)
178,928 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `ade_classes` function. Write a Python function `def ade_classes()` to solve the following problem:
ADE20K class names for external use.
Here is the function:
def ade_classes():
    """ADE20K class names for external use.

    Returns:
        list[str]: The 150 ADE20K category names. Index i is the label id
        for class i and is aligned with :func:`ade_palette`.
    """
    # NOTE: 'bed ' keeps its trailing space deliberately — it matches the
    # upstream ADE20K label file; do not "fix" it.
    return [
        'wall', 'building', 'sky', 'floor', 'tree', 'ceiling', 'road', 'bed ',
        'windowpane', 'grass', 'cabinet', 'sidewalk', 'person', 'earth',
        'door', 'table', 'mountain', 'plant', 'curtain', 'chair', 'car',
        'water', 'painting', 'sofa', 'shelf', 'house', 'sea', 'mirror', 'rug',
        'field', 'armchair', 'seat', 'fence', 'desk', 'rock', 'wardrobe',
        'lamp', 'bathtub', 'railing', 'cushion', 'base', 'box', 'column',
        'signboard', 'chest of drawers', 'counter', 'sand', 'sink',
        'skyscraper', 'fireplace', 'refrigerator', 'grandstand', 'path',
        'stairs', 'runway', 'case', 'pool table', 'pillow', 'screen door',
        'stairway', 'river', 'bridge', 'bookcase', 'blind', 'coffee table',
        'toilet', 'flower', 'book', 'hill', 'bench', 'countertop', 'stove',
        'palm', 'kitchen island', 'computer', 'swivel chair', 'boat', 'bar',
        'arcade machine', 'hovel', 'bus', 'towel', 'light', 'truck', 'tower',
        'chandelier', 'awning', 'streetlight', 'booth', 'television receiver',
        'airplane', 'dirt track', 'apparel', 'pole', 'land', 'bannister',
        'escalator', 'ottoman', 'bottle', 'buffet', 'poster', 'stage', 'van',
        'ship', 'fountain', 'conveyer belt', 'canopy', 'washer', 'plaything',
        'swimming pool', 'stool', 'barrel', 'basket', 'waterfall', 'tent',
        'bag', 'minibike', 'cradle', 'oven', 'ball', 'food', 'step', 'tank',
        'trade name', 'microwave', 'pot', 'animal', 'bicycle', 'lake',
        'dishwasher', 'screen', 'blanket', 'sculpture', 'hood', 'sconce',
        'vase', 'traffic light', 'tray', 'ashcan', 'fan', 'pier', 'crt screen',
        'plate', 'monitor', 'bulletin board', 'shower', 'radiator', 'glass',
        'clock', 'flag'
    ]
178,929 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `voc_classes` function. Write a Python function `def voc_classes()` to solve the following problem:
Pascal VOC class names for external use.
Here is the function:
def voc_classes():
    """Pascal VOC class names for external use."""
    # 21 entries: background plus the 20 VOC object categories.
    names = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
             'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
             'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
             'train', 'tvmonitor')
    return list(names)
178,930 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `cityscapes_palette` function. Write a Python function `def cityscapes_palette()` to solve the following problem:
Cityscapes palette for external use.
Here is the function:
def cityscapes_palette():
    """Cityscapes palette for external use."""
    # One RGB triple per class, index-aligned with cityscapes_classes().
    colors = ((128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156),
              (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0),
              (107, 142, 35), (152, 251, 152), (70, 130, 180), (220, 20, 60),
              (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100),
              (0, 80, 100), (0, 0, 230), (119, 11, 32))
    return [list(rgb) for rgb in colors]
178,931 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `ade_palette` function. Write a Python function `def ade_palette()` to solve the following problem:
ADE20K palette for external use.
Here is the function:
def ade_palette():
    """ADE20K palette for external use.

    Returns:
        list[list[int]]: One RGB triple per class, index-aligned with
        :func:`ade_classes` (150 entries).
    """
    return [[120, 120, 120], [180, 120, 120], [6, 230, 230], [80, 50, 50],
            [4, 200, 3], [120, 120, 80], [140, 140, 140], [204, 5, 255],
            [230, 230, 230], [4, 250, 7], [224, 5, 255], [235, 255, 7],
            [150, 5, 61], [120, 120, 70], [8, 255, 51], [255, 6, 82],
            [143, 255, 140], [204, 255, 4], [255, 51, 7], [204, 70, 3],
            [0, 102, 200], [61, 230, 250], [255, 6, 51], [11, 102, 255],
            [255, 7, 71], [255, 9, 224], [9, 7, 230], [220, 220, 220],
            [255, 9, 92], [112, 9, 255], [8, 255, 214], [7, 255, 224],
            [255, 184, 6], [10, 255, 71], [255, 41, 10], [7, 255, 255],
            [224, 255, 8], [102, 8, 255], [255, 61, 6], [255, 194, 7],
            [255, 122, 8], [0, 255, 20], [255, 8, 41], [255, 5, 153],
            [6, 51, 255], [235, 12, 255], [160, 150, 20], [0, 163, 255],
            [140, 140, 140], [250, 10, 15], [20, 255, 0], [31, 255, 0],
            [255, 31, 0], [255, 224, 0], [153, 255, 0], [0, 0, 255],
            [255, 71, 0], [0, 235, 255], [0, 173, 255], [31, 0, 255],
            [11, 200, 200], [255, 82, 0], [0, 255, 245], [0, 61, 255],
            [0, 255, 112], [0, 255, 133], [255, 0, 0], [255, 163, 0],
            [255, 102, 0], [194, 255, 0], [0, 143, 255], [51, 255, 0],
            [0, 82, 255], [0, 255, 41], [0, 255, 173], [10, 0, 255],
            [173, 255, 0], [0, 255, 153], [255, 92, 0], [255, 0, 255],
            [255, 0, 245], [255, 0, 102], [255, 173, 0], [255, 0, 20],
            [255, 184, 184], [0, 31, 255], [0, 255, 61], [0, 71, 255],
            [255, 0, 204], [0, 255, 194], [0, 255, 82], [0, 10, 255],
            [0, 112, 255], [51, 0, 255], [0, 194, 255], [0, 122, 255],
            [0, 255, 163], [255, 153, 0], [0, 255, 10], [255, 112, 0],
            [143, 255, 0], [82, 0, 255], [163, 255, 0], [255, 235, 0],
            [8, 184, 170], [133, 0, 255], [0, 255, 92], [184, 0, 255],
            [255, 0, 31], [0, 184, 255], [0, 214, 255], [255, 0, 112],
            [92, 255, 0], [0, 224, 255], [112, 224, 255], [70, 184, 160],
            [163, 0, 255], [153, 0, 255], [71, 255, 0], [255, 0, 163],
            [255, 204, 0], [255, 0, 143], [0, 255, 235], [133, 255, 0],
            [255, 0, 235], [245, 0, 255], [255, 0, 122], [255, 245, 0],
            [10, 190, 212], [214, 255, 0], [0, 204, 255], [20, 0, 255],
            [255, 255, 0], [0, 153, 255], [0, 41, 255], [0, 255, 204],
            [41, 0, 255], [41, 255, 0], [173, 0, 255], [0, 245, 255],
            [71, 0, 255], [122, 0, 255], [0, 255, 184], [0, 92, 255],
            [184, 255, 0], [0, 133, 255], [255, 214, 0], [25, 194, 194],
            [102, 255, 0], [92, 0, 255]]
178,932 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `voc_palette` function. Write a Python function `def voc_palette()` to solve the following problem:
Pascal VOC palette for external use.
Here is the function:
def voc_palette():
    """Pascal VOC palette for external use."""
    # One RGB triple per class, index-aligned with voc_classes().
    colors = ((0, 0, 0), (128, 0, 0), (0, 128, 0), (128, 128, 0), (0, 0, 128),
              (128, 0, 128), (0, 128, 128), (128, 128, 128), (64, 0, 0),
              (192, 0, 0), (64, 128, 0), (192, 128, 0), (64, 0, 128),
              (192, 0, 128), (64, 128, 128), (192, 128, 128), (0, 64, 0),
              (128, 64, 0), (0, 192, 0), (128, 192, 0), (0, 64, 128))
    return [list(rgb) for rgb in colors]
178,933 | import mmcv
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
The provided code snippet includes necessary dependencies for implementing the `get_classes` function. Write a Python function `def get_classes(dataset)` to solve the following problem:
Get class names of a dataset.
Here is the function:
def get_classes(dataset):
    """Get class names of a dataset.

    Args:
        dataset (str): Dataset name or one of its aliases, as listed in
            ``dataset_aliases``.

    Returns:
        list[str]: Class names of the requested dataset.

    Raises:
        ValueError: If the dataset name is not recognized.
        TypeError: If ``dataset`` is not a string.
    """
    # Invert dataset_aliases so every alias maps to its canonical name.
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Resolve the loader function by name instead of eval()-ing a
            # dynamically built expression — same dispatch, no code execution
            # from string data.
            labels = globals()[alias2name[dataset] + '_classes']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
    return labels
178,934 | import mmcv
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
The provided code snippet includes necessary dependencies for implementing the `get_palette` function. Write a Python function `def get_palette(dataset)` to solve the following problem:
Get class palette (RGB) of a dataset.
Here is the function:
def get_palette(dataset):
    """Get class palette (RGB) of a dataset.

    Args:
        dataset (str): Dataset name or one of its aliases, as listed in
            ``dataset_aliases``.

    Returns:
        list[list[int]]: Per-class RGB palette of the requested dataset.

    Raises:
        ValueError: If the dataset name is not recognized.
        TypeError: If ``dataset`` is not a string.
    """
    # Invert dataset_aliases so every alias maps to its canonical name.
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name
    if mmcv.is_str(dataset):
        if dataset in alias2name:
            # Resolve the palette function by name instead of eval()-ing a
            # dynamically built expression — same dispatch, no code execution
            # from string data.
            labels = globals()[alias2name[dataset] + '_palette']()
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
    return labels
178,935 | import mmcv
import numpy as np
def eval_metrics(results,
                 gt_seg_maps,
                 num_classes,
                 ignore_index,
                 metrics=['mIoU'],
                 nan_to_num=None,
                 label_map=dict(),
                 reduce_zero_label=False):
    """Calculate evaluation metrics.

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and
            'mDice'. NOTE(review): mutable default — harmless here because
            the argument is only rebound, never mutated.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore zero label.
            Default: False.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category evaluation metrics, shape (num_classes, ).
    """
    # Accept a bare metric name as shorthand for a one-element list.
    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))
    # Aggregate per-class areas over the whole dataset in one pass.
    total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label = total_intersect_and_union(results, gt_seg_maps,
                                                     num_classes, ignore_index,
                                                     label_map,
                                                     reduce_zero_label)
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    acc = total_area_intersect / total_area_label
    ret_metrics = [all_acc, acc]
    for metric in metrics:
        if metric == 'mIoU':
            iou = total_area_intersect / total_area_union
            ret_metrics.append(iou)
        elif metric == 'mDice':
            # Dice = 2*|A∩B| / (|A| + |B|).
            dice = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            ret_metrics.append(dice)
    # Classes absent from both prediction and GT give 0/0 = NaN; optionally
    # replace those entries with a user-chosen number.
    if nan_to_num is not None:
        ret_metrics = [
            np.nan_to_num(metric, nan=nan_to_num) for metric in ret_metrics
        ]
    return ret_metrics
The provided code snippet includes necessary dependencies for implementing the `mean_iou` function. Write a Python function `def mean_iou(results, gt_seg_maps, num_classes, ignore_index, nan_to_num=None, label_map=dict(), reduce_zero_label=False)` to solve the following problem:
Calculate Mean Intersection and Union (mIoU) Args: results (list[ndarray]): List of prediction segmentation maps. gt_seg_maps (list[ndarray]): list of ground truth segmentation maps. num_classes (int): Number of categories. ignore_index (int): Index that will be ignored in evaluation. nan_to_num (int, optional): If specified, NaN values will be replaced by the numbers defined by the user. Default: None. label_map (dict): Mapping old labels to new labels. Default: dict(). reduce_zero_label (bool): Whether to ignore zero label. Default: False. Returns: float: Overall accuracy on all images. ndarray: Per category accuracy, shape (num_classes, ). ndarray: Per category IoU, shape (num_classes, ).
Here is the function:
def mean_iou(results,
             gt_seg_maps,
             num_classes,
             ignore_index,
             nan_to_num=None,
             label_map=dict(),
             reduce_zero_label=False):
    """Calculate Mean Intersection and Union (mIoU).

    Thin wrapper around :func:`eval_metrics` with ``metrics=['mIoU']``.

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore zero label.
            Default: False.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category IoU, shape (num_classes, ).
    """
    computed = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mIoU'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
    # eval_metrics returns [all_acc, acc, iou] for a single 'mIoU' request.
    return tuple(computed)
178,936 | import mmcv
import numpy as np
def eval_metrics(results,
                 gt_seg_maps,
                 num_classes,
                 ignore_index,
                 metrics=['mIoU'],
                 nan_to_num=None,
                 label_map=dict(),
                 reduce_zero_label=False):
    """Calculate evaluation metrics.

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        metrics (list[str] | str): Metrics to be evaluated, 'mIoU' and
            'mDice'. NOTE(review): mutable default — harmless here because
            the argument is only rebound, never mutated.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore zero label.
            Default: False.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category evaluation metrics, shape (num_classes, ).
    """
    # Accept a bare metric name as shorthand for a one-element list.
    if isinstance(metrics, str):
        metrics = [metrics]
    allowed_metrics = ['mIoU', 'mDice']
    if not set(metrics).issubset(set(allowed_metrics)):
        raise KeyError('metrics {} is not supported'.format(metrics))
    # Aggregate per-class areas over the whole dataset in one pass.
    total_area_intersect, total_area_union, total_area_pred_label, \
        total_area_label = total_intersect_and_union(results, gt_seg_maps,
                                                     num_classes, ignore_index,
                                                     label_map,
                                                     reduce_zero_label)
    all_acc = total_area_intersect.sum() / total_area_label.sum()
    acc = total_area_intersect / total_area_label
    ret_metrics = [all_acc, acc]
    for metric in metrics:
        if metric == 'mIoU':
            iou = total_area_intersect / total_area_union
            ret_metrics.append(iou)
        elif metric == 'mDice':
            # Dice = 2*|A∩B| / (|A| + |B|).
            dice = 2 * total_area_intersect / (
                total_area_pred_label + total_area_label)
            ret_metrics.append(dice)
    # Classes absent from both prediction and GT give 0/0 = NaN; optionally
    # replace those entries with a user-chosen number.
    if nan_to_num is not None:
        ret_metrics = [
            np.nan_to_num(metric, nan=nan_to_num) for metric in ret_metrics
        ]
    return ret_metrics
The provided code snippet includes necessary dependencies for implementing the `mean_dice` function. Write a Python function `def mean_dice(results, gt_seg_maps, num_classes, ignore_index, nan_to_num=None, label_map=dict(), reduce_zero_label=False)` to solve the following problem:
Calculate Mean Dice (mDice) Args: results (list[ndarray]): List of prediction segmentation maps. gt_seg_maps (list[ndarray]): list of ground truth segmentation maps. num_classes (int): Number of categories. ignore_index (int): Index that will be ignored in evaluation. nan_to_num (int, optional): If specified, NaN values will be replaced by the numbers defined by the user. Default: None. label_map (dict): Mapping old labels to new labels. Default: dict(). reduce_zero_label (bool): Whether to ignore zero label. Default: False. Returns: float: Overall accuracy on all images. ndarray: Per category accuracy, shape (num_classes, ). ndarray: Per category dice, shape (num_classes, ).
Here is the function:
def mean_dice(results,
              gt_seg_maps,
              num_classes,
              ignore_index,
              nan_to_num=None,
              label_map=dict(),
              reduce_zero_label=False):
    """Calculate Mean Dice (mDice).

    Thin wrapper around :func:`eval_metrics` with ``metrics=['mDice']``.

    Args:
        results (list[ndarray]): List of prediction segmentation maps.
        gt_seg_maps (list[ndarray]): List of ground truth segmentation maps.
        num_classes (int): Number of categories.
        ignore_index (int): Index that will be ignored in evaluation.
        nan_to_num (int, optional): If specified, NaN values will be replaced
            by the numbers defined by the user. Default: None.
        label_map (dict): Mapping old labels to new labels. Default: dict().
        reduce_zero_label (bool): Whether to ignore zero label.
            Default: False.

    Returns:
        float: Overall accuracy on all images.
        ndarray: Per category accuracy, shape (num_classes, ).
        ndarray: Per category dice, shape (num_classes, ).
    """
    computed = eval_metrics(
        results=results,
        gt_seg_maps=gt_seg_maps,
        num_classes=num_classes,
        ignore_index=ignore_index,
        metrics=['mDice'],
        nan_to_num=nan_to_num,
        label_map=label_map,
        reduce_zero_label=reduce_zero_label)
    # eval_metrics returns [all_acc, acc, dice] for a single 'mDice' request.
    return tuple(computed)
178,939 | import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
def resize(input,
           size=None,
           scale_factor=None,
           mode='nearest',
           align_corners=None,
           warning=True):
    """Wrapper around ``F.interpolate`` with an optional alignment warning.

    Args:
        input (Tensor): Input of shape (N, C, H, W).
        size (tuple | torch.Size, optional): Target spatial size (H, W).
        scale_factor (float | tuple, optional): Multiplier for spatial size.
        mode (str): Interpolation mode passed to ``F.interpolate``.
        align_corners (bool, optional): Passed to ``F.interpolate``.
        warning (bool): If True, warn about upsampling sizes that do not
            align well under ``align_corners=True``.

    Returns:
        Tensor: The resized tensor.
    """
    if warning:
        if size is not None and align_corners:
            input_h, input_w = tuple(int(x) for x in input.shape[2:])
            output_h, output_w = tuple(int(x) for x in size)
            # BUG FIX: the original compared output_w against output_h
            # (output width vs output *height*); the upsampling check must
            # compare each output dim against the matching input dim.
            if output_h > input_h or output_w > input_w:
                # Warn only when both dims are nontrivial and neither output
                # size is of the well-aligned `n*(in-1)+1` form.
                if ((output_h > 1 and output_w > 1 and input_h > 1
                     and input_w > 1) and (output_h - 1) % (input_h - 1)
                        and (output_w - 1) % (input_w - 1)):
                    warnings.warn(
                        f'When align_corners={align_corners}, '
                        'the output would more aligned if '
                        f'input size {(input_h, input_w)} is `x+1` and '
                        f'out size {(output_h, output_w)} is `nx+1`')
    if isinstance(size, torch.Size):
        # F.interpolate expects a plain tuple of ints, not torch.Size.
        size = tuple(int(x) for x in size)
    return F.interpolate(input, size, scale_factor, mode, align_corners)
178,940 | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
The provided code snippet includes necessary dependencies for implementing the `to_tensor` function. Write a Python function `def to_tensor(data)` to solve the following problem:
Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted.
Here is the function:
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.

    Args:
        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
            be converted.
    """
    # Flat guard clauses; the check order is significant (tensors pass
    # through untouched, arrays avoid a copy via from_numpy).
    if isinstance(data, torch.Tensor):
        return data
    if isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    if isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    if isinstance(data, int):
        return torch.LongTensor([data])
    if isinstance(data, float):
        return torch.FloatTensor([data])
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
178,941 | import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
from torch.utils.data import DistributedSampler
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Worker init func for dataloader.

    The seed of each worker equals to num_worker * rank + worker_id + user_seed

    Args:
        worker_id (int): Worker id.
        num_workers (int): Number of workers.
        rank (int): The rank of current process.
        seed (int): The random seed to use.
    """
    # Derive a per-worker seed so every (rank, worker) pair is distinct.
    derived_seed = seed + worker_id + num_workers * rank
    np.random.seed(derived_seed)
    random.seed(derived_seed)
The provided code snippet includes necessary dependencies for implementing the `build_dataloader` function. Write a Python function `def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, drop_last=False, pin_memory=True, dataloader_type='PoolDataLoader', **kwargs)` to solve the following problem:
Build PyTorch DataLoader. In distributed training, each GPU/process has a dataloader. In non-distributed training, there is only one dataloader for all GPUs. Args: dataset (Dataset): A PyTorch dataset. samples_per_gpu (int): Number of training samples on each GPU, i.e., batch size of each GPU. workers_per_gpu (int): How many subprocesses to use for data loading for each GPU. num_gpus (int): Number of GPUs. Only used in non-distributed training. dist (bool): Distributed training/test or not. Default: True. shuffle (bool): Whether to shuffle the data at every epoch. Default: True. seed (int | None): Seed to be used. Default: None. drop_last (bool): Whether to drop the last incomplete batch in epoch. Default: False pin_memory (bool): Whether to use pin_memory in DataLoader. Default: True dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader' kwargs: any keyword argument to be used to initialize DataLoader Returns: DataLoader: A PyTorch dataloader.
Here is the function:
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     dataloader_type='PoolDataLoader',
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in epoch.
            Default: False
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True
        dataloader_type (str): Type of dataloader. Default: 'PoolDataLoader'
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The distributed sampler already handles shuffling, so the loader
        # itself must not shuffle on top of it.
        sampler = DistributedSampler(
            dataset, world_size, rank, shuffle=shuffle)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        # Single-process case: one loader serves all GPUs.
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if seed is not None:
        # Give every worker a distinct, reproducible seed.
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    loader_classes = {
        'DataLoader': DataLoader,
        'PoolDataLoader': PoolDataLoader,
    }
    assert dataloader_type in loader_classes, \
        f'unsupported dataloader {dataloader_type}'
    loader_cls = loader_classes[dataloader_type]

    return loader_cls(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)
178,942 | import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def flatten_binary_logits(logits, labels, ignore_index=None):
    """Flatten batch predictions and labels (binary case), dropping every
    position whose label equals ``ignore_index``.

    Args:
        logits (torch.Tensor): Logits of any shape.
        labels (torch.Tensor): Labels with the same number of elements.
        ignore_index (int | None): Label value to drop. Default: None.

    Returns:
        tuple[torch.Tensor, torch.Tensor]: 1-D (logits, labels) with
        ignored positions removed.
    """
    flat_logits = logits.view(-1)
    flat_labels = labels.view(-1)
    if ignore_index is None:
        return flat_logits, flat_labels
    # Boolean mask selecting only the positions that should be kept.
    keep = (flat_labels != ignore_index)
    return flat_logits[keep], flat_labels[keep]
def lovasz_hinge_flat(logits, labels):
    """Binary Lovasz hinge loss on flattened predictions.

    Args:
        logits (torch.Tensor): [P], logits at each prediction
            (between -infty and +infty).
        labels (torch.Tensor): [P], binary ground truth labels (0 or 1).

    Returns:
        torch.Tensor: The calculated loss.
    """
    if len(labels) == 0:
        # Only void pixels: return a zero that stays in the autograd graph
        # so gradients propagate as 0.
        return logits.sum() * 0.
    # Map {0, 1} labels to {-1, +1} signs and compute hinge margins.
    signs = labels.float() * 2. - 1.
    margin_errors = (1. - logits * signs)
    sorted_errors, order = torch.sort(margin_errors, dim=0, descending=True)
    gt_sorted = labels[order.data]
    # Weight sorted hinge errors by the Lovasz extension gradient.
    return torch.dot(F.relu(sorted_errors), lovasz_grad(gt_sorted))
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.

    Raises:
        ValueError: If ``avg_factor`` is combined with ``reduction='sum'``.
    """
    if weight is not None:
        # Element-wise weighting; a weight of shape (N, 1) broadcasts over
        # the second dimension of the loss.
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight

    if avg_factor is None:
        # No explicit normalizer: fall back to the standard reduction.
        return reduce_loss(loss, reduction)
    if reduction == 'mean':
        # Average by the caller-supplied factor instead of element count.
        return loss.sum() / avg_factor
    if reduction == 'none':
        return loss
    raise ValueError('avg_factor can not be used with reduction="sum"')
The provided code snippet includes necessary dependencies for implementing the `lovasz_hinge` function. Write a Python function `def lovasz_hinge(logits, labels, classes='present', per_image=False, class_weight=None, reduction='mean', avg_factor=None, ignore_index=255)` to solve the following problem:
Binary Lovasz hinge loss. Args: logits (torch.Tensor): [B, H, W], logits at each pixel (between -infty and +infty). labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1). classes (str | list[int], optional): Placeholder, to be consistent with other loss. Default: None. per_image (bool, optional): If per_image is True, compute the loss per image instead of per batch. Default: False. class_weight (list[float], optional): Placeholder, to be consistent with other loss. Default: None. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". This parameter only works when per_image is True. Default: 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. This parameter only works when per_image is True. Default: None. ignore_index (int | None): The label index to be ignored. Default: 255. Returns: torch.Tensor: The calculated loss.
Here is the function:
def lovasz_hinge(logits,
                 labels,
                 classes='present',
                 per_image=False,
                 class_weight=None,
                 reduction='mean',
                 avg_factor=None,
                 ignore_index=255):
    """Binary Lovasz hinge loss.

    Args:
        logits (torch.Tensor): [B, H, W], logits at each pixel
            (between -infty and +infty).
        labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1).
        classes (str | list[int], optional): Placeholder, to be consistent
            with other losses; unused here. Default: 'present'.
        per_image (bool, optional): If True, compute the loss per image
            instead of per batch. Default: False.
        class_weight (list[float], optional): Placeholder, to be consistent
            with other losses; unused here. Default: None.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum". Only used when per_image
            is True. Default: 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Only used when per_image is True. Default: None.
        ignore_index (int | None): The label index to be ignored.
            Default: 255.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if not per_image:
        # Single loss over every pixel of the whole batch.
        return lovasz_hinge_flat(
            *flatten_binary_logits(logits, labels, ignore_index))
    # Per-image mode: one loss per sample, then reduce across samples.
    sample_losses = []
    for logit, label in zip(logits, labels):
        flat = flatten_binary_logits(
            logit.unsqueeze(0), label.unsqueeze(0), ignore_index)
        sample_losses.append(lovasz_hinge_flat(*flat))
    return weight_reduce_loss(
        torch.stack(sample_losses), None, reduction, avg_factor)
178,943 | import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def flatten_probs(probs, labels, ignore_index=None):
    """Flatten predictions in the batch.

    Args:
        probs (torch.Tensor): [B, C, H, W] class probabilities, or
            [B, H, W] for the sigmoid/binary case (treated as C = 1).
        labels (torch.Tensor): [B, H, W] ground truth labels.
        ignore_index (int | None): Label value to drop. Default: None.

    Returns:
        tuple[torch.Tensor, torch.Tensor]: ([P, C] probabilities,
        [P] labels) with ignored positions removed.
    """
    if probs.dim() == 3:
        # Assumes output of a sigmoid layer: add a singleton channel dim.
        B, H, W = probs.size()
        probs = probs.view(B, 1, H, W)
    B, C, H, W = probs.size()
    probs = probs.permute(0, 2, 3, 1).contiguous().view(-1, C)  # B*H*W, C=P,C
    labels = labels.view(-1)
    if ignore_index is None:
        return probs, labels
    valid = (labels != ignore_index)
    # Boolean mask indexing keeps the row dimension even when exactly one
    # pixel is valid; the previous `valid.nonzero().squeeze()` collapsed a
    # [1, 1] index tensor to a 0-dim scalar, returning shape [C] instead of
    # [1, C] and breaking downstream per-class column indexing.
    vprobs = probs[valid]
    vlabels = labels[valid]
    return vprobs, vlabels
def lovasz_softmax_flat(probs, labels, classes='present', class_weight=None):
    """Multi-class Lovasz-Softmax loss on flattened predictions.

    Args:
        probs (torch.Tensor): [P, C], class probabilities at each prediction
            (between 0 and 1).
        labels (torch.Tensor): [P], ground truth labels (between 0 and C - 1).
        classes (str | list[int], optional): Classes chosen to calculate
            loss. 'all' for all classes, 'present' for classes present in
            labels, or a list of classes to average. Default: 'present'.
        class_weight (list[float], optional): The weight for each class.
            Default: None.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if probs.numel() == 0:
        # Only void pixels: a zero tensor keeps gradients flowing as 0.
        return probs * 0.
    num_classes = probs.size(1)
    if classes in ('all', 'present'):
        candidate_classes = list(range(num_classes))
    else:
        candidate_classes = classes
    per_class_losses = []
    for c in candidate_classes:
        fg = (labels == c).float()  # foreground mask for class c
        if classes == 'present' and fg.sum() == 0:
            continue
        if num_classes == 1:
            if len(classes) > 1:
                raise ValueError('Sigmoid output possible only with 1 class')
            class_pred = probs[:, 0]
        else:
            class_pred = probs[:, c]
        errors = (fg - class_pred).abs()
        errors_sorted, perm = torch.sort(errors, 0, descending=True)
        fg_sorted = fg[perm.data]
        class_loss = torch.dot(errors_sorted, lovasz_grad(fg_sorted))
        if class_weight is not None:
            class_loss *= class_weight[c]
        per_class_losses.append(class_loss)
    return torch.stack(per_class_losses).mean()
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights.
        reduction (str): Same as built-in losses of PyTorch.
        avg_factor (float): Average factor when computing the mean of losses.

    Returns:
        Tensor: Processed loss values.

    Raises:
        ValueError: If ``avg_factor`` is combined with ``reduction='sum'``.
    """
    # Optional element-wise weighting; (N, 1) weights broadcast over dim 1.
    if weight is not None:
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) in (1, loss.size(1))
        loss = loss * weight

    if avg_factor is None:
        # Standard reduction when no explicit normalizer is supplied.
        loss = reduce_loss(loss, reduction)
    elif reduction == 'mean':
        # Normalize by the caller-provided factor rather than element count.
        loss = loss.sum() / avg_factor
    elif reduction != 'none':
        # 'none' passes through; anything else (i.e. 'sum') is invalid here.
        raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `lovasz_softmax` function. Write a Python function `def lovasz_softmax(probs, labels, classes='present', per_image=False, class_weight=None, reduction='mean', avg_factor=None, ignore_index=255)` to solve the following problem:
Multi-class Lovasz-Softmax loss. Args: probs (torch.Tensor): [B, C, H, W], class probabilities at each prediction (between 0 and 1). labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and C - 1). classes (str | list[int], optional): Classes chosen to calculate loss. 'all' for all classes, 'present' for classes present in labels, or a list of classes to average. Default: 'present'. per_image (bool, optional): If per_image is True, compute the loss per image instead of per batch. Default: False. class_weight (list[float], optional): The weight for each class. Default: None. reduction (str, optional): The method used to reduce the loss. Options are "none", "mean" and "sum". This parameter only works when per_image is True. Default: 'mean'. avg_factor (int, optional): Average factor that is used to average the loss. This parameter only works when per_image is True. Default: None. ignore_index (int | None): The label index to be ignored. Default: 255. Returns: torch.Tensor: The calculated loss.
Here is the function:
def lovasz_softmax(probs,
                   labels,
                   classes='present',
                   per_image=False,
                   class_weight=None,
                   reduction='mean',
                   avg_factor=None,
                   ignore_index=255):
    """Multi-class Lovasz-Softmax loss.

    Args:
        probs (torch.Tensor): [B, C, H, W], class probabilities at each
            prediction (between 0 and 1).
        labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and
            C - 1).
        classes (str | list[int], optional): Classes chosen to calculate
            loss. 'all' for all classes, 'present' for classes present in
            labels, or a list of classes to average. Default: 'present'.
        per_image (bool, optional): If True, compute the loss per image
            instead of per batch. Default: False.
        class_weight (list[float], optional): The weight for each class.
            Default: None.
        reduction (str, optional): The method used to reduce the loss.
            Options are "none", "mean" and "sum". Only used when per_image
            is True. Default: 'mean'.
        avg_factor (int, optional): Average factor that is used to average
            the loss. Only used when per_image is True. Default: None.
        ignore_index (int | None): The label index to be ignored.
            Default: 255.

    Returns:
        torch.Tensor: The calculated loss.
    """
    if not per_image:
        # One loss over all pixels of the whole batch.
        flat = flatten_probs(probs, labels, ignore_index)
        return lovasz_softmax_flat(
            *flat, classes=classes, class_weight=class_weight)
    # Per-image mode: one loss per sample, then reduce across samples.
    sample_losses = []
    for prob, label in zip(probs, labels):
        flat = flatten_probs(
            prob.unsqueeze(0), label.unsqueeze(0), ignore_index)
        sample_losses.append(
            lovasz_softmax_flat(
                *flat, classes=classes, class_weight=class_weight))
    return weight_reduce_loss(
        torch.stack(sample_losses), None, reduction, avg_factor)
178,944 | import functools
import torch.nn.functional as F
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
    """Apply element-wise weight and reduce loss.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights; must have the same number of
            dimensions as ``loss``, and its second dimension (if any) must
            be 1 or match ``loss``.
        reduction (str): Same as built-in losses of PyTorch
            ('none', 'mean' or 'sum').
        avg_factor (float): Average factor when computing the mean of
            losses; only valid together with reduction 'mean' or 'none'.

    Returns:
        Tensor: Processed loss values.

    Raises:
        ValueError: If ``avg_factor`` is given together with a reduction
            other than 'mean' or 'none' (i.e. 'sum').
    """
    # if weight is specified, apply element-wise weight
    if weight is not None:
        assert weight.dim() == loss.dim()
        if weight.dim() > 1:
            assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
        loss = loss * weight
    # if avg_factor is not specified, just reduce the loss
    if avg_factor is None:
        loss = reduce_loss(loss, reduction)
    else:
        # if reduction is mean, then average the loss by avg_factor
        if reduction == 'mean':
            loss = loss.sum() / avg_factor
        # if reduction is 'none', then do nothing, otherwise raise an error
        elif reduction != 'none':
            raise ValueError('avg_factor can not be used with reduction="sum"')
    return loss
The provided code snippet includes necessary dependencies for implementing the `weighted_loss` function. Write a Python function `def weighted_loss(loss_func)` to solve the following problem:
Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated function will have the signature like `loss_func(pred, target, weight=None, reduction='mean', avg_factor=None, **kwargs)`. :Example: >>> import torch >>> @weighted_loss >>> def l1_loss(pred, target): >>> return (pred - target).abs() >>> pred = torch.Tensor([0, 2, 3]) >>> target = torch.Tensor([1, 1, 1]) >>> weight = torch.Tensor([1, 0, 1]) >>> l1_loss(pred, target) tensor(1.3333) >>> l1_loss(pred, target, weight) tensor(1.) >>> l1_loss(pred, target, reduction='none') tensor([1., 1., 2.]) >>> l1_loss(pred, target, weight, avg_factor=2) tensor(1.5000)
Here is the function:
def weighted_loss(loss_func):
    """Create a weighted version of a given loss function.

    The decorated function must have the signature
    ``loss_func(pred, target, **kwargs)`` and return an element-wise loss
    without any reduction. The decorator adds ``weight``, ``reduction`` and
    ``avg_factor`` arguments, producing the signature
    ``loss_func(pred, target, weight=None, reduction='mean',
    avg_factor=None, **kwargs)``.

    :Example:

    >>> import torch
    >>> @weighted_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])
    >>> weight = torch.Tensor([1, 0, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, weight)
    tensor(1.)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    >>> l1_loss(pred, target, weight, avg_factor=2)
    tensor(1.5000)
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                avg_factor=None,
                **kwargs):
        # Raw element-wise loss first, then weighting and reduction.
        elementwise = loss_func(pred, target, **kwargs)
        return weight_reduce_loss(elementwise, weight, reduction, avg_factor)

    return wrapper
178,945 | import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(pred, target, topk=1, thresh=None)` to solve the following problem:
Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class, ...) target (torch.Tensor): The target of each prediction, shape (N, , ...) topk (int | tuple[int], optional): If the predictions in ``topk`` matches the target, the predictions will be regarded as correct ones. Defaults to 1. thresh (float, optional): If not None, predictions with scores under this threshold are considered incorrect. Default to None. Returns: float | tuple[float]: If the input ``topk`` is a single integer, the function will return a single float as accuracy. If ``topk`` is a tuple containing multiple integers, the function will return a tuple containing accuracies of each ``topk`` number.
Here is the function:
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class, ...)
target (torch.Tensor): The target of each prediction, shape (N, , ...)
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == target.ndim + 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
# transpose to shape (maxk, N, ...)
pred_label = pred_label.transpose(0, 1)
correct = pred_label.eq(target.unsqueeze(0).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / target.numel()))
return res[0] if return_single else res | Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class, ...) target (torch.Tensor): The target of each prediction, shape (N, , ...) topk (int | tuple[int], optional): If the predictions in ``topk`` matches the target, the predictions will be regarded as correct ones. Defaults to 1. thresh (float, optional): If not None, predictions with scores under this threshold are considered incorrect. Default to None. Returns: float | tuple[float]: If the input ``topk`` is a single integer, the function will return a single float as accuracy. If ``topk`` is a tuple containing multiple integers, the function will return a tuple containing accuracies of each ``topk`` number. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.