id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
178,838 |
def print_spiral(root):
s1 = []
s2 = []
s1.append(root)
while not len(s1) == 0 or not len(s2) == 0:
while not len(s1) == 0:
temp = s1.pop()
print(temp.data, end=' ')
if temp.right:
s2.append(temp.right)
if temp.left:
... | null |
178,839 |
def convert(root):
    """Post-order rewrite of a binary tree: set each node with two
    children to the bitwise AND of its (already converted) children's
    values. Nodes with fewer than two children keep their value.
    """
    if root is None:
        return
    for child in (root.left, root.right):
        convert(child)
    if root.left is not None and root.right is not None:
        root.val = root.left.val & root.right.val
178,840 | def is_leaf(root):
def sum_left(root):
s = 0
stack = []
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
if is_leaf(root.left):
s += root.left.va... | null |
178,841 |
def check_path(root, arr, n, index):
if root is None:
return n == 0
if root.left == None and root.right == None and root.val == arr[index] and index == n -1:
return True
return (index < n) and (root.val == arr[index]) and (check_path(root.left, arr, n, index + 1) or check_path(root.right... | null |
178,842 |
def flip_tree(root):
    """Flip a binary tree upside down: the leftmost node becomes the new
    root, each left child absorbs its former sibling as its left child
    and its former parent as its right child. Returns the new root.
    """
    if root is None or (root.left is None and root.right is None):
        return root
    new_root = flip_tree(root.left)
    pivot = root.left
    pivot.left = root.right
    pivot.right = root
    root.left = root.right = None
    return new_root
178,843 |
def print_route(root, stack):
    """Depth-first walk that prints every root-to-leaf value path.

    `stack` accumulates the values on the current path; it is restored to
    its original contents before each call returns.
    """
    if root is None:
        return
    stack.append(root.val)
    at_leaf = root.left is None and root.right is None
    if at_leaf:
        for value in stack:
            print(value, end=' ')
        print()
    print_route(root.left, stack)
    print_route(root.right, stack)
    stack.pop()
178,844 |
def continuous(root):
# Can be continuous if
# 1. Root is none
# 2. Both left and right STs are none
# 3. If left ST is none, check for right
# 4. If right ST is none, check for left
# 5. Else check for everything
if root is None:
return True
if root.left == None and roo... | null |
178,845 |
def find(root):
    """Return the largest value held by any node's *left* child in the
    tree; returns the sentinel -999999999999 for an empty tree (or when
    no node has a left child).
    """
    best = -999999999999
    if root is None:
        return best
    if root.left is not None:
        best = root.left.val
    return max(best, find(root.left), find(root.right))
178,846 | class Node():
def __init__(self, val):
def __str__(self):
def remove_duplicates(head):
dummy = Node(0)
dummy.next = head
prev = dummy
while head and head.next:
if head.val == head.next.val:
temp = head
while temp.next and temp.next.val == temp.val:
... | null |
def merge_two_lists(l1, l2):
    """Recursively merge two sorted singly linked lists, reusing their
    nodes. Returns the merged head (None when both inputs are None).
    On equal values the node from l2 is taken first.
    """
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    if l1.val < l2.val:
        smaller, rest = l1, merge_two_lists(l1.next, l2)
    else:
        smaller, rest = l2, merge_two_lists(l1, l2.next)
    smaller.next = rest
    return smaller
def merge_k_lists(lists):
l... | null |
178,848 |
def remove(head, n):
    """Remove the n-th node from the end of a singly linked list.

    Args:
        head: head node of the list.
        n: 1-based position counted from the end; assumed 1 <= n <= length.

    Returns:
        Head of the updated list; a new head when the old head was removed.
        (The original advanced the fast pointer n+1 steps unconditionally
        and raised AttributeError when n equalled the list length.)
    """
    fast = head
    # Open a gap of n nodes between fast and slow.
    for _ in range(n):
        fast = fast.next
    if fast is None:
        # Gap spans the whole list: the node to delete is the head.
        return head.next
    slow = head
    while fast.next:
        fast = fast.next
        slow = slow.next
    slow.next = slow.next.next
    return head
178,849 |
def print_list(head):
    """Print the list's values on one line, space-separated."""
    node = head
    while node is not None:
        print(node.val, end=" ")
        node = node.next
178,850 |
def arrange(head):
    """Regroup a linked list so the 1st, 3rd, 5th... nodes come first,
    followed by the 2nd, 4th, 6th... nodes; relative order inside each
    group is preserved. Returns the (unchanged) head, or None if empty.
    """
    if head is None:
        return None
    odd_tail = head
    even_head = even_tail = head.next
    while even_tail is not None and even_tail.next is not None:
        odd_tail.next = even_tail.next
        odd_tail = odd_tail.next
        even_tail.next = odd_tail.next
        even_tail = even_tail.next
    odd_tail.next = even_head
    return head
178,851 | curr = head
while curr:
print(curr.val)
curr = curr.next
curr = new_head
while curr:
print(curr.val)
curr = curr.next
def reverse(head):
if not head:
return None
prev = None
curr = head
while curr:
next = curr.next
curr.next = prev
prev = curr
cur... | null |
178,852 |
def delete_last_occurrence(head, val):
if not head:
return None
curr = head
prev = None
final_prev = None
final_occ = None
while curr != None:
if curr.val == val:
final_prev = prev
final_occ = curr
prev = curr
curr = curr.next
... | null |
178,853 | class Node():
    def __init__(self, val):
        """Singly linked list node: payload plus next-pointer."""
        self.val = val
        self.next = None
def push(head, data):
    """Append a new Node holding `data` at the tail of the list.

    NOTE(review): `head` must be a real node — presumably callers never
    pass an empty list; `head=None` would raise AttributeError here.
    """
    tail = head
    while tail.next is not None:
        tail = tail.next
    tail.next = Node(data)
178,854 | print()
print()
def print_list(head):
curr = head
while curr:
print(curr.val, end=' ')
curr = curr.next | null |
178,855 | class Node():
    def __init__(self, val):
        """Singly linked list node: payload plus next-pointer."""
        self.val = val
        self.next = None
def sum_numbers(head1, head2):
carry = 0
prev = None
res = None
while head1 is not None or head2 is not None:
data1 = 0 if head1 is None else head1.val
data2 = 0 if head2 is None else head2.val
... | null |
178,856 |
def detect_cycle(head):
    """Floyd's tortoise-and-hare: return True iff the list has a cycle."""
    if head is None or head.next is None:
        return False
    slow, fast = head, head.next
    while slow != fast:
        # Hare reached the end: the list terminates, so no cycle.
        if fast is None or fast.next is None:
            return False
        slow = slow.next
        fast = fast.next.next
    return True
178,857 |
def merge(l1, l2):
    """Merge two sorted singly linked lists, reusing their nodes.

    Iterative rewrite of the recursive merge; on ties the node from l2
    is linked first, matching the `<` comparison. Returns the merged
    head (None when both inputs are None).
    """
    if l1 is None:
        return l2
    if l2 is None:
        return l1
    if l1.val < l2.val:
        head = l1
        l1 = l1.next
    else:
        head = l2
        l2 = l2.next
    tail = head
    while l1 is not None and l2 is not None:
        if l1.val < l2.val:
            tail.next = l1
            l1 = l1.next
        else:
            tail.next = l2
            l2 = l2.next
        tail = tail.next
    tail.next = l1 if l1 is not None else l2
    return head
178,858 |
def remove(head, val):
    """Delete every node whose value equals `val`; return the new head
    (None when all nodes match).
    """
    # Skip leading matches so head lands on a keeper, or runs out.
    while head is not None and head.val == val:
        head = head.next
    if head is None:
        return None
    node = head
    while node.next is not None:
        if node.next.val == val:
            node.next = node.next.next
        else:
            node = node.next
    return head
178,859 | class Node():
def __init__(self, val):
self.val = val
self.next = None
def pair_swap(head):
if head == None or head.next == None:
return head
root = head.next
curr = head
prev = Node(0)
while curr.next:
curr.next = curr.next.next
curr.next.next = curr
... | null |
178,860 | from collections import defaultdict
class Node:
def __init__(self, value, level):
self.value = value
self.level = level
def min_steps(x, y):
node_x = Node(x, 0)
visited = []
queue = []
queue.append(node_x)
while queue:
s = queue.pop(0)
if s.value == y:
... | null |
178,861 |
def toggle_string_1(string):
    """Return `string` with the case of every cased character inverted."""
    swapped = string.swapcase()
    return swapped
178,862 |
def toggle_string_2(string):
    """Return `string` with each letter's case inverted; other
    characters are kept as-is.

    Collects pieces in a list and joins once — the original built the
    result with repeated `+=`, which is quadratic in the worst case.
    """
    pieces = []
    for ch in string:
        if ch.isupper():
            pieces.append(ch.lower())
        elif ch.islower():
            pieces.append(ch.upper())
        else:
            pieces.append(ch)
    return ''.join(pieces)
178,863 |
def roman_to_integer(input):
romans={'I':1,'V':5,'X':10,'L':50,'C':100,'D':500,'M':1000}
sum=0
for i in range(input):
# Getting the value of the symbol
value=romans[input[i]]
# Comparing if the next value is bigger or smaller than the current value
if i+1<len(input):
... | null |
178,864 |
def isInterleaving(string_A, string_B, string_C):
#Check if the length of String C is equal to sum of lengths of String A and B
#In other words, check if String C contains all characters of String A and B
if(len(string_C) != len(string_A) + len(string_B)): return False
#Create an empty array of l... | null |
178,865 |
def is_one_away(str1, str2):
edit_counter = 0
i = 0 # str1 index
j = 0 # str2 index
# Size difference must be less than 1
if abs(len(str1) - len(str2)) > 1:
return False
# Compare strings while counting edits
# If letters differ, update counter and compare next letter
# In th... | null |
178,866 |
The provided code snippet includes necessary dependencies for implementing the `is_rotate_string` function. Write a Python function `def is_rotate_string(A, B)` to solve the following problem:
Given two strings, A and B. A shift on A consists of taking string A and moving the leftmost character to the rightmost posit... | Given two strings, A and B. A shift on A consists of taking string A and moving the leftmost character to the rightmost position. For example, if A = 'abcde', then it will be 'bcdea' after one shift on A. Return True if and only if A can become B after some number of shifts on A. :type A: str :type B: str :rtype: bool |
178,867 |
def are_anagrams(string1, string2):
# If two strings have different size we return False as they cannot be anagrams of each other
if (len(string1) != len(string2)):
return False
# Variable to store the Xor Value
xor_value = 0
for i in range(len(string1)):
xor_value = xor_... | null |
178,868 | from collections import Counter
def unique_char_check(S):
    """Return True iff no character appears more than once in S."""
    character_count = Counter(S)
    return len(character_count) == len(S)
def is_vowel(character):
    """Return True when `character` is an English vowel (either case)."""
    return character.lower() in ('a', 'e', 'i', 'o', 'u')
def adjacent_pairs(string):
string=string.lower()
n=len(string)
count = 0
for i in range(0,n):
if ((is_vowel(string[i]) and is_vowel(string[i + 1]))):
... | null |
178,870 |
def finding_substrings(string):
# Get all substrings of string and store them in an empty list
list =[]
count=0
for i in range(len(string)):
for j in range(i+1,(len(string)+1)):
list.append(string[i:j])
count=count+1
# printing result
print(list)
... | null |
178,871 |
def reverse(s):
    """Reverse the alphanumeric characters of `s` (a mutable list of
    characters) in place, leaving non-alphanumerics at their positions;
    return the result joined into a string.
    """
    lo, hi = 0, len(s) - 1
    while lo < hi:
        if not s[lo].isalnum():
            lo += 1
        elif not s[hi].isalnum():
            hi -= 1
        else:
            s[lo], s[hi] = s[hi], s[lo]
            lo += 1
            hi -= 1
    return ''.join(s)
178,872 | def long_Prefix_Suffix_Array(pattern, P, long_prefix_suffix):
# length of the previous longest prefix suffix
l=0
long_prefix_suffix[0]=0
i=1
# the loop calculates long_prefix_suffix[i] for i = 1 to P-1
while i < P:
if pattern[i] == pattern[l]:
l += 1
long_prefix_s... | null |
178,873 | letter_counts = {}
def populate_letter_count(word1):
# Loop through each letter (looping is an O(n) operation)
for letter in word1:
# Check if it the letter is in the dictionary (checking is O(n) operation)
if letter_counts.get(letter) is None:
letter_counts[letter] = 1
else:... | null |
178,874 |
def check_permutation2(word1, word2):
    """Return True iff word1 and word2 are permutations of each other.

    The original computed the comparison but never returned it, so every
    call yielded None.
    """
    return sorted(word1) == sorted(word2)
178,875 |
def inorder(root):
if not root:
return None
stack = []
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print(root.val, end=" ")
root = root.... | null |
178,876 | def min_value_node(root):
def delete(root, val):
if not root:
return root
if val < root.val:
root.left = delete(root.left, val)
elif val > root.val:
root.right = delete(root.right, val)
else:
# Root with one child or no child
if root.left is None:
... | null |
178,877 | class Node():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def insert(root, val):
new_node = Node(val)
parent = None
curr = root
while curr:
parent = curr
if curr.val <= val:
curr = curr.right
else:
... | null |
178,878 |
def inorder(root):
if not root:
return None
stack = []
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print(root.val, end=" ")
root = root.... | null |
178,879 | def height(root, ans):
if not root:
return 0
lheight = height(root.left, ans)
rheight = height(root.right, ans)
# Diameter is basically the max of (1 + lheight + rheight)
# So we are storing it here to reduce calling it again
# O(n)
ans[0] = max(ans[0], 1 + lheight + rheight)
ret... | null |
def find_largest(root):
    """Return the largest value in a BST (its rightmost node);
    implicitly returns None for an empty tree.
    """
    node = root
    while node is not None:
        if node.right is None:
            return node.val
        node = node.right
def second_largest(root):
if not root or (not root.left and not root.right):
return "BST should have atleast 2 nodes"
curr = root
while curr:
if curr.le... | null |
178,881 | class Node:
def __init__(self, val):
def insert(root, key):
    """Insert `key` into a BST, ignoring duplicates; return the root
    (a new Node when the tree was empty).
    """
    if root is None:
        return Node(key)
    if key != root.val:
        if key < root.val:
            root.left = insert(root.left, key)
        else:
            root.right = insert(root.right, key)
    return root
178,882 |
def lca(root, n1, n2):
    """Return the value of the lowest common ancestor of n1 and n2 in a
    BST; returns None when the tree is empty.
    """
    node = root
    while node is not None:
        if n1 < node.val and n2 < node.val:
            node = node.left
        elif n1 > node.val and n2 > node.val:
            node = node.right
        else:
            # The two keys split here: this is the LCA.
            return node.val
    return node
178,883 |
def print_leaves(root):
    """Return the values of all leaf nodes, left to right.

    The original `dfs` had no None guard and still recursed into the
    (None) children of a leaf after yielding it, so it raised
    AttributeError on any non-empty tree; it also crashed on root=None.
    """
    def dfs(node):
        if node is None:
            return
        if not node.left and not node.right:
            yield node.val
            return
        yield from dfs(node.left)
        yield from dfs(node.right)
    return list(dfs(root))
178,884 | import collections
def mean(arr):
m = 0
for x in arr:
m += x
return m / len(arr)
def bfs(root):
if not root:
return
queue = collections.deque([root])
result = []
while queue:
next_queue = collections.deque()
for node in queue:
if node.left:
... | null |
178,885 |
def inorder(root):
if not root:
return None
stack = []
# Keep adding left until there is none
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print... | null |
178,886 |
def postorder(root):
if not root:
return None
stack = []
curr = root
while curr or stack:
if curr:
stack.append(curr)
curr = curr.left
else:
temp = stack[-1].right
if not temp:
temp = stack.pop()
pr... | null |
178,887 |
def preorder(root):
    """Iterative preorder traversal (node, left, right) printing each
    value followed by a space; returns None for an empty tree.
    """
    if root is None:
        return None
    stack = [root]
    while stack:
        node = stack.pop()
        print(node.val, end=" ")
        # Push right first so the left subtree is visited first.
        if node.right is not None:
            stack.append(node.right)
        if node.left is not None:
            stack.append(node.left)
178,888 |
def range_sum_preorder(root, L, R):
stack = [root]
sum = 0
while stack:
node = stack.pop()
if node:
if L <= node.val <= R:
sum += node.val
if L < node.val:
stack.append(node.left)
if node.val < R:
stack.app... | null |
178,889 | import sys
def closest_element(root, k):
if not root:
return None
curr = root
min_diff = sys.maxsize
element = None
while curr:
if curr.val == k:
return curr.val
if abs(curr.val - k) < min_diff:
min_diff = abs(curr.val - k)
element = curr.... | null |
178,890 |
def reverse_inorder(root, k):
if not root:
return None
counter = 1
stack = []
while True:
if root:
stack.append(root)
root = root.right
else:
if not stack:
break
root = stack.pop()
if counter == k:
... | null |
178,891 |
def reverse_inorder(root):
if not root:
return None
stack = []
arr = []
while True:
if root:
stack.append(root)
root = root.right
else:
if not stack:
break
root = stack.pop()
arr.append(root.val)
... | null |
178,892 |
def inorder(root, k):
if not root:
return None
stack = []
counter = 1
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
if counter == k:
... | null |
178,893 |
def merge(t1, t2):
    """Merge two binary trees node-by-node: overlapping nodes sum their
    values into t1; missing nodes are adopted from the other tree.
    Returns the merged root (t1 when both exist).

    Fix: the original assigned the merged right subtree to `t2.right`
    instead of `t1.right`, so the returned tree lost its right branch.
    """
    if not t1:
        return t2
    if not t2:
        return t1
    t1.val = t1.val + t2.val
    t1.left = merge(t1.left, t2.left)
    t1.right = merge(t1.right, t2.right)
    return t1
178,894 | print("After deletion")
def inorder(root):
if not root:
return None
stack = []
while True:
if root:
stack.append(root)
root = root.left
else:
if not stack:
break
root = stack.pop()
print(root.val, "(", root.... | null |
178,895 | class Node():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
self.count = 1
def insert(root, val):
if not root:
return Node(val)
if val == root.val:
root.count += 1
return root
if val < root.val:
root.left ... | null |
def min_value_node(root):
    """Return the smallest value in a non-empty BST (its leftmost node).

    Fix: the original looped `while curr:` until curr became None and
    then dereferenced `curr.val`, raising AttributeError on every call;
    the walk must stop at the leftmost node.
    """
    curr = root
    while curr.left is not None:
        curr = curr.left
    return curr.val
def delete(root, val):
if not root:
return None
if val < root.val:
root.left = delete(root.left, val)
elif val > root.val:
root.right = delete(root.right, val)
else:
... | null |
178,897 |
def ceil(root, key):
    """Return the smallest value in the BST that is >= key, or -1 when
    no such value exists.

    Fix: the original's final expression read `root.key`, but nodes in
    this file store their payload in `.val`, so every fall-through to
    the root's value raised AttributeError.
    """
    if not root:
        return -1
    if root.val == key:
        return root.val
    if root.val < key:
        # Everything here and to the left is too small.
        return ceil(root.right, key)
    # root.val > key: answer is in the left subtree, or root itself.
    val = ceil(root.left, key)
    return val if val >= key else root.val
178,898 |
def search(root, val):
    """Search a BST for `val`; return the matching node or None.

    Fix: the original recursed LEFT when `root.val < val` and RIGHT
    otherwise — the opposite of the BST invariant — so it could only
    find values on one degenerate path.
    """
    if root is None or root.val == val:
        return root
    if val < root.val:
        return search(root.left, val)
    return search(root.right, val)
178,899 |
def search_iterative(root, val):
    """Return True iff `val` occurs in the BST rooted at `root`."""
    node = root
    while node is not None:
        if val == node.val:
            return True
        node = node.right if val > node.val else node.left
    return False
178,900 |
def trim(root, L, R):
    """Trim a BST so only values within [L, R] remain; returns the new
    root. Relies on the BST property: when the root is out of range the
    entire answer lies in a single subtree.
    """
    if root is None:
        return None
    if L <= root.val <= R:
        root.left = trim(root.left, L, R)
        root.right = trim(root.right, L, R)
        return root
    if root.val > R:
        return trim(root.left, L, R)
    return trim(root.right, L, R)
178,901 | class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
root = sorted_array_to_bst(arr)
def sorted_array_to_bst(arr):
if not arr:
return None
mid = len(arr)//2
root = Node(arr[mid])
root.left = sorted_array_to_bst(arr[:mid])
roo... | null |
def store_inorder(root, inorder):
    """Append the tree's node payloads (`.data`) to `inorder` in inorder
    sequence; sorted order when the tree is a BST.
    """
    if root is None:
        return
    store_inorder(root.left, inorder)
    inorder.append(root.data)
    store_inorder(root.right, inorder)
def count_nodes(root):
    """Return the number of nodes in the (sub)tree rooted at `root`."""
    if root is None:
        return 0
    return 1 + count_nodes(root.left) + count_nodes(root.right)
def array_to_bst(a... | null |
178,904 |
def inorder(root):
    """Recursive inorder traversal printing one value per line."""
    if root is None:
        return
    inorder(root.left)
    print(root.val)
    inorder(root.right)
def postorder(root):
    """Recursive postorder traversal: left, right, then the node."""
    if root is None:
        return
    postorder(root.left)
    postorder(root.right)
    print(root.val)
def preorder(root):
    """Recursive preorder traversal: node, left, right — one value per
    line.

    Fix: the original recursed via `postorder` for both children, so it
    printed the root followed by the POSTorder of each subtree instead
    of a preorder walk.
    """
    if root:
        print(root.val)
        preorder(root.left)
        preorder(root.right)
178,906 | class Node():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def insertion_recursive(root, val):
if not root:
return Node(val)
else:
if root.val < val:
if root.right is None:
root.right = Node(val)
e... | null |
178,908 | class Node():
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def increasing_bst(root):
def inorder(node):
if node:
yield from inorder(node.left)
yield node.val
yield from inorder(node.right)
ans = curr = Node(No... | null |
178,909 | def ll_to_bst_recur(head, n):
if n <= 0:
return None
# TODO: Fix me!
# left = ll_to_bst_recur(
def linked_list_to_bst(head):
if not head:
return None
curr = head
n = 0
while curr:
n += 1
curr = curr.next
return ll_to_bst_recur(head, n) | null |
def are_identical(root1, root2):
    # NOTE(review): as written, the outer function only *defines* `check`
    # and falls through, so every call returns None — `check` is never
    # invoked. The structure suggests a garbled subtree-matching routine;
    # confirm intended semantics against the original source before use.
    def check(root1, root2):
        if root1 == None:
            return True
        if root2 == None:
            return False
        # NOTE(review): this recurses with the OUTER arguments, not the
        # current ones, which would loop forever if it were reachable.
        if are_identical(root1, root2):
            return True
        return (check(root1.left, root2) or check(root1.right, root2))
178,911 | import sys
def check_BST(root, min, max):
    """Return True iff the tree is a valid BST whose integer values all
    lie in [min, max]; duplicates are rejected (ranges shrink by 1).
    """
    if root is None:
        return True
    if not (min <= root.val <= max):
        return False
    return (check_BST(root.left, min, root.val - 1)
            and check_BST(root.right, root.val + 1, max))
178,912 |
def print_ancestor_recursive(root, key):
    """Locate `key` in the tree; return True at the key's node, the
    ancestor's value for nodes on the path above it, and False when the
    key is absent.

    Fix: the success branch returned `root.data`, but nodes in this file
    store their payload in `.val` (as the comparison two lines above
    already assumes), so any hit above the key raised AttributeError.
    NOTE(review): a falsy ancestor value (e.g. 0) still breaks the
    truthy chain — original design, preserved.
    """
    if not root:
        return False
    if root.val == key:
        return True
    if print_ancestor_recursive(root.left, key) or print_ancestor_recursive(root.right, key):
        return root.val
    return False
178,913 |
def min_value(root):
    """Smallest value in a BST (its leftmost node); None if empty."""
    if root is None:
        return None
    node = root
    while node.left is not None:
        node = node.left
    return node.val
178,914 |
def max_value(root):
    """Largest value in a BST (its rightmost node); None if empty."""
    if root is None:
        return None
    node = root
    while node.right is not None:
        node = node.right
    return node.val
178,915 | import collections
def bfs(root):
    """Level-order traversal printing one value per line.

    Children are enqueued right-before-left, so within each level the
    values come out right to left (matches the original's ordering).
    """
    if root is None:
        return
    queue = collections.deque([root])
    while queue:
        node = queue.popleft()
        print(node.val)
        if node.right is not None:
            queue.append(node.right)
        if node.left is not None:
            queue.append(node.left)
178,917 | from setuptools import find_packages, setup
version_file = 'mmseg/version.py'
def get_version():
    """Read ``__version__`` from `version_file` without importing the
    package.

    Executes the version file in an explicit namespace dict; the
    original relied on ``exec`` mutating the function's ``locals()``,
    which CPython does not guarantee for function scopes.
    """
    scope = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), scope)
    return scope['__version__']
178,918 | from setuptools import find_packages, setup
The provided code snippet includes necessary dependencies for implementing the `parse_requirements` function. Write a Python function `def parse_requirements(fname='requirements.txt', with_version=True)` to solve the following problem:
Parse the package dependencies listed i... | Parse the package dependencies listed in a requirements file but strips specific versioning information. Args: fname (str): path to requirements file with_version (bool, default=False): if True include version specs Returns: List[str]: list of requirements items CommandLine: python -c "import setup; print(setup.parse_r... |
178,919 | import os
import subprocess
import sys
version_file = '../mmseg/version.py'
def get_version():
    """Read ``__version__`` from `version_file` without importing the
    package.

    Executes the version file in an explicit namespace dict; the
    original relied on ``exec`` mutating the function's ``locals()``,
    which CPython does not guarantee for function scopes.
    """
    scope = {}
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'), scope)
    return scope['__version__']
178,923 | import random
import warnings
import numpy as np
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import build_optimizer, build_runner
from mmseg.core import DistEvalHook, EvalHook
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.utils import get_roo... | Launch segmentor training. |
178,924 | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
The provided code snippet includes necessary dependencies for implementing the `init_segmento... | Initialize a segmentor from config file. Args: config (str or :obj:`mmcv.Config`): Config file path or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. device (str, optional) CPU/CUDA device option. Default 'cuda:0'. Use 'cpu' for loading model on CPU... |
178,925 | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
class LoadImage:
"""A simple pipeline to load image."""
def __call__(self, results):
... | Inference image(s) with the segmentor. Args: model (nn.Module): The loaded segmentor. imgs (str/ndarray or list[str/ndarray]): Either image files or loaded images. Returns: (list[Tensor]): The segmentation result. |
178,926 | import matplotlib.pyplot as plt
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
The provided code snippet includes necessary dependencies for implementing the `show_result_p... | Visualize the segmentation results on the image. Args: model (nn.Module): The loaded segmentor. img (str or np.ndarray): Image filename or loaded image. result (list): The segmentation result. palette (list[list[int]]] | None): The palette of segmentation map. If None is given, random palette will be generated. Default... |
178,927 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `cityscapes_classes` function. Write a Python function `def cityscapes_classes()` to solve the following problem:
Cityscapes class names for external use.
Here is the function:
def cityscapes_classes():
"""Cityscapes class... | Cityscapes class names for external use. |
178,928 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `ade_classes` function. Write a Python function `def ade_classes()` to solve the following problem:
ADE20K class names for external use.
Here is the function:
def ade_classes():
"""ADE20K class names for external use."""
... | ADE20K class names for external use. |
178,929 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `voc_classes` function. Write a Python function `def voc_classes()` to solve the following problem:
Pascal VOC class names for external use.
Here is the function:
def voc_classes():
"""Pascal VOC class names for external u... | Pascal VOC class names for external use. |
178,930 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `cityscapes_palette` function. Write a Python function `def cityscapes_palette()` to solve the following problem:
Cityscapes palette for external use.
Here is the function:
def cityscapes_palette():
"""Cityscapes palette f... | Cityscapes palette for external use. |
178,931 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `ade_palette` function. Write a Python function `def ade_palette()` to solve the following problem:
ADE20K palette for external use.
Here is the function:
def ade_palette():
"""ADE20K palette for external use."""
retur... | ADE20K palette for external use. |
178,932 | import mmcv
The provided code snippet includes necessary dependencies for implementing the `voc_palette` function. Write a Python function `def voc_palette()` to solve the following problem:
Pascal VOC palette for external use.
Here is the function:
def voc_palette():
"""Pascal VOC palette for external use."""
... | Pascal VOC palette for external use. |
178,933 | import mmcv
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
The provided code snippet includes necessary dependencies for implementing the `get_classes` function. Write a Python function `def get_classes(dataset)` to solve the ... | Get class names of a dataset. |
178,934 | import mmcv
dataset_aliases = {
'cityscapes': ['cityscapes'],
'ade': ['ade', 'ade20k'],
'voc': ['voc', 'pascal_voc', 'voc12', 'voc12aug']
}
The provided code snippet includes necessary dependencies for implementing the `get_palette` function. Write a Python function `def get_palette(dataset)` to solve the ... | Get class palette (RGB) of a dataset. |
178,935 | import mmcv
import numpy as np
def eval_metrics(results,
gt_seg_maps,
num_classes,
ignore_index,
metrics=['mIoU'],
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate evaluati... | Calculate Mean Intersection and Union (mIoU) Args: results (list[ndarray]): List of prediction segmentation maps. gt_seg_maps (list[ndarray]): list of ground truth segmentation maps. num_classes (int): Number of categories. ignore_index (int): Index that will be ignored in evaluation. nan_to_num (int, optional): If spe... |
178,936 | import mmcv
import numpy as np
def eval_metrics(results,
gt_seg_maps,
num_classes,
ignore_index,
metrics=['mIoU'],
nan_to_num=None,
label_map=dict(),
reduce_zero_label=False):
"""Calculate evaluati... | Calculate Mean Dice (mDice) Args: results (list[ndarray]): List of prediction segmentation maps. gt_seg_maps (list[ndarray]): list of ground truth segmentation maps. num_classes (int): Number of categories. ignore_index (int): Index that will be ignored in evaluation. nan_to_num (int, optional): If specified, NaN value... |
178,939 | import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
def resize(input,
size=None,
scale_factor=None,
mode='nearest',
align_corners=None,
warning=True):
if warning:
if size is not None and align_corners:
input_h... | null |
178,940 | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from ..builder import PIPELINES
The provided code snippet includes necessary dependencies for implementing the `to_tensor` function. Write a Python function `def to_tensor(data)` to solve the ... | Convert objects of various python types to :obj:`torch.Tensor`. Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`, :class:`Sequence`, :class:`int` and :class:`float`. Args: data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to be converted. |
178,941 | import copy
import platform
import random
from functools import partial
import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg
from mmcv.utils.parrots_wrapper import DataLoader, PoolDataLoader
from torch.utils.data import DistributedSam... | Build PyTorch DataLoader. In distributed training, each GPU/process has a dataloader. In non-distributed training, there is only one dataloader for all GPUs. Args: dataset (Dataset): A PyTorch dataset. samples_per_gpu (int): Number of training samples on each GPU, i.e., batch size of each GPU. workers_per_gpu (int): Ho... |
178,942 | import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def flatten_binary_logits(logits, labels, ignore_index=None):
"""Flattens predictions in the batch (binary case) Remove labels equal to
'ignore_index'."""
logits ... | Binary Lovasz hinge loss. Args: logits (torch.Tensor): [B, H, W], logits at each pixel (between -infty and +infty). labels (torch.Tensor): [B, H, W], binary ground truth masks (0 or 1). classes (str | list[int], optional): Placeholder, to be consistent with other loss. Default: None. per_image (bool, optional): If per_... |
178,943 | import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def flatten_probs(probs, labels, ignore_index=None):
"""Flattens predictions in the batch."""
if probs.dim() == 3:
# assumes output of a sigmoid layer
... | Multi-class Lovasz-Softmax loss. Args: probs (torch.Tensor): [B, C, H, W], class probabilities at each prediction (between 0 and 1). labels (torch.Tensor): [B, H, W], ground truth labels (between 0 and C - 1). classes (str | list[int], optional): Classes choosed to calculate loss. 'all' for all classes, 'present' for c... |
178,944 | import functools
import torch.nn.functional as F
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in lo... | Create a weighted version of a given loss function. To use this decorator, the loss function must have the signature like `loss_func(pred, target, **kwargs)`. The function only needs to compute element-wise loss without any reduction. This decorator will add weight and reduction arguments to the function. The decorated... |
178,945 | import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `accuracy` function. Write a Python function `def accuracy(pred, target, topk=1, thresh=None)` to solve the following problem:
Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The m... | Calculate accuracy according to the prediction and target. Args: pred (torch.Tensor): The model prediction, shape (N, num_class, ...) target (torch.Tensor): The target of each prediction, shape (N, , ...) topk (int | tuple[int], optional): If the predictions in ``topk`` matches the target, the predictions will be regar... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.