id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
12,763 | import sys
MAX = 0
arr = list(map(int, input().split()))
def DFS(x):
    """Try every order of removing interior beads; track the best score in MAX.

    x is the energy collected so far.  When only the two end beads remain,
    record the candidate answer.  (Looks like an "energy beads" style
    problem -- TODO confirm.)
    """
    global MAX
    if len(arr) == 2:
        MAX = max(MAX, x)
        return
    for i in range(1, len(arr)-1):
        save = arr[i]          # remember the removed bead for backtracking
        arr.pop(i)
        # after the pop, arr[i-1] and arr[i] are the removed bead's neighbors
        DFS(x + arr[i-1] * arr[i])
arr.insert(i, save) | null |
12,765 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
used = [ 0 for _ in range(10) ]
def dfs(idx, cnt):
    """Print all strictly increasing length-M index selections from the sorted
    input, skipping duplicate values at the same depth (dedup via `pre`)."""
    global N, M
    if cnt == M:
        # `idx` is reused here as a plain loop variable; the parameter is no
        # longer needed once a full selection is being printed
        for idx in range(cnt):
            print(arr[choose[idx]], end=' ')
        print()
        return
    pre = -1                       # last value tried at this depth (dedup guard)
    for i in range(idx, N):
        if used[i] or pre == arr[i]:
            continue
        pre = arr[i]
        used[i] = 1
        choose[cnt] = i
        dfs(i + 1, cnt + 1)
used[i] = 0 | null |
12,767 | import sys
N, M = map(int, input().split())
choose = [ 0 for _ in range(10) ]
used = [ 0 for _ in range(10) ]
def dfs(cnt):
    """Print every length-M permutation of the numbers 1..N, one per line."""
    global N, M
    if cnt == M:
        for idx in range(cnt):
            print(choose[idx], end=' ')
        print()
        return
    for i in range(1, N + 1):
        if used[i]:              # number i already taken on this path
            continue
        used[i] = 1
        choose[cnt] = i
        dfs(cnt + 1)
used[i] = 0 | null |
12,768 | import sys
sys.setrecursionlimit(10**4)
def input():
return sys.stdin.readline().rstrip() | null |
12,769 | import sys
n, h, d = map(int, input().split())
visit = [[False] * n for _ in range(n)]
umbs = []
answer = INF
if answer == INF:
print(-1)
else:
print(answer)
def dfs(cur):
    """Search over umbrella pickups toward `end`, minimizing total distance.

    cur = (y, x, health, durability, cnt) where cnt is distance walked so far.
    Records the best total in `answer` once the exit is reachable with the
    remaining health + umbrella durability.  (Assumes `end` and `INF` are
    defined elsewhere in the file -- TODO confirm.)
    """
    global answer, n
    y, x, health, durability, cnt = cur
    dist = abs(end[0] - y) + abs(end[1] - x)   # Manhattan distance to the exit
    if dist <= health + durability:
        answer = min(answer, cnt + dist)
        return
    else:
        for umb in umbs:
            uy, ux = umb
            if visit[uy][ux]: continue
            dist2 = abs(uy - y) + abs(ux - x)
            # this umbrella would exhaust health + durability on the way there
            if dist2 - 1 >= health + durability: continue
            visit[uy][ux] = True
            if dist2 <= durability:
                # umbrella absorbs the whole walk; pick up a fresh one (durability d)
                dfs((uy, ux, health, d, cnt + dist2))
            else:
                # umbrella breaks en route; the remaining steps cost health
                dfs((uy, ux, health + durability - dist2, d, cnt + dist2))
visit[uy][ux] = False | null |
12,771 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
used = [ 0 for _ in range(10) ]
def dfs(idx, cnt):
    """Print all strictly increasing length-M index selections from the
    sorted input (no repetition; `used` guards already-taken indices)."""
    global N, M
    if cnt == M:
        for idx in range(cnt):
            print(arr[choose[idx]], end=' ')
        print()
        return
    for i in range(idx,N):
        if used[i]:
            continue
        used[i] = 1
        choose[cnt] = i
        dfs(i + 1, cnt + 1)
used[i] = 0 | null |
12,773 | import sys
ans = 0
nx = [0, 0, 1, -1]
ny = [1, -1, 0, 0]
visited = [[0 for i in range(31)] for j in range(31)]
N = arr[0]
dir = arr[1:]
visited[14][14] = 1
def DFS(x,y,ct,now):
    """Accumulate into global `ans` the probability of every simple
    (non-self-crossing) N-step walk.

    `now` is the probability of the path so far; dir[i] is the percent chance
    of stepping in direction i.
    """
    global ans
    if ct == N:
        ans += now
        return
    for i in range(4):
        dx = x + nx[i]
        dy = y + ny[i]
        if visited[dx][dy] == 0:   # only extend to cells not on the current path
            visited[dx][dy] = 1
            DFS(dx,dy,ct+1,now*dir[i]/100)
visited[dx][dy] = 0 | null |
12,774 | import sys
def input():
return sys.stdin.readline().strip() | null |
12,775 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
def dfs(cnt):
    """Print every length-M index sequence over the sorted input, repetition
    allowed (cartesian-product style enumeration)."""
    global N, M
    if cnt == M:
        for idx in range(cnt):
            print(arr[choose[idx]], end=' ')
        print()
        return
    for i in range(0,N):
        choose[cnt] = i
dfs(cnt + 1) | null |
12,777 | import sys
N = int(input())
arr = []
visited = [0] * (N+1)
def DFS():
    """Print every permutation of 1..N in lexicographic order, building the
    current permutation in the shared list `arr` and backtracking."""
    if len(arr) == N:
        print(*arr)
        return
    for i in range(1, N+1):
        if visited[i] == 0:       # i not yet used in the current permutation
            visited[i] = 1
            arr.append(i)
            DFS()
            arr.pop()
visited[i] = 0 | null |
12,779 | import sys
N, M = map(int, input().split())
choose = [ 0 for _ in range(10) ]
def dfs(idx, cnt):
global N, M
if cnt == M:
for idx in range(cnt):
print(choose[idx], end=' ')
print()
return
for i in range(idx, N + 1):
choose[cnt] = i
dfs(i, cnt + 1) | null |
12,781 | import sys
N, M = map(int, input().split())
choose = [ 0 for _ in range(10) ]
def dfs(idx, cnt):
    """Enumerate length-M sequences over 1..N and print them.

    NOTE(review): the loop uses range(1, N + 1) and ignores `idx`, while the
    recursive call passes i + 1 as the next idx -- if this is meant to be the
    increasing-combination variant, the loop should start at `idx`.  Confirm
    against the intended problem before relying on this snippet.
    """
    global N, M
    if cnt == M:
        for idx in range(cnt):
            print(choose[idx], end=' ')
        print()
        return
    for i in range(1, N + 1):
        choose[cnt] = i
dfs(i + 1, cnt + 1) | null |
12,783 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
def dfs(cnt):
global N, M
if cnt == M:
for idx in range(cnt):
print(arr[choose[idx]], end=' ')
print()
return
pre = -1
for i in range(0, N):
if pre == arr[i]:
continue
pre = arr[i]
choose[cnt] = i
dfs(cnt + 1) | null |
12,785 | import sys
from collections import deque
arr = deque(list(map(int, input().split())))
oper = list(map(int, input().split()))
MIN = 1e9+1
MAX = -1e9-1
def DFS(ans):
    """Try every assignment of the remaining +, -, *, // operators to the
    number sequence, tracking the global MIN/MAX of the running result.

    oper[i] holds how many operators of kind i are left; `arr` holds the
    numbers still to consume (leftmost is applied next).
    """
    global MIN, MAX
    if not arr:
        MIN = min(MIN, ans)
        MAX = max(MAX, ans)
        return
    for i in range(4):
        if oper[i] > 0:
            oper[i] -= 1
            first = arr[0]          # remember the operand for backtracking
            if i == 0:
                DFS(ans+arr.popleft())
            elif i == 1:
                DFS(ans-arr.popleft())
            elif i == 2:
                DFS(ans*arr.popleft())
            elif i == 3:
                # Python's // floors toward -inf; this problem wants
                # truncation toward zero, so mixed signs are handled manually.
                if ans < 0 and arr[0] > 0 or ans > 0 and arr[0] < 0:
                    div = abs(ans) // abs(arr.popleft())
                    DFS(-div)
                else:
                    DFS(ans//arr.popleft())
            arr.appendleft(first)
oper[i] += 1 | null |
12,787 | import sys
from collections import deque
result = []
for i in range(10):
result.append(i)
if len(result) > N:
print(result[N])
else:
print(-1)
def bfs(N):
queue = deque()
for i in range(1, 10):
queue.append((i, str(i)))
while queue:
if len(result) == N + 1:
break
cur_num,totol_num = queue.popleft()
if cur_num != 0:
for k in range(cur_num):
next_num = totol_num + str(k)
queue.append((k,next_num))
result.append(next_num) | null |
12,789 | import sys
N, S = map(int, input().split())
arr = list(map(int, input().split()))
poc = []
ans = 0
def backTracking(idx):
    """Count (in global `ans`) the non-empty subsets of arr whose sum equals S.

    Each recursion node corresponds to exactly one subset (the current
    contents of `poc`), so every subset is tested exactly once: full-length
    subsets in the first branch, shorter ones in the else branch.
    """
    global ans
    if len(poc) >= N:
        if sum(poc) == S:
            ans += 1
        return
    else:
        # intermediate (shorter-than-N) non-empty subsets are checked here
        if sum(poc) == S and poc:
            ans += 1
    for i in range(idx,N):
        poc.append(arr[i])
        backTracking(i+1)
poc.pop() | null |
12,791 | import sys
N, M = map(int, input().split())
Map = [ [ 0 for _ in range(M + 1) ] for __ in range(N + 1) ]
answer = 0
def dfs(cnt):
    """Count (in global `answer`) the ways to mark cells of the N x M grid so
    that no 2 x 2 sub-square becomes fully marked.

    Cells are visited in row-major order via `cnt`; the grid carries a
    1-based border of zeros so the y-1 / x-1 probes never go out of range.
    """
    global answer
    if cnt == N * M:
        answer += 1
        return
    y = cnt // M + 1
    x = cnt % M + 1
    dfs(cnt + 1)                  # case 1: leave this cell unmarked
    if Map[y - 1][x] == 0 or Map[y][x - 1] == 0 or Map[y - 1][x - 1] == 0: # if this cell can still be marked
        Map[y][x] = 1
        dfs(cnt + 1)              # case 2: mark it
Map[y][x] = 0 | null |
12,793 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
used = [ 0 for _ in range(10) ]
def dfs(cnt):
global N, M
if cnt == M:
for idx in range(cnt):
print(arr[choose[idx]], end=' ')
print()
return
for i in range(0,N):
if used[i]:
continue
used[i] = 1
choose[cnt] = i
dfs(cnt + 1)
used[i] = 0 | null |
12,795 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
def dfs(idx, cnt):
global N, M
if cnt == M:
for idx in range(cnt):
print(arr[choose[idx]], end=' ')
print()
return
pre = -1
for i in range(idx, N):
if pre == arr[i]:
continue
pre = arr[i]
choose[cnt] = i
dfs(i, cnt + 1) | null |
12,797 | import sys
N, M = map(int, input().split())
arr = sorted(list(map(int, input().split())))
choose = [ 0 for _ in range(10) ]
used = [ 0 for _ in range(10) ]
def dfs(cnt):
global N, M
if cnt == M:
for idx in range(cnt):
print(arr[choose[idx]], end=' ')
print()
return
pre = -1
for i in range(N):
if used[i] or pre == arr[i]:
continue
pre = arr[i]
used[i] = 1
choose[cnt] = i
dfs(cnt + 1)
used[i] = 0 | null |
12,799 | import sys
for _ in range(F):
a, b = map(int, sys.stdin.readline().split())
relation[a][b] = True
relation[b][a] = True
def dfs(start, relation, friends):
global flag, visit, answer
if flag: # 친구 관계 성립되면 더이상 할 필요 X
return
if len(friends) == K: # 만족하는 친구관계가 K개 일때
flag = True
answer = friends
return
for nxt, status in enumerate(relation[start]):
if not status: continue
if visit[nxt]: continue
if not check(nxt, friends): continue
visit[nxt] = True
dfs(nxt, relation, friends + [nxt])
visit[nxt] = False
answer = solution(relation, K)
if flag:
for num in answer:
print(num)
else:
print(-1)
def solution(relation, K):
global flag, visit, answer
flag = False
answer = -1
visit = [False for _ in range(N+1)]
for i in range(1, N+1):
if sum(relation[i]) < K: continue # i번째 사람의 친구가 K보다 작으면 할 필요 X
if flag: # 성공한 적 있으면 더이상 할 필요 X
break
visit[i] = True
dfs(i, relation, [i])
visit[i] = False
return answer | null |
12,803 | import math
import sys
def input():
return sys.stdin.readline().rstrip() | null |
12,809 | import sys
dp = [0] * 101
def recursion(x):
    """Memoized Padovan-style sequence: P(1..3) = 1, P(x) = P(x-2) + P(x-3).

    dp[x] == 0 marks "not computed yet"; base cases are returned directly
    without caching.
    """
    if x == 1 or x == 2 or x == 3:
        return 1
    if dp[x] == 0:
        dp[x] = recursion(x-2) + recursion(x-3)
return dp[x] | null |
12,816 | import sys
sys.setrecursionlimit(1000000)
def input():
return sys.stdin.readline().rstrip() | null |
12,817 | import sys
def dfs(cur):
    """Two-color the component containing `cur`; return False iff an edge
    joins two nodes carrying the same label (i.e. the graph is not bipartite)."""
    flag = True
    visited[cur] = True
    for nei in graph[cur]:
        if visited[nei]:
            if label[cur] == label[nei]: # neighbor already carries the same label?
                return False # then this is not a bipartite graph
        else:
            label[nei] = 3 - label[cur] # assign the opposite label (labels are 1 and 2)
            flag &= dfs(nei)
return flag | null |
12,819 | import sys
from collections import deque
dx = [-1,1,0,0]
dy = [0,0,-1,1]
def bfs(x, y):
    """Flood-fill BFS from (x, y) over `field`, marking reachable cells in
    the shared `visited` grid.

    Bug fix: the walkability test previously checked field[x][y] -- the cell
    just popped, which was already admitted -- instead of the neighbor
    field[nx][ny], so the search leaked into blocked cells.
    """
    queue = deque()
    queue.append((x, y))
    visited[x][y] = True
    while queue:
        x,y = queue.popleft()
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            if not (0<=nx<N and 0<=ny<M):continue
            # skip already-seen cells and non-walkable neighbor cells
            if visited[nx][ny] or not field[nx][ny]:continue
            queue.append((nx,ny))
visited[nx][ny] = True | null |
12,822 | import sys
M, N = map(int, input().split())
banner = [list(map(int, input().split())) for _ in range(M)]
dirx = (1, 0, -1, 0, 1, 1, -1, -1)
diry = (0, 1, 0, -1, 1, -1, 1, -1)
def find_string(row, col):
    """Erase (set to 0) the whole 8-connected non-zero component containing
    (row, col) using an iterative DFS; the caller counts one component per call."""
    stack = [(row, col)]
    banner[row][col] = 0
    while stack:
        x, y = stack.pop()
        for dx, dy in zip(dirx, diry):
            nx, ny = x + dx, y + dy
            if 0 <= nx < M and 0 <= ny < N and banner[nx][ny]:
                stack.append((nx, ny))
                banner[nx][ny] = 0   # mark consumed when pushed, not when popped
return 1 | null |
12,824 | import sys
from collections import deque
m, n = map(int,input().split())
board = [list(map(int,input().split())) for _ in range(m)]
start = list(map(int,input().split()))
start[0]-=1
start[1]-=1
end = list(map(int,input().split()))
end[0]-=1
end[1]-=1
def move(dir):
def bfs():
y, x, dir = start
visit = [[[False] * n for _ in range(m)] for _ in range(5)]
visit[dir][y][x] = True
q = deque()
q.append((y, x, dir, 0))
result = 0
while q:
y, x, dir, cnt = q.popleft()
if y==end[0] and x==end[1] and dir==end[2]:
result = cnt
break
for i in range(1, 4):
dy,dx = move(dir)
ny = y + dy * i
nx = x + dx * i
if ny<0 or ny>=m or nx<0 or nx>=n: break
if board[ny][nx]==1: break
if visit[dir][ny][nx]: continue
visit[dir][ny][nx] = True
q.append((ny, nx, dir, cnt+1))
for next_dir in range(1, 5):
if dir==next_dir: continue
if visit[next_dir][y][x]: continue
visit[next_dir][y][x] = True
if (dir==1 and next_dir==2) : q.append((y, x, next_dir, cnt+2))
elif (dir==2 and next_dir==1) : q.append((y, x, next_dir, cnt+2))
elif (dir==3 and next_dir==4) : q.append((y, x, next_dir, cnt+2))
elif (dir==4 and next_dir==3) : q.append((y, x, next_dir, cnt+2))
else: q.append((y, x, next_dir, cnt+1))
return result | null |
12,826 | import sys
from collections import deque
def checkMap():
    """Return True when no cell in the H x N x M grid `arr` is still 0."""
    return all(
        arr[z][i][j] != 0
        for z in range(H)
        for i in range(N)
        for j in range(M)
    )
M, N, H = map(int, input().split())
arr = []
nx = [-1,0,1,0,0,0]
ny = [0,-1,0,1,0,0]
nz = [0,0,0,0,-1,1]
queue = deque()
arr = [ [ list(map(int, input().split())) for _ in range(N) ] for _ in range(H) ]
for z in range(H):
for i in range(N):
for j in range(M):
if arr[z][i][j] == 1:
arr[z][i][j] = 1
queue.append(((z,i,j),0))
def BFS():
while queue:
q = queue.popleft()
z, x, y = q[0]
for i in range(6):
dx = x + nx[i]
dy = y + ny[i]
dz = z + nz[i]
if dx < 0 or dx >= N or dy < 0 or dy >= M or dz < 0 or dz >= H:
continue
if arr[dz][dx][dy] == 0:
arr[dz][dx][dy] = 1
queue.append(((dz,dx,dy), q[1]+1))
if checkMap():
return q[1]
return -1 | null |
12,828 | from collections import deque
import sys
q = deque()
fire_q = deque()
def bfs(q, cnt):
    """Advance the player one step in every direction from frontier `q`.

    Returns (escaped, next_frontier).  Walls (board == 1) and burning cells
    (board == 2) are skipped; `check` presumably detects reaching the exit --
    TODO confirm its contract.  NOTE(review): the `cnt` parameter is unused.
    """
    global r, c
    next_q = deque()
    while q:
        y, x = q.popleft()
        if board[y][x] == 2: continue        # fire reached this cell first
        for i in range(4):
            ny, nx = y + dy[i], x + dx[i]
            # bounds allow 0..r+1 / 0..c+1, i.e. a board with a 1-cell border
            if ny<0 or ny>=r+2 or nx<0 or nx>=c+2: continue
            if board[ny][nx] == 1 or board[ny][nx] == 2: continue
            if visit[ny][nx]: continue
            if check((ny,nx), board):
                return True, next_q
            next_q.append([ny, nx])
            visit[ny][nx] = True
    return False, next_q
def fire_bfs(q):
    """Spread the fire one step outward from frontier `q`; return the new
    frontier.  Walls (1) and already-burning cells (2) are not entered."""
    global r, c
    next_q = deque()
    while q:
        y, x = q.popleft()
        for i in range(4):
            ny, nx = y + dy[i], x + dx[i]
            # fire stays strictly inside the border (1..r / 1..c)
            if ny<=0 or ny>=r+1 or nx<=0 or nx>=c+1: continue
            if board[ny][nx] == 1 or board[ny][nx] == 2: continue
            next_q.append([ny, nx])
            board[ny][nx] = 2                # 2 marks a burning cell
    return next_q
def solution():
    """Alternate one player step and one fire step per minute; return the
    minute the player escapes, or 'IMPOSSIBLE' when the player frontier
    empties out."""
    global q, fire_q
    cnt = 0
    while True:
        cnt += 1
        chk, q = bfs(q, cnt)
        if chk:                # escaped during this minute
            return cnt
        if not q:              # nowhere left for the player to move
            return 'IMPOSSIBLE'
        fire_q = fire_bfs(fire_q)
return 'IMPOSSIBLE' | null |
12,829 | import sys
from collections import deque
from itertools import combinations
m = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(n)]
empty, virus = get_pos() ons(empty, 3)
m - (len(empty) + len(virus))
def get_pos():
empty, virus = [], []
for i in range(n):
for j in range(m):
# 비었다면 empty에 추가
if arr[i][j] == 0:
empty.append((i, j))
# 바이러스라면 virus에 추가
elif arr[i][j] == 2:
virus.append((i, j))
return empty, virus | null |
12,830 | import sys
from collections import deque
from itertools import combinations
arr = [list(map(int, input().split())) for _ in range(n)]
def set_wall(comb):
for y, x in comb:
arr[y][x] = 1 | null |
12,831 | import sys
from collections import deque
from itertools import combinations
arr = [list(map(int, input().split())) for _ in range(n)]
def collapse_wall(comb):
for y, x in comb:
arr[y][x] = 0 | null |
12,832 | import sys
from collections import deque
from itertools import combinations
direction = [(0, 1), (-1, 0), (0, -1), (1, 0)]
m = map(int, input().split())
arr = [list(map(int, input().split())) for _ in range(n)]
m - (len(empty) + len(virus))
def bfs(virus):
queue = deque(virus)
visited = [[False] * m for _ in range(n)] # 방문 여부
count = len(virus) # 바이러스 개수
while queue:
q_size = len(queue)
for _ in range(q_size):
y, x = queue.popleft()
visited[y][x] = True # 방문 처리
for dy, dx in direction:
ny, nx = y + dy, x + dx
# 지도 내에 있고
if (0 <= ny < n) and (0 <= nx < m):
# 빈 공간이고 방문하지 않았다면
if arr[ny][nx] == 0 and not visited[ny][nx]:
visited[ny][nx] = True # 방문 처리
queue.append((ny, nx)) # 큐에 삽입
count += 1 # 바이러스 개수 +1
return count | null |
12,834 | import sys
arr = [[] for i in range(N+1)]
visited = [0] * (N+1)
for i in range(M):
u,v = map(int, input().split())
arr[u].append(v)
arr[v].append(u)
for i in range(1,N+1):
if visited[i] == 0:
DFS(i)
ans += 1
def DFS(now):
visited[now] = 1
for i in arr[now]:
if visited[i] == 0:
DFS(i) | null |
12,836 | from collections import deque
import sys
def answer(G, r):
is_giga_find = False
longest = 0
body = 0
visited = [False for _ in range(len(G))]
q = deque()
q.append((0, r))
visited[r] = True
while q:
dist, node = q.popleft()
if dist > longest:
longest = dist
# 기가 가지가 아직 안나왔고, 2개 이상 연결되어있을 때
# or V자
if not is_giga_find and len(G[node]) > 2 \
or (node == r and len(G[node]) >= 2):
is_giga_find = True
body = dist
for branch in G[node]:
if not visited[branch]:
visited[branch] = True
q.append((dist + G[node][branch], branch))
# 기가를 못찾았다 -> 가지가 없다.
if not is_giga_find:
body = longest
longest = body
# 기둥, 가장 긴 가지 길이(루트~해당가지 길이 - 기둥 길이)
return body, longest - body | null |
12,838 | import sys
ans = []
if ans:
print(ans[0])
else:
print(-1)
def DFS(x,ct):
if x == B:
ans.append(ct)
return
if x * 10 + 1 <= B:
DFS(x * 10 + 1,ct+1)
if x * 2 <= B:
DFS(x*2,ct+1) | null |
12,840 | from collections import deque
import sys
def bfs(board):
end = (0,7)
que = deque()
que.append((7,0,0))
visit = [[[False] * 8 for _ in range(8)] for _ in range(9)]
visit[0][7][0] = True
dy = [0,0,0,-1,1,-1,1,-1,1]
dx = [0,-1,1,0,0,-1,1,1,-1]
result = 0
while que:
y,x,time = que.popleft()
if y==end[0] and x==end[1]:
result = 1
break
for i in range(9):
ny, nx = y + dy[i], x + dx[i]
ntime = min(time + 1, 8)
if ny<0 or ny>=8 or nx<0 or nx>=8: continue
if ny-time>=0 and board[ny-time][nx]=='#': continue
if ny-ntime>=0 and board[ny-ntime][nx]=='#': continue
if visit[ntime][ny][nx]: continue
visit[ntime][ny][nx] = True
que.append((ny,nx,ntime))
return result | null |
12,842 | import sys
N = int(input())
arr = [list(input()) for _ in range(N)]
def dfs(idx):
    """Count people reachable from `idx` through at most two 'Y' hops
    (friends and friends-of-friends) using an iterative DFS over the
    adjacency-matrix `arr`."""
    global N
    visited = [ True for _ in range(N) ]   # True == not yet seen
    visited[idx] = False
    stack = [ (idx, 0) ]                   # (person, hops from idx)
    total = 0
    while stack:
        curent_x, p_cnt = stack.pop()
        for either_x in range(N):
            if arr[curent_x][either_x] == 'Y':
                # only expand up to depth 2 (p_cnt counts hops already taken)
                if visited[either_x] and p_cnt <= 1:
                    visited[either_x] = False
                    stack.append((either_x, p_cnt+1))
                    total += 1
return total | null |
12,843 | import sys
sys.setrecursionlimit(10000)
def input():
return sys.stdin.readline().rstrip() | null |
12,844 | import sys
nx = [-1,-1,-1,0,1,1,1,0]
ny = [-1,0,1,1,1,0,-1,-1]
while True:
w, h = map(int, input().split())
if w == 0 and h == 0:
break
arr = []
ct = 0
for i in range(h):
arr.append(list(map(int, input().split())))
for i in range(h):
for j in range(w):
if arr[i][j] == 1:
DFS(i,j)
ct += 1
print(ct)
def DFS(x, y):
arr[x][y] = 0
for i in range(8):
dx = nx[i] + x
dy = ny[i] + y
if dx < 0 or dx >= h or dy < 0 or dy >= w:
continue
if arr[dx][dy] == 1:
DFS(dx,dy) | null |
12,846 | from collections import deque
import sys
n, m = map(int, input().split())
G = []
def answer(row, col):
global m, n
shortest = 10000001
q = deque()
direction = [(1, 0), (0, 1), (-1, 0), (0, -1)]
visited = [[[0] * 2 for _ in range(m)] for _ in range(n)]
q.append((row, col, 1))
visited[row][col][1] = 1
while q:
r, c, is_able = q.popleft()
for r_, c_ in direction:
drow = r + r_
dcol = c + c_
if drow < 0 or dcol < 0 or drow >= n or dcol >= m:
continue
if visited[drow][dcol][is_able] == 0 and G[drow][dcol] == '0':
visited[drow][dcol][is_able] = visited[r][c][is_able] + 1
q.append((drow, dcol, is_able))
elif is_able == 1 and G[drow][dcol] == '1':
visited[drow][dcol][0] = visited[r][c][1] + 1
q.append((drow, dcol, 0))
shortest = visited[n - 1][m - 1]
return_val = None
if shortest[0] == 0 and shortest[1] == 0:
return_val = -1
else:
if shortest[0] > shortest[1]:
return_val = shortest[1]
if return_val == 0:
return_val = shortest[0]
else:
return_val = shortest[0]
if return_val == 0:
return_val = shortest[1]
return return_val | null |
12,848 | import sys
from collections import deque
L, W = map(int, input().split())
arr = []
nx = [-1, 0, 1, 0]
ny = [0, -1, 0, 1]
for i in range(L):
arr.append(input())
for i in range(L):
for j in range(W):
if arr[i][j] == 'L':
visited = [[0 for i in range(W)] for j in range(L)]
ct = BFS(i,j)
MIN_CT = max(MIN_CT, ct)
def BFS(x,y):
queue = deque()
queue.append((x,y,0))
visited[x][y] = 1
while queue:
q = queue.popleft()
for i in range(4):
dx = nx[i] + q[0]
dy = ny[i] + q[1]
if dx < 0 or dx >= L or dy < 0 or dy >= W:
continue
if visited[dx][dy] == 0 and arr[dx][dy] == 'L':
visited[dx][dy] = 1
queue.append((dx,dy,q[2]+1))
return q[2] | null |
12,849 | import os
import sys
import subprocess
import argparse
from bs4 import BeautifulSoup as bs
import requests
def load_arg():
parser = argparse.ArgumentParser()
arg = parser.add_argument
arg('--pr_number', type=int, help="Pull Request Number")
arg('--check_solution', action='store_true')
parser.set_defaults(check_solution=False)
return parser.parse_args() | null |
12,850 | import os
import sys
import subprocess
import argparse
from bs4 import BeautifulSoup as bs
import requests
def check_alreay_exist_solution(path):
    """Raise if a solution file already exists at *path*.

    (The function name's typo is kept -- it is the public interface callers
    use.)  Fixes the typo in the error message, which previously read
    "Alread Exists Solution".
    """
    if os.path.exists(path):
        raise Exception("Already Exists Solution")
print("It is a new Solution!!") | null |
12,851 | import os
import sys
import subprocess
import argparse
from bs4 import BeautifulSoup as bs
import requests
def run(command):
ret = subprocess.check_output(command, shell=True).decode('utf8')
return ret
def get_pr_file(pr_number):
run(f"git fetch origin +refs/pull/{pr_number}/merge")
files = run(f"git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD main)")
files = [file.strip() for file in files.split() if file.strip() != ""]
if len(files) != 1:
raise Exception("No 1 PR 1 Solution")
return files[0] | null |
12,852 | import os
import sys
import subprocess
import argparse
from bs4 import BeautifulSoup as bs
import requests
def run(command):
ret = subprocess.check_output(command, shell=True).decode('utf8')
return ret
def detect_tab(path):
    """Fail the style check when the file at *path* contains a tab character."""
    with open(path, 'r') as fp:
        contents = fp.read()
    if '\t' in contents:
        raise Exception("Detected Tab in this solution")
    print("No Detect Tab")
def get_example(problem_id):
url = f"https://www.acmicpc.net/problem/{problem_id}"
req = requests.get(url)
html = bs(req.text, 'html.parser')
spj = True if html.find('span', 'problem-label-spj') else False
if spj:
print("Found Special Judge")
examples = []
try:
sample_id = 1
while True:
sample_input = html.select(f'#sample-input-{sample_id}')[0].text
sample_output = html.select(f'#sample-output-{sample_id}')[0].text
examples.append((sample_input, sample_output))
sample_id += 1
except:
pass
return spj, examples
def compile_test(path):
try:
extension = get_solution_language(path)
if extension == 'cpp':
run(f"g++ -std=c++17 {path} -o Main")
elif extension == 'java':
run(f"javac {path} -d .")
except:
raise RuntimeError("Compile Error")
return extension
def judge_test(path):
    """Run every scraped sample for the problem that *path* solves and diff
    the outputs.

    Raises on tab characters, runtime errors, or a wrong answer; special-judge
    problems are only executed, not diffed.
    """
    detect_tab(path)
    problem = path.split('/')[-2]        # .../<problem-id>/<solution-file>
    spj, samples = get_example(problem)
    extension = compile_test(path)
    print(f"Found {len(samples)} examples in {problem} problem")
    for idx, (data_input, data_output) in enumerate(samples):
        with open("example.in", 'w') as f:
            f.write(data_input)
        try:
            if extension == 'cpp':
                result = run(f"./Main < example.in")
            elif extension == 'python':
                result = run(f"python3 {path} < example.in")
            elif extension == 'java':
                result = run(f"java Main < example.in")
        except:
            raise Exception("Runtime Error")
        if spj:
            continue
        # Bug fix: `result` / `data_output` are strings, so the old
        # `for line in result` walked CHARACTERS, not lines (making
        # "1 2" compare equal to "12").  Compare stripped, non-empty lines.
        result = [ line.strip() for line in result.splitlines() if line.strip() != '']
        data_output = [ line.strip() for line in data_output.splitlines() if line.strip() != '' ]
        if result != data_output:
            raise Exception("Wrong Solution")
        print(f"Example {idx + 1} Correct !")
print(f"{problem} Judge Success") | null |
12,853 | import os
import subprocess as sp
def getCount(folder):
    """Return (recommended, total) row counts from ./<folder>/list.md.

    A row counts as recommended when its first comma-separated column is
    non-empty.
    """
    with open(f'./{folder}/list.md', 'r') as fp:
        rows = fp.readlines()
    recommended = sum(1 for row in rows if row.split(",")[0] != '')
    return recommended, len(rows)
def make_table(file="./markdown/list.md"):
lines = list()
with open(file, 'r') as f:
lines = f.readlines()
f.close()
ret = "| 순번 | Tag | 태그 | 문제집 | 추천 문제 수 | 총 문제 수 | 상태 |\n"
ret += "| :--: | :--------------------------: | :-----------------: | :------: | :---------: | :------: |:---------------:|\n"
for idx, line in enumerate(lines):
folder, tag_en, tag_kr, status = line.strip().split(',')
rec_cnt, total = getCount(folder)
curLine = f"| {idx:02d} | {tag_en} | {tag_kr} | [바로가기](./{folder}) | {rec_cnt:02d} | {total:02d} | ![status][{status}] |\n"
ret += curLine
ret += ' \n \n\n '
with open('./markdown/workbook.md', 'w') as f:
f.write(ret)
f.close() | null |
12,854 | import os
import subprocess as sp
seq = [ "header.md", "codingtest_info.md", "workbook_header.md", "workbook.md", "workbook_footer.md", "contributors.md", "updatelog.md", "TODO.md", "footer.md" ]
def assemble():
with open('./README.md', 'w') as f:
f.close()
for md in seq:
os.system(f"cat ./markdown/{md} >> ./README.md") | null |
12,855 | import os
import subprocess as sp
def make_contributors():
os.system('python3 ./scripts/make_contributor.py') | null |
12,856 | import datetime
import random
import json
import urllib.request as request
from utils import Date, Communication
class Communication:
def get_json(url):
def get_database(cls):
def get_picked_problem(cls):
def get_today_problem(cls):
def make_table(data: dict, save_file: str) -> None:
database = Communication.get_database()
lines = []
for date, problems in data.items():
lines.append(f"## {date} \n\n")
lines.append("| 번호 | 문제 이름 |\n")
lines.append("|:----:|:---------:|\n")
for problem_id in problems:
problemName = database[problem_id]['problemName']
link = f"https://www.acmicpc.net/problem/{problem_id}"
line = f"| [{problem_id}]({link}) | [{problemName}]({link}) |\n"
lines.append(line)
lines.append('\n')
with open(save_file, 'w') as f:
f.writelines(lines)
f.close() | null |
12,857 | import datetime
import random
import json
import urllib.request as request
from utils import Date, Communication
def get_today_date():
year, month, day = Date.get_today_date()
timeformat = f"{year:04d}/{month:02d}/{day:02d}"
return timeformat
class Communication:
    """Static helpers around the repo's remote JSON problem database.

    Fixes two defects of the original:
    * the methods used a ``cls`` parameter and are invoked unbound as
      ``Communication.get_today_problem()`` (see ``pick()``), but lacked
      ``@classmethod`` -- the call raised TypeError;
    * the set of recently picked problem ids (``used_problem``) was
      immediately shadowed by a per-section list of the same name, turning
      the "skip recently used" membership check into a no-op.
    """

    __URL = {
        "database": "https://raw.githubusercontent.com/tony9402/baekjoon/main/scripts/database.json",
        "picked_problem": "https://raw.githubusercontent.com/tony9402/baekjoon/main/scripts/picked.json",
    }
    __hyperparameter = {
        "before_days": 20,      # don't repeat problems picked in this window
        "n_section": 4,
        "max_level": [8, 12, 15, 18], # [S3, G4, G1, P3]
        "choose_count": [1, 2, 1, 1],
    }

    @staticmethod
    def get_json(url):
        """Fetch *url* and parse the body as JSON."""
        text = request.urlopen(url).read()
        return json.loads(text)

    @classmethod
    def get_database(cls):
        return cls.get_json(cls.__URL['database'])

    @classmethod
    def get_picked_problem(cls):
        return cls.get_json(cls.__URL['picked_problem'])

    @classmethod
    def get_today_problem(cls):
        """Pick today's problems per difficulty section, preferring problems
        not used during the last ``before_days`` days."""
        today_problems = []
        seed = Date.get_today_random_seed()
        random.seed(seed)               # deterministic pick for a given day
        all_problem = cls.get_database()
        picked_problem = cls.get_picked_problem()
        param = cls.__hyperparameter
        dates = sorted(picked_problem.keys(), reverse=True)[:param['before_days']]
        recently_used = set()
        for date in dates:
            recently_used.update(picked_problem[date])
        used_by_section = [[] for section in range(param['n_section'])]
        fresh_by_section = [[] for section in range(param['n_section'])]
        for problem_id, problem_info in all_problem.items():
            problemlevel = int(problem_info['problemLevel'])
            if not 1 <= problemlevel <= 18:
                continue
            # find the first section whose max_level admits this problem
            section = 0
            while section < len(param['max_level']) and problemlevel > param['max_level'][section]:
                section += 1
            assert section < param['n_section']
            if problem_id in recently_used:
                used_by_section[section].append(problem_id)
            else:
                fresh_by_section[section].append(problem_id)
        for section, problems in enumerate(fresh_by_section):
            if len(problems) < param['choose_count'][section]:
                # not enough fresh candidates -- allow recently used ones again
                problems.extend(used_by_section[section])
            problems = random.sample(problems, k=param['choose_count'][section])
            today_problems.extend(problems)
        return today_problems
def pick():
picked_json = dict()
with open('./scripts/picked.json', 'r') as f:
picked_json = json.load(f)
f.close()
timeformat = get_today_date()
today_problems: list = Communication.get_today_problem()
new_data = {}
new_data[timeformat] = today_problems
new_data.update(picked_json)
with open('./scripts/picked.json', 'w') as f:
f.write(json.dumps(new_data, indent=4, ensure_ascii=False))
f.close()
return new_data | null |
12,858 | from API import SolvedAPI
import json
def urlProblem(number, name):
FORMAT = f"<a href=\"https://www.acmicpc.net/problem/{number}\" target=\"_blank\">{name}</a>"
return FORMAT | null |
12,859 | from API import SolvedAPI
import json
def urlSolution(link):
if link == "":
return ""
FORMAT = f"<a href=\"{link}\">바로가기</a>"
return FORMAT | null |
12,860 | from API import SolvedAPI
import json
def urlLevel(level):
url = f"https://static.solved.ac/tier_small/{level}.svg"
ret = f"<img height=\"25px\" width=\"25px\" src=\"{url}\"/>"
return ret | null |
12,861 | from urllib import request
import ssl
import json
import atexit
import time
import datetime
import pytz
from utils import Communication
ALPHA = [ 'B', 'S', 'G', 'P', 'D', 'R' ]
return f"{ALPHA[level // 5]}{5 - level % 5}
def changeLevel(level):
ALPHA = [ 'B', 'S', 'G', 'P', 'D', 'R' ]
level -= 1
return f"{ALPHA[level // 5]}{5 - level % 5}" | null |
12,862 | import os
import time
import datetime
import pytz
def getProblem(Dir):
ret = list()
with open(f"{Dir}/list.md", 'r') as f:
ret = f.readlines()
f.close()
return ret | null |
12,863 | import os
import time
import datetime
import pytz
def Assemble(*args):
problems = set()
for i in args:
for problem in i:
problemID = problem.split(',')[-2]
problems.add(problemID)
return problems | null |
12,864 | import os
import time
print("""# Status
간단하게 파이썬으로 진행사항 및 문제 수를 알아보기 위해 만들어 보았습니다.
[메인으로 돌아가기](https://github.com/tony9402/baekjoon)
""")
print(f"총 문제 수 : {len(TotalProblem)} ")
print(f"총 추천 문제 수 : {len(Recommend_List)} ({len(Recommend_List) / len(TotalProblem) * 100. :.2f}%) ")
print(f"알고리즘 Tag 개수 : {len(Folders)} ")
print("\n")
print("<hr>")
print(f"각 알고리즘 Tag 진행 사항 <b>(Tag는 사전순)</b> {_b:.2f}% <br><br>\n")
print(f"남은 문제 수 {_a}/{len(Recommend_List)}\n")
print("| Index | Tag(Folder Name) | 남은 문제 수 | Solution 진행도 |")
print("| :--: | :--------------- | :----------: | :------------: |")
import datetime
import pytz
print(f"\n\n**실행한 날짜(log) : {timeformat.strftime('%Y/%m/%d %H:%M:%S %Z')}**")
def Status(tags, *args): # Problems, Folders
for idx, problems in enumerate(args):
print(f"| {idx:02d} | [{tags[idx]}](./../../tree/main/{tags[idx]}) | ", end='')
totalProblem = 0 # Only ✔️
hasEditoral = 0
for problem in problems:
info = problem.split(',')
rec = info[0].strip()
link = info[-1].strip()
if not link == '' and not rec == '':
hasEditoral += 1
if not rec == '':
totalProblem += 1
if totalProblem == 0: # Except 0 / 0
totalProblem = 1 # Make 0 / 1
# https://img.shields.io/badge/-{1}-31AE0F
# DFFD26 0885CC
percent = hasEditoral / totalProblem * 100.
color = "DFFD26"
if percent == 100.0:
color = "0885CC"
elif percent != 0.0:
color = "31AE0F"
print(f"{totalProblem - hasEditoral} |", end='')
print(f" | ") | null |
12,865 | import os
import time
import datetime
import pytz
def getTier(Str):
if len(Str) == 2: # Ex p2, P2...
return Str.upper()
else: # Gold5...
return Str[0].upper() + Str[-1] | null |
12,866 | import os
import time
import datetime
import pytz
def getRecommend(*args):
ret = list() # Not Set, Get Problem Info (ProblemID, Problem Name, Tier)
for i in args:
for problem in i:
info = problem.split(",")
rec = info[0].strip()
if not rec == '':
ret.append(info[-2])
return ret | null |
12,867 | import os
import time
import datetime
import pytz
def calPercentageOfRec(*args):
    """Tally recommended problems and how many of them have a solution link.

    Each positional arg is an iterable of CSV rows ("rec,...,link"); a row is
    recommended when column 0 is non-empty, solved when the last column is
    non-empty.  NOTE(review): the return expression divides by `total`, which
    raises ZeroDivisionError when no recommended rows exist -- confirm callers
    guarantee total > 0.
    """
    total = 0
    hasSolution = 0
    for i in args:
        for problem in i:
            info = problem.split(",")
            rec = info[0].strip()
            link = info[-1].strip()
            if rec == '':
                continue
            total += 1
            if not link == '':
                hasSolution += 1
return total - hasSolution, float(hasSolution) / total * 100 | null |
12,868 | import json
def read_json(path):
with open(path, 'r') as f:
data = json.load(f)
f.close()
return data | null |
12,869 | import json
def write_codeowners(data, path = "CODEOWNERS"):
LangtoExt = {
"*": "*",
"c": "c",
"cpp": "cpp",
"python": "py",
"java": "java",
"swift": "swift",
"rust": "rs",
"kotlin": "kt",
"javascript": "js",
"go": "go"
}
f = open(path, 'w')
invited = data['invited']
data = data['auth']
for user, langs in data.items():
if user not in invited:
continue
info = [f"# {user}"]
for lang in langs['language']:
ext = LangtoExt[lang]
info.append(
f"solution/*/*.{ext} @{user}"
)
info.append('\n')
f.write('\n'.join(info))
f.close() | null |
12,870 | from API import SolvedAPI
from make_table import Table
import json
import argparse
import os
table = None
def updateProblems():
print("update start")
table.run()
print("update end") | null |
12,871 | from API import SolvedAPI
from make_table import Table
import json
import argparse
import os
solution_list = dict()
def getFolder(path, EXCEPT=list()):
def updateSolution():
rootFolder = "./solution"
tagFolder = getFolder(rootFolder) # in ./solution
for tag in tagFolder:
solution_list[tag] = set()
problemPath = f"{rootFolder}/{tag}"
problems = getFolder(problemPath)
for problem in problems:
solution_list[tag].add(problem) | null |
12,872 | from API import SolvedAPI
from make_table import Table
import json
import argparse
import os
config = dict()
solution_list = dict()
solutionRPATH = "./../solution"
rootFolder = "./"
tagFolder = config.get('tags')
for tag in tagFolder:
currentPath = f"{rootFolder}/{tag}"
INFO = None
with open(f"{currentPath}/list.md", 'r') as f:
INFO = f.readlines()
f.close()
update = False
NEWINFO = list()
for line in INFO:
split_line = line.split(",")
problemId = split_line[-2]
if tag in solution_list and problemId in solution_list[tag]:
split_line[-1] = f"{solutionRPATH}/{tag}/{problemId}\n"
update = True
line = ",".join(split_line)
NEWINFO.append(line)
if update:
with open(f"{currentPath}/list.md", 'w') as f:
f.writelines(NEWINFO)
f.close()
def updateList():
    """Point each solved problem's list.md row at its solution folder.

    For every tag in ``config['tags']``, rewrites the last CSV field of
    rows whose problem id appears in the global ``solution_list``; the
    file is only rewritten when at least one row changed.
    """
    solutionRPATH = "./../solution"
    root = "./"
    for tag in config.get('tags'):
        list_path = f"{root}/{tag}/list.md"
        with open(list_path, 'r') as f:
            rows = f.readlines()
        changed = False
        rewritten = []
        for row in rows:
            fields = row.split(",")
            problemId = fields[-2]
            if tag in solution_list and problemId in solution_list[tag]:
                # Replace the trailing field with the relative solution path.
                fields[-1] = f"{solutionRPATH}/{tag}/{problemId}\n"
                changed = True
                row = ",".join(fields)
            rewritten.append(row)
        if changed:
            with open(list_path, 'w') as f:
                f.writelines(rewritten)
12,873 | from API import SolvedAPI
from make_table import Table
import json
import argparse
import os
def updateStatus():
    """Regenerate status.md by shelling out to the arrange script.

    NOTE(review): uses ``os.system`` with a hard-coded command and ignores
    the exit status, so script failures are silent — confirm intended.
    """
    os.system('python3 ./scripts/arrange.py > status.md')
12,874 | from API import SolvedAPI
from make_table import Table
import json
import argparse
import os
table = None
def updateLevel():
    """Force a full table rebuild (e.g. after problem levels change).

    NOTE(review): requires the global ``table`` to be initialized
    elsewhere — it is ``None`` at module import time.
    """
    table.run(force = True)
12,875 | import torch
import numpy as np
import os
import cv2
from os.path import join
import pickle
def check_modelpath(paths):
    """Resolve a model checkpoint path.

    Accepts either a single path (asserted to exist) or a list of
    candidates, returning the first one present on disk. Raises
    FileExistsError when no candidate exists and NotImplementedError
    for any other input type.
    """
    if isinstance(paths, str):
        assert os.path.exists(paths), paths
        return paths
    if isinstance(paths, list):
        for candidate in paths:
            if os.path.exists(candidate):
                print(f'Found model in {candidate}')
                return candidate
        print(f'No model found in {paths}!')
        raise FileExistsError
    raise NotImplementedError
12,876 | import os
import numpy as np
import cv2
The provided code snippet includes necessary dependencies for implementing the `bbox_from_keypoints` function. Write a Python function `def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.05, MIN_PIXEL=5)` to solve the following problem:
Get center and scale for bounding box from openpose detections.
Here is the function:
def bbox_from_keypoints(keypoints, rescale=1.2, detection_thresh=0.05, MIN_PIXEL=5):
    """Get center and scale for bounding box from openpose detections.

    Returns [x_min, y_min, x_max, y_max, mean_confidence]; a placeholder
    box [0, 0, 100, 100, 0] is returned when fewer than 3 keypoints are
    confident or the tight box is smaller than MIN_PIXEL on either axis.
    """
    is_valid = keypoints[:, -1] > detection_thresh
    # Need at least three confident keypoints for a meaningful box.
    if is_valid.sum() < 3:
        return [0, 0, 100, 100, 0]
    pts = keypoints[is_valid][:, :-1]
    lo = pts.min(axis=0)
    hi = pts.max(axis=0)
    center = (hi + lo) / 2
    extent = hi - lo
    if extent[0] < MIN_PIXEL or extent[1] < MIN_PIXEL:
        return [0, 0, 100, 100, 0]
    # Loosen the tight box by the rescale factor.
    half = extent * rescale / 2
    return [
        center[0] - half[0],
        center[1] - half[1],
        center[0] + half[0],
        center[1] + half[1],
        keypoints[is_valid, 2].mean(),
    ]
12,877 | import os
import numpy as np
import math
import cv2
import torch
from ..basetopdown import BaseTopDownModelCache
from .hrnet import HRNet
The provided code snippet includes necessary dependencies for implementing the `get_max_preds` function. Write a Python function `def get_max_preds(batch_heatmaps)` to solve the following problem:
get predictions from score maps heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Here is the function:
def get_max_preds(batch_heatmaps):
    """Locate the argmax of every joint heatmap.

    batch_heatmaps: np.ndarray of shape (batch, joints, height, width).
    Returns (preds, maxvals): per-joint (x, y) coordinates and peak
    scores; joints whose peak is <= 0 get zeroed coordinates.
    """
    assert isinstance(batch_heatmaps, np.ndarray), \
        'batch_heatmaps should be numpy.ndarray'
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim: {}'.format(batch_heatmaps.shape)
    n_batch = batch_heatmaps.shape[0]
    n_joint = batch_heatmaps.shape[1]
    hm_width = batch_heatmaps.shape[3]
    flat = batch_heatmaps.reshape((n_batch, n_joint, -1))
    peak_idx = np.argmax(flat, 2).reshape((n_batch, n_joint, 1))
    peak_val = np.amax(flat, 2).reshape((n_batch, n_joint, 1))
    coords = np.tile(peak_idx, (1, 1, 2)).astype(np.float32)
    coords[:, :, 0] = coords[:, :, 0] % hm_width           # column -> x
    coords[:, :, 1] = np.floor(coords[:, :, 1] / hm_width)  # row -> y
    # Zero out joints with non-positive peaks (nothing detected).
    visible = np.tile(np.greater(peak_val, 0.0), (1, 1, 2)).astype(np.float32)
    coords *= visible
    return coords, peak_val
12,878 | import os
import numpy as np
import math
import cv2
import torch
from ..basetopdown import BaseTopDownModelCache
from .hrnet import HRNet
# Index of each COCO-17 joint inside the BODY-25 layout.
COCO17_IN_BODY25 = [0,16,15,18,17,5,2,6,3,7,4,12,9,13,10,14,11]
def coco17tobody25(points2d):
    """Map COCO-17 keypoints (N, 17, 3) into the BODY-25 layout (N, 25, 3).

    Joint 8 (mid-hip) and joint 1 (neck) are synthesized as midpoints of
    the hips (9, 12) and shoulders (2, 5); their confidence is the lower
    of the two parents. Unmapped BODY-25 slots stay zero.
    """
    kpts = np.zeros((points2d.shape[0], 25, 3))
    kpts[:, COCO17_IN_BODY25, :2] = points2d[:, :, :2]
    kpts[:, COCO17_IN_BODY25, 2:3] = points2d[:, :, 2:3]
    for dst, parents in ((8, [9, 12]), (1, [2, 5])):
        kpts[:, dst, :2] = kpts[:, parents, :2].mean(axis=1)
        kpts[:, dst, 2] = kpts[:, parents, 2].min(axis=1)
    return kpts
12,879 | import os
import numpy as np
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .layers import drop_path, to_2tuple, trunc_normal_
from ..basetopdown import BaseTopDownModelCache
from ..topdown_keypoints import BaseKeypoints
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True)` to solve the following problem:
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
Here is the function:
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
    """Stochastic depth: randomly zero whole samples of `x` during training.

    Each sample in the batch is kept with probability 1 - drop_prob and,
    when `scale_by_keep` is set, survivors are rescaled by 1/keep_prob so
    the expected activation is unchanged. Identity at inference or when
    drop_prob == 0.
    """
    if not training or drop_prob == 0.:
        return x
    keep_prob = 1 - drop_prob
    # One Bernoulli draw per sample, broadcast over all remaining dims.
    mask_shape = (x.shape[0],) + (1,) * (x.ndim - 1)
    mask = x.new_empty(mask_shape).bernoulli_(keep_prob)
    if scale_by_keep and keep_prob > 0.0:
        mask.div_(keep_prob)
    return x * mask
12,880 | import os
import numpy as np
import torch
from functools import partial
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from .layers import drop_path, to_2tuple, trunc_normal_
from ..basetopdown import BaseTopDownModelCache
from ..topdown_keypoints import BaseKeypoints
The provided code snippet includes necessary dependencies for implementing the `get_abs_pos` function. Write a Python function `def get_abs_pos(abs_pos, h, w, ori_h, ori_w, has_cls_token=True)` to solve the following problem:
Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token dimension for the original embeddings. Args: abs_pos (Tensor): absolute positional embeddings with (1, num_position, C). has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token. hw (Tuple): size of input image tokens. Returns: Absolute positional embeddings after processing with shape (1, H, W, C)
Here is the function:
def get_abs_pos(abs_pos, h, w, ori_h, ori_w, has_cls_token=True):
    """
    Calculate absolute positional embeddings. If needed, resize embeddings and remove cls_token
    dimension for the original embeddings.
    Args:
        abs_pos (Tensor): absolute positional embeddings with (B, num_position, C).
        h, w (int): target grid size of image tokens.
        ori_h, ori_w (int): grid size the embeddings were trained for.
        has_cls_token (bool): If true, has 1 embedding in abs_pos for cls token.
    Returns:
        Absolute positional embeddings after processing, shape (B, 1 + h*w, C)
        with cls token, (B, h*w, C) without.
    """
    cls_token = None
    B, L, C = abs_pos.shape
    if has_cls_token:
        # Split off the cls embedding; only the spatial grid is resized.
        cls_token = abs_pos[:, 0:1]
        abs_pos = abs_pos[:, 1:]
    if ori_h != h or ori_w != w:
        # BUGFIX: reshape with the true batch size B (was hard-coded 1),
        # which silently mixed channels across samples when B > 1.
        new_abs_pos = F.interpolate(
            abs_pos.reshape(B, ori_h, ori_w, -1).permute(0, 3, 1, 2),
            size=(h, w),
            mode="bicubic",
            align_corners=False,
        ).permute(0, 2, 3, 1).reshape(B, -1, C)
    else:
        new_abs_pos = abs_pos
    if cls_token is not None:
        new_abs_pos = torch.cat([cls_token, new_abs_pos], dim=1)
    return new_abs_pos
12,881 | import torch
import math
import collections.abc
from itertools import repeat
import warnings
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `drop_path` function. Write a Python function `def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True)` to solve the following problem:
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument.
Here is the function:
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor | Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. |
12,882 | import torch
import math
import collections.abc
from itertools import repeat
import warnings
import torch.nn as nn
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return tuple(x)
return tuple(repeat(x, n))
return parse | null |
12,883 | import torch
import math
import collections.abc
from itertools import repeat
import warnings
import torch.nn as nn
def _trunc_normal_(tensor, mean, std, a, b):
# Cut & paste from PyTorch official master until it's in a few official releases - RW
# Method based on https://people.sc.fsu.edu/~jburkardt/presentations/truncated_normal.pdf
def norm_cdf(x):
# Computes standard normal cumulative distribution function
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
# Values are generated by using a truncated uniform distribution and
# then using the inverse CDF for the normal distribution.
# Get upper and lower cdf values
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
# Uniformly fill tensor with values from [l, u], then translate to
# [2l-1, 2u-1].
tensor.uniform_(2 * l - 1, 2 * u - 1)
# Use inverse cdf transform for normal distribution to get truncated
# standard normal
tensor.erfinv_()
# Transform to proper mean, std
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
# Clamp to ensure it's in the proper range
tensor.clamp_(min=a, max=b)
return tensor
The provided code snippet includes necessary dependencies for implementing the `trunc_normal_` function. Write a Python function `def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.)` to solve the following problem:
r"""Fills the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)` with values outside :math:`[a, b]` redrawn until they are within the bounds. The method used for generating the random values works best when :math:`a \leq \text{mean} \leq b`. NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are applied while sampling the normal with mean/std applied, therefore a, b args should be adjusted to match the range of mean, std args. Args: tensor: an n-dimensional `torch.Tensor` mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)
Here is the function:
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    r"""Fills the input Tensor with values drawn from a truncated
    normal distribution. The values are effectively drawn from the
    normal distribution :math:`\mathcal{N}(\text{mean}, \text{std}^2)`
    with values outside :math:`[a, b]` redrawn until they are within
    the bounds. The method used for generating the random values works
    best when :math:`a \leq \text{mean} \leq b`.
    NOTE: this impl is similar to the PyTorch trunc_normal_, the bounds [a, b] are
    applied while sampling the normal with mean/std applied, therefore a, b args
    should be adjusted to match the range of mean, std args.
    Args:
        tensor: an n-dimensional `torch.Tensor`
        mean: the mean of the normal distribution
        std: the standard deviation of the normal distribution
        a: the minimum cutoff value
        b: the maximum cutoff value
    Examples:
        >>> w = torch.empty(3, 5)
        >>> nn.init.trunc_normal_(w)
    """
    # Initialization must not be recorded by autograd.
    with torch.no_grad():
        return _trunc_normal_(tensor, mean, std, a, b)
12,884 | import os
import os.path as osp
import torch
import torch.nn as nn
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls
from ..basetopdown import get_preds_from_heatmaps
def make_conv_layers(feat_dims, kernel=3, stride=1, padding=1, bnrelu_final=True):
    """Stack Conv2d(+BatchNorm+ReLU) layers along the given channel dims.

    Each conv maps feat_dims[i] -> feat_dims[i+1]. The trailing BN/ReLU
    pair after the final conv is omitted when bnrelu_final is False
    (raw outputs for direct estimation).
    """
    modules = []
    last = len(feat_dims) - 2
    for idx in range(len(feat_dims) - 1):
        modules.append(nn.Conv2d(
            in_channels=feat_dims[idx],
            out_channels=feat_dims[idx + 1],
            kernel_size=kernel,
            stride=stride,
            padding=padding))
        # Append BN/ReLU except after the final conv when disabled.
        if idx < last or bnrelu_final:
            modules.append(nn.BatchNorm2d(feat_dims[idx + 1]))
            modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)
return nn.Sequential(*layers) | null |
12,885 | import os
import os.path as osp
import torch
import torch.nn as nn
from torchvision.models.resnet import BasicBlock, Bottleneck
from torchvision.models.resnet import model_urls
from ..basetopdown import get_preds_from_heatmaps
def make_deconv_layers(feat_dims, bnrelu_final=True):
    """Stack ConvTranspose2d(+BatchNorm+ReLU) layers, each upsampling 2x.

    Each transpose-conv maps feat_dims[i] -> feat_dims[i+1] with a fixed
    4x4 kernel / stride 2 / padding 1 (bias-free). The trailing BN/ReLU
    pair is omitted for the last layer when bnrelu_final is False.
    """
    modules = []
    last = len(feat_dims) - 2
    for idx in range(len(feat_dims) - 1):
        modules.append(nn.ConvTranspose2d(
            in_channels=feat_dims[idx],
            out_channels=feat_dims[idx + 1],
            kernel_size=4,
            stride=2,
            padding=1,
            output_padding=0,
            bias=False))
        # Append BN/ReLU except after the final layer when disabled.
        if idx < last or bnrelu_final:
            modules.append(nn.BatchNorm2d(feat_dims[idx + 1]))
            modules.append(nn.ReLU(inplace=True))
    return nn.Sequential(*modules)
12,886 | import math
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `get_max_preds` function. Write a Python function `def get_max_preds(batch_heatmaps)` to solve the following problem:
get predictions from score maps heatmaps: numpy.ndarray([batch_size, num_joints, height, width])
Here is the function:
def get_max_preds(batch_heatmaps):
    """Decode heatmaps to (x, y) coordinates by per-joint argmax.

    batch_heatmaps: numpy.ndarray([batch_size, num_joints, height, width]).
    Returns (preds, maxvals); coordinates are zeroed where the peak score
    is not positive.
    """
    assert isinstance(batch_heatmaps, np.ndarray), \
        'batch_heatmaps should be numpy.ndarray'
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim: {}'.format(batch_heatmaps.shape)
    bs = batch_heatmaps.shape[0]
    nj = batch_heatmaps.shape[1]
    w = batch_heatmaps.shape[3]
    scores = batch_heatmaps.reshape((bs, nj, -1))
    best = np.argmax(scores, 2).reshape((bs, nj, 1))
    maxvals = np.amax(scores, 2).reshape((bs, nj, 1))
    preds = np.tile(best, (1, 1, 2)).astype(np.float32)
    # Flat index -> (x, y) inside the heatmap grid.
    preds[:, :, 0] = preds[:, :, 0] % w
    preds[:, :, 1] = np.floor(preds[:, :, 1] / w)
    mask = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)).astype(np.float32)
    preds *= mask
    return preds, maxvals
12,887 | import math
import numpy as np
def coco17tobody25(points2d):
    """Embed COCO-17 detections (N, 17, 3) into the BODY-25 joint layout.

    Mid-hip (8) and neck (1) are synthesized from the hips (9, 12) and
    shoulders (2, 5): midpoint position, minimum confidence. Slots not
    covered by COCO17_IN_BODY25 remain zero.
    """
    out = np.zeros((points2d.shape[0], 25, 3))
    out[:, COCO17_IN_BODY25, :2] = points2d[:, :, :2]
    out[:, COCO17_IN_BODY25, 2:3] = points2d[:, :, 2:3]
    for dst, parents in ((8, [9, 12]), (1, [2, 5])):
        out[:, dst, :2] = out[:, parents, :2].mean(axis=1)
        out[:, dst, 2] = out[:, parents, 2].min(axis=1)
    return out
def coco23tobody25(points2d):
    """Convert COCO-23 (17 body + 6 foot) keypoints into the BODY-25 layout."""
    out = coco17tobody25(points2d[:, :17])
    # The six foot keypoints map onto BODY-25 slots 19..24.
    out[:, 19:25] = points2d[:, 17:23]
    return out
12,888 | import torch
import torch.nn as nn
import torchvision.models.resnet as resnet
import numpy as np
import math
class Bottleneck(nn.Module):
    """Redefinition of Bottleneck residual block.

    Adapted from the official PyTorch implementation: a 1x1 reduce ->
    3x3 spatial -> 1x1 expand (x4) convolution stack with a residual
    connection. Attribute names (conv1/bn1/...) match torchvision's
    ResNet so ImageNet checkpoints load by key (see hmr(), which calls
    load_state_dict with strict=False).
    """
    # Channel expansion factor applied by the final 1x1 convolution.
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Spatial stride (if any) is applied in the 3x3 convolution.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection that matches the residual's shape/stride.
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        """Apply the bottleneck stack and add the (possibly projected) residual."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class HMR(nn.Module):
    """SMPL Iterative Regressor with ResNet50 backbone.

    A ResNet-50 trunk pools to a single feature vector; a small MLP head
    then iteratively refines pose/shape and camera estimates, starting
    from zero initial values (registered as buffers). Backbone attribute
    names match torchvision's ResNet so ImageNet weights can be loaded
    with strict=False (see hmr()).
    """
    def __init__(self, block, layers):
        self.inplanes = 64
        super(HMR, self).__init__()
        # Pose vector size regressed by the head (3 + 45); presumably
        # 3 global-rotation + 45 articulation params — confirm against
        # the downstream body model.
        npose = 3 + 45
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # Head input: pooled feature + current pose + shape(10) + cam(3).
        self.fc1 = nn.Linear(512 * block.expansion + npose + 13, 1024)
        self.drop1 = nn.Dropout()
        self.fc2 = nn.Linear(1024, 1024)
        self.drop2 = nn.Dropout()
        self.decpose = nn.Linear(1024, npose)
        self.decshape = nn.Linear(1024, 10)
        self.deccam = nn.Linear(1024, 3)
        # Small-gain init keeps the first refinement step near the zero
        # initial estimates.
        nn.init.xavier_uniform_(self.decpose.weight, gain=0.01)
        nn.init.xavier_uniform_(self.decshape.weight, gain=0.01)
        nn.init.xavier_uniform_(self.deccam.weight, gain=0.01)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
        # Zero initial estimates; buffers follow the module's device/dtype.
        init_pose = torch.zeros(npose).unsqueeze(0)
        init_shape = torch.zeros(10).unsqueeze(0)
        init_cam = torch.zeros(3).unsqueeze(0)
        self.register_buffer('init_pose', init_pose)
        self.register_buffer('init_shape', init_shape)
        self.register_buffer('init_cam', init_cam)
    def _make_layer(self, block, planes, blocks, stride=1):
        """Build one ResNet stage of `blocks` bottleneck units."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # Projection shortcut when channel count or stride changes.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)
    def forward(self, x, init_pose=None, init_shape=None, init_cam=None, n_iter=3):
        """Run the backbone once, then n_iter refinement steps of the head.

        Returns a dict with 'poses', 'shapes' and 'cam' (final estimates).
        """
        batch_size = x.shape[0]
        if init_pose is None:
            init_pose = self.init_pose.expand(batch_size, -1)
        if init_shape is None:
            init_shape = self.init_shape.expand(batch_size, -1)
        if init_cam is None:
            init_cam = self.init_cam.expand(batch_size, -1)
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x1 = self.layer1(x)
        x2 = self.layer2(x1)
        x3 = self.layer3(x2)
        x4 = self.layer4(x3)
        xf = self.avgpool(x4)
        xf = xf.view(xf.size(0), -1)
        pred_pose = init_pose
        pred_shape = init_shape
        pred_cam = init_cam
        # Iterative error feedback: each step predicts a residual update.
        for i in range(n_iter):
            xc = torch.cat([xf, pred_pose, pred_shape, pred_cam],1)
            xc = self.fc1(xc)
            xc = self.drop1(xc)
            xc = self.fc2(xc)
            xc = self.drop2(xc)
            pred_pose = self.decpose(xc) + pred_pose
            pred_shape = self.decshape(xc) + pred_shape
            pred_cam = self.deccam(xc) + pred_cam
        # pred_rotmat = rot6d_to_rotmat(pred_pose).view(batch_size, 24, 3, 3)
        return {
            'poses': pred_pose,
            'shapes': pred_shape,
            'cam': pred_cam
        }
The provided code snippet includes necessary dependencies for implementing the `hmr` function. Write a Python function `def hmr(pretrained=True, **kwargs)` to solve the following problem:
Constructs an HMR model with ResNet50 backbone. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet
Here is the function:
def hmr(pretrained=True, **kwargs):
    """Construct an HMR model with a ResNet-50 backbone.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    net = HMR(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        # Borrow ImageNet weights for the shared ResNet-50 trunk; the
        # regression-head keys are absent there, hence strict=False.
        backbone = resnet.resnet50(pretrained=True)
        net.load_state_dict(backbone.state_dict(), strict=False)
    return net
12,889 | '''
Date: 2021-10-25 11:51:37 am
Author: dihuangdh
Descriptions:
-----
LastEditTime: 2021-10-25 1:50:40 pm
LastEditors: dihuangdh
'''
import torch
from torchvision.transforms import Normalize
import numpy as np
import cv2
from .models import hmr
class constants:
    # Fixed focal length assumed by the weak-perspective camera model.
    FOCAL_LENGTH = 5000.
    # Side length (pixels) of the square network input.
    IMG_RES = 224
    # Mean and standard deviation for normalizing input image
    IMG_NORM_MEAN = [0.485, 0.456, 0.406]
    IMG_NORM_STD = [0.229, 0.224, 0.225]
def crop(img, center, scale, res, rot=0, bias=0):
    """Crop image according to the supplied bounding box.

    Args:
        img: HxW[xC] array.
        center: (x, y) box center in input-image pixels.
        scale: box size divided by 200 (the convention used by `transform`).
        res: (w, h) output resolution.
        rot: rotation in degrees; 0 skips the rotate/pad path.
        bias: fill value for pixels sampled outside the source image.

    NOTE(review): relies on a module-level `transform` helper that is not
    defined in this snippet — verify it is available at runtime.
    """
    # Upper left point
    ul = np.array(transform([1, 1], center, scale, res, invert=1))-1
    # Bottom right point
    br = np.array(transform([res[0]+1,
                            res[1]+1], center, scale, res, invert=1))-1
    # Padding so that when rotated proper amount of context is included
    pad = int(np.linalg.norm(br - ul) / 2 - float(br[1] - ul[1]) / 2)
    if not rot == 0:
        ul -= pad
        br += pad
    new_shape = [br[1] - ul[1], br[0] - ul[0]]
    if len(img.shape) > 2:
        new_shape += [img.shape[2]]
    new_img = np.zeros(new_shape) + bias
    # Range to fill new array
    new_x = max(0, -ul[0]), min(br[0], len(img[0])) - ul[0]
    new_y = max(0, -ul[1]), min(br[1], len(img)) - ul[1]
    # Range to sample from original image
    old_x = max(0, ul[0]), min(len(img[0]), br[0])
    old_y = max(0, ul[1]), min(len(img), br[1])
    new_img[new_y[0]:new_y[1], new_x[0]:new_x[1]] = img[old_y[0]:old_y[1],
                                                        old_x[0]:old_x[1]]
    if not rot == 0:
        # Remove padding
        # NOTE(review): `scipy` is not imported here and scipy.misc.imrotate
        # was removed in modern SciPy — this branch would raise at runtime.
        new_img = scipy.misc.imrotate(new_img, rot)
        new_img = new_img[pad:-pad, pad:-pad]
    new_img = cv2.resize(new_img, (res[0], res[1]))
    return new_img
class
# Import-only module: no standalone CLI behavior is implemented.
if __name__ == '__main__':
    pass
The provided code snippet includes necessary dependencies for implementing the `process_image` function. Write a Python function `def process_image(img, bbox, input_res=224)` to solve the following problem:
Read image, do preprocessing and possibly crop it according to the bounding box. If there are bounding box annotations, use them to crop the image. If no bounding box is specified but openpose detections are available, use them to get the bounding box.
Here is the function:
def process_image(img, bbox, input_res=224):
    """Read image, do preprocessing and possibly crop it according to the bounding box.
    If there are bounding box annotations, use them to crop the image.
    If no bounding box is specified but openpose detections are available, use them to get the bounding box.
    """
    # BGR -> RGB; .copy() materializes the negative stride for torch.
    rgb = img[:, :, ::-1].copy()
    normalize = Normalize(mean=constants.IMG_NORM_MEAN, std=constants.IMG_NORM_STD)
    left, top, right, bottom = bbox[:4]
    box_center = [(left + right) / 2, (top + bottom) / 2]
    box_side = max(right - left, bottom - top)
    box_scale = box_side / 200.0  # crop() expects scale in units of 200 px
    patch = crop(rgb, box_center, box_scale, (input_res, input_res))
    patch = patch.astype(np.float32) / 255.
    patch = torch.from_numpy(patch).permute(2, 0, 1)
    norm_img = normalize(patch.clone())[None]
    return patch, norm_img
12,890 |
def solve_translation(X, x, K):
    """Solve for the camera translation t that best aligns 3D points X
    with their 2D observations x under intrinsics K (weighted least squares).

    Each point contributes two equations derived from the pinhole model
    u = fx*(X_x + t_x)/(X_z + t_z) + cx (and likewise for v), weighted by
    the observation confidence.

    Args:
        X: (N, 3) 3D points.
        x: (N, 3) 2D observations as (u, v, confidence).
        K: (3, 3) pinhole intrinsics.
    Returns:
        (3,) translation vector.
    """
    A = np.zeros((2*X.shape[0], 3))
    b = np.zeros((2*X.shape[0], 1))
    fx, fy = K[0, 0], K[1, 1]
    cx, cy = K[0, 2], K[1, 2]
    for nj in range(X.shape[0]):
        A[2*nj, 0] = 1
        A[2*nj + 1, 1] = 1
        A[2*nj, 2] = -(x[nj, 0] - cx)/fx
        A[2*nj+1, 2] = -(x[nj, 1] - cy)/fy
        b[2*nj, 0] = X[nj, 2]*(x[nj, 0] - cx)/fx - X[nj, 0]
        b[2*nj+1, 0] = X[nj, 2]*(x[nj, 1] - cy)/fy - X[nj, 1]
        # Confidence-weight both equations of this point.
        A[2*nj:2*nj+2, :] *= x[nj, 2]
        b[2*nj:2*nj+2, :] *= x[nj, 2]
    # Solve the normal equations directly instead of forming an explicit
    # matrix inverse (numerically safer, same minimizer).
    trans = np.linalg.solve(A.T @ A, A.T @ b)
    return trans.T[0]
12,891 | '''
Date: 2021-10-25 11:51:37 am
Author: dihuangdh
Descriptions:
-----
LastEditTime: 2021-10-25 1:50:40 pm
LastEditors: dihuangdh
'''
import torch
from torchvision.transforms import Normalize
import numpy as np
import cv2
from .models import hmr
def estimate_translation_np(S, joints_2d, joints_conf, K):
    """Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d.
    Input:
        S: (25, 3) 3D joint locations
        joints_2d: (25, 2) 2D joint locations
        joints_conf: (25,) per-joint confidences (used as sqrt weights)
        K: (3, 3) camera intrinsics
    Returns:
        (3,) camera translation vector
    """
    num_joints = S.shape[0]
    focal = np.array([K[0, 0], K[1, 1]])
    center = np.array([K[0, 2], K[1, 2]])
    # Interleave per-joint quantities as (x0, y0, x1, y1, ...).
    depth = np.reshape(np.tile(S[:, 2], (2, 1)).T, -1)
    xy = np.reshape(S[:, 0:2], -1)
    centers = np.tile(center, num_joints)
    focals = np.tile(focal, num_joints)
    weights = np.reshape(np.tile(np.sqrt(joints_conf), (2, 1)).T, -1)
    obs = np.reshape(joints_2d, -1)
    # Build the linear system Q t = c from the pinhole projection model.
    Q = np.array([focals * np.tile(np.array([1, 0]), num_joints),
                  focals * np.tile(np.array([0, 1]), num_joints),
                  centers - obs]).T
    c = (obs - centers) * depth - focals * xy
    # Apply per-equation confidence weights.
    W = np.diagflat(weights)
    Q = np.dot(W, Q)
    c = np.dot(W, c)
    # Solve the normal equations.
    return np.linalg.solve(np.dot(Q.T, Q), np.dot(Q.T, c))
class
# Import-only module: no standalone CLI behavior is implemented.
if __name__ == '__main__':
    pass
def init_with_hmr(body_model, spin_model, img, bbox, kpts, camera):
    """Initialize SMPL parameters from an HMR/SPIN network prediction.

    Runs the network on the cropped person, estimates a camera-frame
    translation from the first 15 (body) joints, then re-expresses the
    global pose in world coordinates using the camera extrinsics.
    Returns a dict with 'body_params', 'vertices' and 'keypoints3d'.
    """
    params = spin_model.forward(img.copy(), bbox)
    params = body_model.check_params(params)
    # Only the first 15 body joints are used to estimate translation.
    nJoints = 15
    joints3d = body_model(return_verts=False, return_tensor=False, **params)[0]
    trans = estimate_translation_np(joints3d[:nJoints], kpts[:nJoints, :2],
                                    kpts[:nJoints, 2], camera['K'])
    params['Th'] += trans[None, :]
    # Camera frame -> world frame: R_w = R^T R_c, T_w = R^T (T_c - T).
    R_cam = cv2.Rodrigues(params['Rh'])[0]
    T_cam = params['Th']
    R_world = camera['R'].T @ R_cam
    T_world = (camera['R'].T @ (T_cam.T - camera['T'])).T
    params['Th'] = T_world
    params['Rh'] = cv2.Rodrigues(R_world)[0].reshape(1, 3)
    verts = body_model(return_verts=True, return_tensor=False, **params)[0]
    joints3d = body_model(return_verts=False, return_tensor=False, **params)[0]
    return {'body_params': params, 'vertices': verts, 'keypoints3d': joints3d}
12,892 | import os
from os.path import join
import numpy as np
import cv2
import torch
import torch.nn as nn
import pickle
import math
The provided code snippet includes necessary dependencies for implementing the `get_warp_matrix` function. Write a Python function `def get_warp_matrix(theta, size_input, size_dst, size_target)` to solve the following problem:
Calculate the transformation matrix under the constraint of unbiased. Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased Data Processing for Human Pose Estimation (CVPR 2020). Args: theta (float): Rotation angle in degrees. size_input (np.ndarray): Size of input image [w, h]. size_dst (np.ndarray): Size of output image [w, h]. size_target (np.ndarray): Size of ROI in input plane [w, h]. Returns: np.ndarray: A matrix for transformation.
Here is the function:
def get_warp_matrix(theta, size_input, size_dst, size_target):
    """Build an affine warp matrix under the unbiased data-processing constraint.

    Paper ref: Huang et al. The Devil is in the Details: Delving into Unbiased
    Data Processing for Human Pose Estimation (CVPR 2020).

    Args:
        theta (float): Rotation angle in degrees.
        size_input (np.ndarray): Size of input image [w, h].
        size_dst (np.ndarray): Size of output image [w, h].
        size_target (np.ndarray): Size of ROI in input plane [w, h].

    Returns:
        np.ndarray: 2x3 float32 transformation matrix.
    """
    angle = np.deg2rad(theta)
    cos_t, sin_t = math.cos(angle), math.sin(angle)
    sx = size_dst[0] / size_target[0]
    sy = size_dst[1] / size_target[1]
    # Translation terms map the rotated input center onto the target center.
    tx = sx * (-0.5 * size_input[0] * cos_t +
               0.5 * size_input[1] * sin_t +
               0.5 * size_target[0])
    ty = sy * (-0.5 * size_input[0] * sin_t -
               0.5 * size_input[1] * cos_t +
               0.5 * size_target[1])
    return np.array(
        [[cos_t * sx, -sin_t * sx, tx],
         [sin_t * sy, cos_t * sy, ty]],
        dtype=np.float32)
12,893 | import os
from os.path import join
import numpy as np
import cv2
import torch
import torch.nn as nn
import pickle
import math
def generate_patch_image_cv(cvimg, c_x, c_y, bb_width, bb_height, patch_width, patch_height, do_flip, scale, rot):
    """Warp a (center, size) box out of ``cvimg`` into a fixed-size patch.

    NOTE(review): ``do_flip`` is accepted but never used in this body --
    flipping appears to be handled by the caller; confirm before relying on it.

    Returns:
        tuple: (img_patch, trans, inv_trans) -- the cropped patch plus the
        forward/inverse affine transforms from ``gen_trans_from_patch_cv``.
    """
    trans, inv_trans = gen_trans_from_patch_cv(c_x, c_y, bb_width, bb_height, patch_width, patch_height, scale, rot, inv=False)
    img_patch = cv2.warpAffine(cvimg, trans, (int(patch_width), int(patch_height)),
                             flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
    return img_patch, trans, inv_trans
def get_single_image_crop_demo(image, bbox, scale=1.2, crop_size=224,
        mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), fliplr=False):
    """Crop a person patch from ``image`` and normalize it for network input.

    Fixes: the ``mean``/``std`` defaults were mutable lists (shared across
    calls); they are now tuples with identical values. Dead commented-out
    debug code removed.

    Args:
        image: full HxWxC image (channel order assumed RGB -- TODO confirm).
        bbox (sequence): box as (center_x, center_y, width, height).
        scale (float): padding factor around the box.
        crop_size (sequence): output size as (width, height). NOTE(review):
            the historical default of ``224`` is an int and not indexable, so
            callers must pass a pair; kept for interface compatibility.
        mean, std: per-channel normalization statistics (ImageNet defaults).
        fliplr (bool): horizontally flip the crop before normalization.

    Returns:
        tuple: (crop_image, inv_trans) where crop_image is a normalized
        CHW float32 array and inv_trans maps patch coords back to the
        original image.
    """
    crop_image, trans, inv_trans = generate_patch_image_cv(
        cvimg=image.copy(),
        c_x=bbox[0],
        c_y=bbox[1],
        bb_width=bbox[2],
        bb_height=bbox[3],
        patch_width=crop_size[0],
        patch_height=crop_size[1],
        do_flip=False,
        scale=scale,
        rot=0,
    )
    if fliplr:
        crop_image = cv2.flip(crop_image, 1)
    # HWC -> CHW, scale to [0, 1], then standardize per channel.
    crop_image = crop_image.transpose(2, 0, 1)
    mean1 = np.array(mean, dtype=np.float32).reshape(3, 1, 1)
    std1 = np.array(std, dtype=np.float32).reshape(3, 1, 1)
    crop_image = crop_image.astype(np.float32) / 255.
    crop_image = (crop_image - mean1) / std1
    return crop_image, inv_trans
12,894 | import os
from os.path import join
import numpy as np
import cv2
import torch
import torch.nn as nn
import pickle
import math
def xyxy2ccwh(bbox):
    """Convert boxes from corner format (x1, y1, x2, y2) to (cx, cy, w, h).

    Args:
        bbox (np.ndarray): (N, 4) array of corner-format boxes.

    Returns:
        np.ndarray: (N, 4) array of center/size-format boxes.
    """
    x1, y1, x2, y2 = bbox[:, 0], bbox[:, 1], bbox[:, 2], bbox[:, 3]
    centers = np.stack([(x1 + x2) / 2, (y1 + y2) / 2], axis=1)
    sizes = np.stack([x2 - x1, y2 - y1], axis=1)
    return np.concatenate([centers, sizes], axis=1)
12,895 | import os
from os.path import join
import numpy as np
import cv2
import torch
import torch.nn as nn
import pickle
import math
def get_max_preds(batch_heatmaps):
    """Extract per-joint argmax locations and peak scores from heatmaps.

    Args:
        batch_heatmaps (np.ndarray): scores shaped
            [batch_size, num_joints, height, width].

    Returns:
        tuple: (preds, maxvals) where preds is [batch, joints, 2] holding
        (x, y) coordinates and maxvals is [batch, joints, 1] peak scores.
        Joints whose peak score is <= 0 get zeroed coordinates.
    """
    assert isinstance(batch_heatmaps, np.ndarray), \
        'batch_heatmaps should be numpy.ndarray'
    assert batch_heatmaps.ndim == 4, 'batch_images should be 4-ndim'

    n_batch, n_joints = batch_heatmaps.shape[0], batch_heatmaps.shape[1]
    hm_width = batch_heatmaps.shape[3]
    flat = batch_heatmaps.reshape((n_batch, n_joints, -1))

    idx = np.argmax(flat, 2).reshape((n_batch, n_joints, 1))
    maxvals = np.amax(flat, 2).reshape((n_batch, n_joints, 1))

    preds = np.tile(idx, (1, 1, 2)).astype(np.float32)
    preds[:, :, 0] = preds[:, :, 0] % hm_width            # x = idx mod width
    preds[:, :, 1] = np.floor(preds[:, :, 1] / hm_width)  # y = idx div width

    # Mask out joints with non-positive peak scores.
    valid = np.tile(np.greater(maxvals, 0.0), (1, 1, 2)).astype(np.float32)
    preds *= valid
    return preds, maxvals
def get_preds_from_heatmaps(batch_heatmaps):
    """Decode joint predictions from heatmaps with quarter-pixel refinement.

    Takes the per-joint argmax, nudges it 0.25 px toward the higher-scoring
    neighboring bin (standard heatmap decoding), then scales coordinates by 4
    to map from heatmap resolution back to input-image resolution
    (assumes a 4x downsampled heatmap -- TODO confirm against the network).

    Args:
        batch_heatmaps (np.ndarray): [batch, joints, height, width] scores.

    Returns:
        np.ndarray: [batch, joints, 3] array of (x, y, confidence).
    """
    coords, maxvals = get_max_preds(batch_heatmaps)
    heatmap_height = batch_heatmaps.shape[2]
    heatmap_width = batch_heatmaps.shape[3]
    # post-processing: quarter-offset refinement toward the larger neighbor
    if True:
        for n in range(coords.shape[0]):
            for p in range(coords.shape[1]):
                hm = batch_heatmaps[n][p]
                # Round to the nearest integer bin before reading neighbors.
                px = int(math.floor(coords[n][p][0] + 0.5))
                py = int(math.floor(coords[n][p][1] + 0.5))
                # Only refine peaks that have all four neighbors in bounds.
                if 1 < px < heatmap_width-1 and 1 < py < heatmap_height-1:
                    diff = np.array(
                        [
                            hm[py][px+1] - hm[py][px-1],
                            hm[py+1][px]-hm[py-1][px]
                        ]
                    )
                    coords[n][p] += np.sign(diff) * .25
    # Heatmap coords -> input-image coords (fixed 4x upsample).
    coords = coords.astype(np.float32) * 4
    pred = np.dstack((coords, maxvals))
    return pred
12,896 | import os
from os.path import join
import numpy as np
import cv2
import torch
import torch.nn as nn
import pickle
import math
def gdown_models(ckpt, url):
    """Download a checkpoint from Google Drive to ``ckpt`` via the gdown CLI.

    Side effects: creates the parent directory of ``ckpt`` and shells out
    to the external ``gdown`` command.

    NOTE(review): the command is built by string formatting and run through
    os.system -- unsafe if ``url``/``ckpt`` ever come from untrusted input;
    prefer subprocess.run(['gdown', url, '-O', ckpt]) in that case.
    """
    print('Try to download model from {} to {}'.format(url, ckpt))
    os.makedirs(os.path.dirname(ckpt), exist_ok=True)
    cmd = 'gdown "{}" -O {}'.format(url, ckpt)
    print('\n', cmd, '\n')
    os.system(cmd)
12,897 |
def get_backbone_info(backbone):
    """Return feature-map metadata for a named backbone.

    Args:
        backbone (str): backbone identifier, e.g. 'resnet50' or 'hrnet_w32'.

    Returns:
        dict: {'n_output_channels': int, 'downsample_rate': int}; every
        supported backbone uses a downsample rate of 4.

    Raises:
        KeyError: if the backbone name is unknown.
    """
    channels = {
        'resnet18': 512,
        'resnet34': 512,
        'resnet50': 2048,
        'resnet50_adf_dropout': 2048,
        'resnet50_dropout': 2048,
        'resnet101': 2048,
        'resnet152': 2048,
        'resnext50_32x4d': 2048,
        'resnext101_32x8d': 2048,
        'wide_resnet50_2': 2048,
        'wide_resnet101_2': 2048,
        'mobilenet_v2': 1280,
        'hrnet_w32': 480,
        'hrnet_w48': 720,
        # 'hrnet_w64' intentionally disabled (as in the original table)
        'dla34': 512,
    }
    return {'n_output_channels': channels[backbone], 'downsample_rate': 4}
12,898 | from torch import nn
The provided code snippet includes necessary dependencies for implementing the `_make_divisible` function. Write a Python function `def _make_divisible(v, divisor, min_value=None)` to solve the following problem:
This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return:
Here is the function:
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v | This function is taken from the original tf repo. It ensures that all layers have a channel number that is divisible by 8 It can be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py :param v: :param divisor: :param min_value: :return: |
12,899 | from torch import nn
model_urls = {
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
}
class MobileNetV2(nn.Module):
    # Backbone-only MobileNetV2: the ImageNet classifier head is commented
    # out below, so forward() returns the final feature map, not logits.
    # NOTE(review): consequently ``num_classes`` is currently unused.
    def __init__(self,
                 num_classes=1000,
                 width_mult=1.0,
                 inverted_residual_setting=None,
                 round_nearest=8,
                 block=None,
                 norm_layer=None):
        """
        MobileNet V2 main class
        Args:
            num_classes (int): Number of classes
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
            block: Module specifying inverted residual building block for mobilenet
            norm_layer: Module specifying the normalization layer to use
        """
        super(MobileNetV2, self).__init__()
        if block is None:
            block = InvertedResidual
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t (expand ratio), c (output channels), n (repeats), s (stride)
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4:
            raise ValueError("inverted_residual_setting should be non-empty "
                             "or a 4-element list, got {}".format(inverted_residual_setting))
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(last_channel * max(1.0, width_mult), round_nearest)
        features = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # Only the first block of each group carries the stride.
                stride = s if i == 0 else 1
                features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer))
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # building classifier
        # self.classifier = nn.Sequential(
        #     nn.Dropout(0.2),
        #     nn.Linear(self.last_channel, num_classes),
        # )
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)
    def _forward_impl(self, x):
        # This exists since TorchScript doesn't support inheritance, so the superclass method
        # (this one) needs to have a name other than `forward` that can be accessed in a subclass
        x = self.features(x)
        # Cannot use "squeeze" as batch-size can be 1 => must use reshape with x.shape[0]
        # x = nn.functional.adaptive_avg_pool2d(x, 1).reshape(x.shape[0], -1)
        # x = self.classifier(x)
        return x
    def forward(self, x):
        """Return the backbone feature map for input batch ``x``."""
        return self._forward_impl(x)
The provided code snippet includes necessary dependencies for implementing the `mobilenet_v2` function. Write a Python function `def mobilenet_v2(pretrained=False, progress=True, **kwargs)` to solve the following problem:
Constructs a MobileNetV2 architecture from `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def mobilenet_v2(pretrained=False, progress=True, **kwargs):
    """
    Constructs a MobileNetV2 architecture from
    `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_.
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    model = MobileNetV2(**kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'],
                                              progress=progress)
        # strict=False: this backbone variant has no classifier head, so the
        # checkpoint's classifier weights have no matching keys.
        model.load_state_dict(state_dict, strict=False)
    return model
12,900 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from yacs.config import CfgNode as CN
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with unit padding."""
    layer = nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
    return layer
12,901 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from yacs.config import CfgNode as CN
def get_pose_net(cfg, is_train):
    """Build a PoseHighResolutionNet from ``cfg``.

    Pretrained weights (cfg.MODEL.PRETRAINED) are loaded only when
    ``is_train`` is true and cfg['MODEL']['INIT_WEIGHTS'] is set.
    """
    model = PoseHighResolutionNet(cfg)
    if is_train and cfg['MODEL']['INIT_WEIGHTS']:
        model.init_weights(cfg['MODEL']['PRETRAINED'])
    return model
def get_cfg_defaults(pretrained, width=32, downsample=False, use_conv=False):
    """Build the default yacs config for an HRNet pose backbone.

    Args:
        pretrained (str): checkpoint path stored in cfg.MODEL.PRETRAINED.
        width (int): base channel width; stage channels are [w, 2w, 4w, 8w].
        downsample (bool): stored in MODEL.EXTRA.DOWNSAMPLE for the network.
        use_conv (bool): stored in MODEL.EXTRA.USE_CONV for the network.

    Returns:
        CfgNode: full config (MODEL.NUM_JOINTS fixed at 24).
    """
    # pose_multi_resolution_net related params
    HRNET = CN()
    HRNET.PRETRAINED_LAYERS = [
        'conv1', 'bn1', 'conv2', 'bn2', 'layer1', 'transition1',
        'stage2', 'transition2', 'stage3', 'transition3', 'stage4',
    ]
    HRNET.STEM_INPLANES = 64
    HRNET.FINAL_CONV_KERNEL = 1
    HRNET.STAGE2 = CN()
    HRNET.STAGE2.NUM_MODULES = 1
    HRNET.STAGE2.NUM_BRANCHES = 2
    HRNET.STAGE2.NUM_BLOCKS = [4, 4]
    HRNET.STAGE2.NUM_CHANNELS = [width, width*2]
    HRNET.STAGE2.BLOCK = 'BASIC'
    HRNET.STAGE2.FUSE_METHOD = 'SUM'
    HRNET.STAGE3 = CN()
    HRNET.STAGE3.NUM_MODULES = 4
    HRNET.STAGE3.NUM_BRANCHES = 3
    HRNET.STAGE3.NUM_BLOCKS = [4, 4, 4]
    HRNET.STAGE3.NUM_CHANNELS = [width, width*2, width*4]
    HRNET.STAGE3.BLOCK = 'BASIC'
    HRNET.STAGE3.FUSE_METHOD = 'SUM'
    HRNET.STAGE4 = CN()
    HRNET.STAGE4.NUM_MODULES = 3
    HRNET.STAGE4.NUM_BRANCHES = 4
    HRNET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
    HRNET.STAGE4.NUM_CHANNELS = [width, width*2, width*4, width*8]
    HRNET.STAGE4.BLOCK = 'BASIC'
    HRNET.STAGE4.FUSE_METHOD = 'SUM'
    HRNET.DOWNSAMPLE = downsample
    HRNET.USE_CONV = use_conv
    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.INIT_WEIGHTS = True
    cfg.MODEL.PRETRAINED = pretrained  # 'data/pretrained_models/hrnet_w32-36af842e.pth'
    cfg.MODEL.EXTRA = HRNET
    cfg.MODEL.NUM_JOINTS = 24
    return cfg
def hrnet_w32(
    pretrained=True,
    pretrained_ckpt='data/pretrained_models/pose_coco/pose_hrnet_w32_256x192.pth',
    downsample=False,
    use_conv=False,
):
    """HRNet-W32 pose backbone built from the default config.

    NOTE(review): the ``pretrained`` flag is never read in this body; the
    checkpoint path is always baked into the config and loading is governed
    by cfg.MODEL.INIT_WEIGHTS inside get_pose_net.
    """
    cfg = get_cfg_defaults(pretrained_ckpt, width=32, downsample=downsample, use_conv=use_conv)
    return get_pose_net(cfg, is_train=True)
12,902 | import os
import torch
import torch.nn as nn
import torch.nn.functional as F
from yacs.config import CfgNode as CN
def get_pose_net(cfg, is_train):
def get_cfg_defaults(pretrained, width=32, downsample=False, use_conv=False):
def hrnet_w48(
    pretrained=True,
    pretrained_ckpt='data/pretrained_models/pose_coco/pose_hrnet_w48_256x192.pth',
    downsample=False,
    use_conv=False,
):
    """HRNet-W48 pose backbone built from the default config.

    NOTE(review): like hrnet_w32, the ``pretrained`` flag is never read here;
    weight loading is governed by the config inside get_pose_net.
    """
    cfg = get_cfg_defaults(pretrained_ckpt, width=48, downsample=downsample, use_conv=use_conv)
    return get_pose_net(cfg, is_train=True)
12,903 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `conv3x3` function. Write a Python function `def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1)` to solve the following problem:
3x3 convolution with padding
Here is the function:
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """Return a bias-free 3x3 convolution; padding equals dilation so the
    spatial size is preserved at stride 1."""
    options = dict(
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        bias=False,
        dilation=dilation,
    )
    return nn.Conv2d(in_planes, out_planes, **options)
12,904 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `conv1x1` function. Write a Python function `def conv1x1(in_planes, out_planes, stride=1)` to solve the following problem:
1x1 convolution
Here is the function:
def conv1x1(in_planes, out_planes, stride=1):
    """Return a bias-free pointwise (1x1) convolution."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=1,
        stride=stride,
        bias=False,
    )
12,905 | import torch
import torch.nn as nn
class BasicBlock(nn.Module):
    """Two-conv residual block (the ResNet-18/34 building block)."""

    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
                 base_width=64, dilation=1, norm_layer=None):
        super(BasicBlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if groups != 1 or base_width != 64:
            raise ValueError('BasicBlock only supports groups=1 and base_width=64')
        if dilation > 1:
            raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
        # The first conv (and the optional downsample path) absorb the stride.
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = norm_layer(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = norm_layer(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu-conv-bn, add the (possibly projected) input, relu."""
        shortcut = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += shortcut
        return self.relu(out)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Construct a ResNet variant and optionally load its ImageNet weights.

    Args:
        arch (str): key into ``model_urls`` for the checkpoint URL.
        block: residual block class (e.g. BasicBlock).
        layers (list[int]): number of blocks per stage.
        pretrained (bool): download and load ImageNet weights if True.
        progress (bool): show a download progress bar.

    ``strict=False`` on load, so missing/extra keys (e.g. a removed fc head)
    are tolerated.
    """
    model = ResNet(block, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch],
                                              progress=progress)
        model.load_state_dict(state_dict, strict=False)
    return model
The provided code snippet includes necessary dependencies for implementing the `resnet18` function. Write a Python function `def resnet18(pretrained=False, progress=True, **kwargs)` to solve the following problem:
r"""ResNet-18 model from `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ Args: pretrained (bool): If True, returns a model pre-trained on ImageNet progress (bool): If True, displays a progress bar of the download to stderr
Here is the function:
def resnet18(pretrained=False, progress=True, **kwargs):
    r"""ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Four stages of two BasicBlocks each -- the classic 18-layer layout.
    return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
                   **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.