contest_id
stringlengths
1
4
index
stringclasses
43 values
title
stringlengths
2
63
statement
stringlengths
51
4.24k
tutorial
stringlengths
19
20.4k
tags
listlengths
0
11
rating
int64
800
3.5k
code
stringlengths
46
29.6k
1660
C
Get an Even String
A string $a=a_1a_2\dots a_n$ is called even if it consists of a concatenation (joining) of strings of length $2$ consisting of the same characters. In other words, a string $a$ is even if two conditions are satisfied \textbf{at the same time}: - its length $n$ is even; - for all odd $i$ ($1 \le i \le n - 1$), $a_i = a_{i+1}$ is satisfied. For example, the following strings are even: "" (empty string), "tt", "aabb", "oooo", and "ttrrrroouuuuuuuukk". The following strings are not even: "aaa", "abab" and "abba". Given a string $s$ consisting of lowercase Latin letters. Find the minimum number of characters to remove from the string $s$ to make it even. The deleted characters do not have to be consecutive.
We will act greedily: we will make an array $prev$ consisting of $26$ elements, in which we will mark $prev[i] = true$ if the letter has already been encountered in the string, and $prev[i] = false$ otherwise. In the variable $m$ we will store the length of the even string that can be obtained from $s$. We will go through the string by executing the following algorithm: if $prev[i] = false$, mark $prev[i] = true$; if $prev[i] = true$, then we already have a pair of repeating characters to add to an even string - add $2$ to the number $m$ and clear the array $prev$. Clearing $prev$ is necessary because both characters that will make up the next pair must be in the $s$ string after the current character. In other words, if the last character in the current pair was $s_t$, then the first character in the new pair can be $s_k$, where $t \lt k \lt n$. Then we calculate the answer as $n - m$.
[ "dp", "greedy", "strings" ]
1,300
#include<bits/stdc++.h> using namespace std; int sz = 26; void solve(){ string s; cin >> s; int m = 0, n = (int)s.size(); vector<bool>prev(sz, false); for(auto &i : s){ if(prev[i - 'a']){ m += 2; for(int j = 0; j < sz; j++) prev[j] = false; } else prev[i - 'a'] = true; } cout << n - m << endl; } int main(){ int t; cin >> t; while (t--){ solve(); } }
1660
D
Maximum Product Strikes Back
You are given an array $a$ consisting of $n$ integers. For each $i$ ($1 \le i \le n$) the following inequality is true: $-2 \le a_i \le 2$. You can remove any number (possibly $0$) of elements from the beginning of the array and any number (possibly $0$) of elements from the end of the array. You are allowed to delete the whole array. You need to answer the question: how many elements should be removed from the beginning of the array, and how many elements should be removed from the end of the array, so that the result will be an array whose product (multiplication) of elements is \textbf{maximal}. If there is more than one way to get an array with the maximum product of elements on it, you are allowed to output \textbf{any} of them. The product of elements of an \textbf{empty} array (array of length $0$) should be assumed to be $1$.
First, we can always get a product value equal to $1$ if we remove all elements of the array. Then we need to know what maximal positive value of the product we can get. Consequently, the remaining array (after removing the corresponding prefix and suffix) should have no $0$ elements. We can find maxima in all sections between zeros. Now we are left with a set of nonzero numbers. If the value of the product on the current segment is positive, it makes no sense to remove any more elements. Otherwise, the product is negative, then we must remove one negative number from the product (either to the left or to the right). Compare the values of the product on the prefix and suffix to the nearest negative value, and remove either the suffix or the prefix, respectively.
[ "brute force", "implementation", "math", "two pointers" ]
1,600
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for (int i = 0; i < int(n); i++) #define sz(v) (int)v.size() void solve() { int n; cin >> n; vector<int> a(n); forn(i, n) cin >> a[i]; int ans = 0; int ap = n, as = 0; for(int i = 0, l = -1; i <= n; i++) { if (i == n || a[i] == 0) { int cnt = 0; bool neg = false; int left = -1, right = -1; int cl = 0, cr = 0; for (int j = l+1; j < i; j++) { neg ^= a[j] < 0; if (a[j] < 0) { right = j; cr = 0; } if (abs(a[j]) == 2) { cnt++, cr++; if (left == -1) cl++; } if (a[j] < 0 && left == -1) { left = j; } } if (neg) { if (cnt - cl > cnt - cr) { right = i; cnt -= cl; } else { left = l; cnt -= cr; } } else { left = l, right = i; } if (ans < cnt) { ans = cnt; ap = left + 1, as = n - right; } l = i; } } cout << ap << ' ' << as << endl; } int main() { int t; cin >> t; forn(tt, t) { solve(); } }
1660
E
Matrix and Shifts
You are given a binary matrix $A$ of size $n \times n$. Rows are numbered from top to bottom from $1$ to $n$, columns are numbered from left to right from $1$ to $n$. The element located at the intersection of row $i$ and column $j$ is called $A_{ij}$. Consider a set of $4$ operations: - Cyclically shift all rows up. The row with index $i$ will be written in place of the row $i-1$ ($2 \le i \le n$), the row with index $1$ will be written in place of the row $n$. - Cyclically shift all rows down. The row with index $i$ will be written in place of the row $i+1$ ($1 \le i \le n - 1$), the row with index $n$ will be written in place of the row $1$. - Cyclically shift all columns to the left. The column with index $j$ will be written in place of the column $j-1$ ($2 \le j \le n$), the column with index $1$ will be written in place of the column $n$. - Cyclically shift all columns to the right. The column with index $j$ will be written in place of the column $j+1$ ($1 \le j \le n - 1$), the column with index $n$ will be written in place of the column $1$. \begin{center} {\small The $3 \times 3$ matrix is shown on the left before the $3$-rd operation is applied to it, on the right — after.} \end{center} You can perform an arbitrary (possibly zero) number of operations on the matrix; the operations can be performed in any order. After that, you can perform an arbitrary (possibly zero) number of new xor-operations: - Select any element $A_{ij}$ and assign it with new value $A_{ij} \oplus 1$. In other words, the value of $(A_{ij} + 1) \bmod 2$ will have to be written into element $A_{ij}$. Each application of this xor-operation costs one burl. Note that the $4$ shift operations — are free. These $4$ operations can only be performed before xor-operations are performed. Output the minimum number of burles you would have to pay to make the $A$ matrix unitary. 
A unitary matrix is a matrix with ones on the main diagonal and the rest of its elements are zeros (that is, $A_{ij} = 1$ if $i = j$ and $A_{ij} = 0$ otherwise).
Count to the variable $sum$ the number of all ones in the matrix. Then consider pairs of diagonals, one of which starts in cell $A[i][0]$, and the other - in cell $A[0][n - i]$ (for $1 \le i \le n - 1$). Using cyclic shifts, we can assemble the main diagonal from this pair. Then among all such pairs (and the main diagonal), find the one that contains the maximal number of ones, and store this number in the variable $Max$. The number of zeros on the main diagonal, which should be turned into ones, is equal to $n - Max$. The number of ones to be turned into zeros, because they are not on the main diagonal, is calculated as $sum - Max$. The total answer is calculated as $n - Max + sum - Max = n + sum - 2Max$.
[ "brute force", "constructive algorithms", "greedy", "implementation" ]
1,600
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for (int i = 0; i < int(n); i++) #define sz(v) (int)v.size() #define all(v) v.begin(),v.end() #define eb emplace_back const int INF = INT_MAX >> 1; void solve() { int n; cin >> n; int cnt1 = 0; vector<int> cnt (n, 0); for (int i = 0; i < n; i++) { string s; cin >> s; for (int j = 0, k = (n - i) % n; j < n; j++, k = (k + 1 == n ? 0 : k + 1)) if (s[j] == '1') { cnt1++; cnt[k]++; } } int ans = INF; for (int i = 0; i < sz(cnt); i++) { ans = min(ans, cnt1 - cnt[i] + (n - cnt[i])); } cout << ans << endl; } int main() { int t; cin >> t; forn(tt, t) { solve(); } }
1660
F1
Promising String (easy version)
\textbf{This is the easy version of Problem F. The only difference between the easy version and the hard version is the constraints.} We will call a non-empty string \textbf{balanced} if it contains the same number of plus and minus signs. For example: strings "+--+" and "++-+--" are balanced, and strings "+--", "--" and "" are not balanced. We will call a string \textbf{promising} if the string can be made balanced by several (possibly zero) uses of the following operation: - replace two \textbf{adjacent} minus signs with one plus sign. In particular, every balanced string is promising. However, the converse is not true: not every promising string is balanced. For example, the string "-+---" is promising, because you can replace two adjacent minuses with plus and get a balanced string "-++-", or get another balanced string "-+-+". How many non-empty substrings of the given string $s$ are promising? Each non-empty promising substring must be counted in the answer as many times as it occurs in string $s$. Recall that a substring is a sequence of consecutive characters of the string. For example, for string "+-+" its substring are: "+-", "-+", "+", "+-+" (the string is a substring of itself) and some others. But the following strings are not its substring: "--", "++", "-++".
Note the fact that if the number of minus signs is greater than the number of plus signs by at least $2$, then there is sure to be a pair of adjacent minus signs (by the pigeonhole principle, also known as the Dirichlet principle). When we apply the operation of replacing two adjacent minus signs with a plus sign, the balance (the difference between the number of plus signs and minus signs) increases by $3$. Then we need to find the number of substrings such that the balance on them is a multiple of $3$ and non-positive (then we can apply the operations until the balance is $0$). The balance value on a segment equals the balance value on the right boundary minus the balance value on the left boundary, i.e. we can compute it in $O(1)$ using prefix sums.
[ "brute force", "implementation", "math", "strings" ]
1,700
tst = int(input()) for _ in range(tst): n = int(input()) s = input() b = [0 for i in range(n+1)] bal = n b[0] = bal ans = 0 for i in range(1,n+1): if s[i-1] == '+': bal += 1 else: bal -= 1 b[i] = bal for j in range(i): if b[j] >= b[i] and (b[j] - b[i]) % 3 == 0: ans += 1 print(ans)
1660
F2
Promising String (hard version)
\textbf{This is the hard version of Problem F. The only difference between the easy version and the hard version is the constraints.} We will call a non-empty string \textbf{balanced} if it contains the same number of plus and minus signs. For example: strings "+--+" and "++-+--" are balanced, and strings "+--", "--" and "" are not balanced. We will call a string \textbf{promising} if the string can be made balanced by several (possibly zero) uses of the following operation: - replace two \textbf{adjacent} minus signs with one plus sign. In particular, every balanced string is promising. However, the converse is not true: not every promising string is balanced. For example, the string "-+---" is promising, because you can replace two adjacent minuses with plus and get a balanced string "-++-", or get another balanced string "-+-+". How many non-empty substrings of the given string $s$ are promising? Each non-empty promising substring must be counted in the answer as many times as it occurs in string $s$. Recall that a substring is a sequence of consecutive characters of the string. For example, for string "+-+" its substring are: "+-", "-+", "+", "+-+" (the string is a substring of itself) and some others. But the following strings are not its substring: "--", "++", "-++".
Now we need to quickly find, for a given balance value (on the prefix), the number of matching left boundaries. A boundary is suitable if the balance on the boundary is congruent modulo $3$ to the current balance and the current balance is not greater than the balance on the boundary, since we need the balance on the segment to be non-positive. That is, for each balance value we need to be able to count how many balance values seen so far are not less than ours. This can be done either with a data structure, or by noticing that the balance takes only $2n+1$ different values, so you can count the values not less than ours on the prefix in $O(1)$.
[ "data structures", "implementation", "math", "strings" ]
2,100
tst = int(input()) for _ in range(tst): n = int(input()) s = input() f = [0 for i in range(3)] cur_bal = n cnt_bal = [0 for i in range(2 * n + 1)] cnt_bal[cur_bal] += 1 f[cur_bal % 3] += 1 ans = 0 for i in range(n): #print(f) #print(cur_bal, ans) new_bal = cur_bal if s[i] == '-': new_bal -= 1 f[new_bal % 3] += cnt_bal[new_bal] ans += f[new_bal % 3] cnt_bal[new_bal] += 1 f[new_bal % 3] += 1 else: f[new_bal % 3] -= cnt_bal[new_bal] new_bal += 1 ans += f[new_bal % 3] cnt_bal[new_bal] += 1 f[new_bal % 3] += 1 cur_bal = new_bal print(ans)
1661
A
Array Balancing
You are given two arrays of length $n$: $a_1, a_2, \dots, a_n$ and $b_1, b_2, \dots, b_n$. You can perform the following operation any number of times: - Choose integer index $i$ ($1 \le i \le n$); - Swap $a_i$ and $b_i$. What is the minimum possible sum $|a_1 - a_2| + |a_2 - a_3| + \dots + |a_{n-1} - a_n|$ $+$ $|b_1 - b_2| + |b_2 - b_3| + \dots + |b_{n-1} - b_n|$ (in other words, $\sum\limits_{i=1}^{n - 1}{\left(|a_i - a_{i+1}| + |b_i - b_{i+1}|\right)}$) you can achieve after performing several (possibly, zero) operations?
Let's look at our arrays $a$ and $b$. Note that for any position $p$ such that $|a_{p-1} - a_p| + |b_{p-1} - b_p| > |a_{p-1} - b_p| + |b_{p-1} - a_p|$ we can always "fix it" by swapping all positions $i$ from $p$ to $n$. In that case, contribution from all $i < p$ won't change, contribution of pair $(p - 1, p)$ will decrease and contribution from all $i > p$ won't change again, since we swapped all of them. It means that we already can use the following algorithm: while exists such $p$ that $|a_{p-1} - a_p| + |b_{p-1} - b_p| > |a_{p-1} - b_p| + |b_{p-1} - a_p|$ just swap all $i$ from $p$ to $n$. This solution works for $O(n^2)$ per test, that should be enough. But we can optimize our approach by realizing that we can (instead of searching $p$ each time) just go from $2$ to $n$ and fix pairs one by one: if $|a_1 - a_2| + |b_1 - b_2| > |a_1 - b_2| + |b_1 - a_2|$ then swap $a_2$ with $b_2$; next, if $|a_2 - a_3| + |b_2 - b_3| > |a_2 - b_3| + |b_2 - a_3|$ then swap $a_3$ with $b_3$ and so on. In such way, solution works in $O(n)$.
[ "greedy", "math" ]
800
import kotlin.math.abs fun sum(a1: Int, a2: Int, b1: Int, b2: Int) = abs(a1 - a2) + abs(b1 - b2) fun main() { repeat(readLine()!!.toInt()) { val n = readLine()!!.toInt() val a = readLine()!!.split(' ').map { it.toInt() }.toIntArray() val b = readLine()!!.split(' ').map { it.toInt() }.toIntArray() var sum = 0L for (i in 1 until n) { if (sum(a[i - 1], a[i], b[i - 1], b[i]) > sum(a[i - 1], b[i], b[i - 1], a[i])) a[i] = b[i].also { b[i] = a[i] } sum += sum(a[i - 1], a[i], b[i - 1], b[i]) } println(sum) } }
1661
B
Getting Zero
Suppose you have an integer $v$. In one operation, you can: - either set $v = (v + 1) \bmod 32768$ - or set $v = (2 \cdot v) \bmod 32768$. You are given $n$ integers $a_1, a_2, \dots, a_n$. What is the minimum number of operations you need to make each $a_i$ equal to $0$?
Note that $32768 = 2^{15}$, so you can make any value equal to $0$ by multiplying it by two $15$ times, since $(v \cdot 2^{15}) \bmod 2^{15} = 0$. So, the answer for each value $a_i$ is at most $15$. Now, let's note that there is always an optimal answer that consists of: at first, add one $cntAdd$ times, then multiply by two $cntMul$ times - and $cntAdd + cntMul$ is the minimum answer. In other words, let's just iterate over all $cntAdd \le 15$ and $cntMul \le 15$ and check that $(v + cntAdd) \cdot 2^{cntMul} \bmod 32768 = 0$. The answer is the minimum $cntAdd + cntMul$ among them. To prove that it's optimal to add at first and only then to multiply, note that it's not optimal to add more than once after multiplying (the three operations $v \rightarrow 2v \rightarrow 2v + 1 \rightarrow 2v + 2$ can be replaced by the two operations $v \rightarrow v + 1 \rightarrow 2(v + 1)$). So there is at most one $+1$ between two $\cdot 2$, but it's not optimal to make even one $+1$ since we need to make $v$ divisible by $2^{15}$ and $+1$ breaks divisibility by $2$. There are many other approaches to this task besides this one: for example, since $a_i < 32768$ you can write bfs to find the shortest paths from $0$ to all $a_i$.
[ "bitmasks", "brute force", "dfs and similar", "dp", "graphs", "greedy", "shortest paths" ]
1,300
fun main() { val n = readLine()!!.toInt() val a = readLine()!!.split(' ').map { it.toInt() }.toIntArray() for (v in a) { var ans = 20 for (cntAdd in 0..15) { for (cntMul in 0..15) { if (((v + cntAdd) shl cntMul) % 32768 == 0) ans = minOf(ans, cntAdd + cntMul) } } print("$ans ") } }
1661
C
Water the Trees
There are $n$ trees in a park, numbered from $1$ to $n$. The initial height of the $i$-th tree is $h_i$. You want to water these trees, so they all grow to the \textbf{same} height. The watering process goes as follows. You start watering trees at day $1$. During the $j$-th day you can: - Choose a tree and water it. If the day is odd (e.g. $1, 3, 5, 7, \dots$), then the height of the tree increases by $1$. If the day is even (e.g. $2, 4, 6, 8, \dots$), then the height of the tree increases by $2$. - Or skip a day without watering any tree. Note that you can't water more than one tree in a day. Your task is to determine the \textbf{minimum} number of days required to water the trees so they grow to the same height. You have to answer $t$ independent test cases.
The first observation we need to solve this problem: the required height is either $max$ or $max + 1$, where $max$ is the maximum initial height of some tree. We don't need heights greater than $max + 1$, because, for example, if the height is $max + 2$, we can remove some moves and get the answer for the height $max$. The same thing applies to all heights greater than $max + 1$. Why do we even need the height $max + 1$? In some cases (like $[1, 1, 1, 1, 1, 1, 2]$) the answer for the height $max + 1$ is better than the answer for the height $max$ (in this particular case, it is $9$ vs $11$). Now, we have two ways to solve the problem: either use some gross formulas, or just write a binary search on the answer. I won't consider the solution with formulas (but we have one), so let's assume we use binary search. Let the current answer be $mid$. Then let $cnt_1 = \lceil\frac{mid}{2}\rceil$ be the number of $+1$ operations we can do and $cnt_2 = \lfloor\frac{mid}{2}\rfloor$ be the number of $+2$ operations we can do. We can use $+2$ operations greedily and then just check if the number of $+1$ operations is sufficient to grow up the remaining heights. Time complexity: $O(n \log{n})$ per test case.
[ "binary search", "greedy", "math" ]
1,700
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for(int i = 0; i < int(n); i++) int main(){ int tc; scanf("%d", &tc); while (tc--) { int n; scanf("%d", &n); vector<int> a(n); forn(i, n) scanf("%d", &a[i]); long long ans = 1e18; int mx = *max_element(a.begin(), a.end()); for (int x : {mx, mx + 1}){ long long cnt1 = 0, cnt2 = 0; forn(i, n){ cnt2 += (x - a[i]) / 2; cnt1 += (x - a[i]) % 2; } long long dif = max(0ll, cnt2 - cnt1 - 1) / 3; cnt1 += dif * 2; cnt2 -= dif; ans = min(ans, max(cnt1 * 2 - 1, cnt2 * 2)); if (cnt2 > 0){ cnt1 += 2; --cnt2; ans = min(ans, max(cnt1 * 2 - 1, cnt2 * 2)); } } printf("%lld\n", ans); } }
1661
D
Progressions Covering
You are given two arrays: an array $a$ consisting of $n$ zeros and an array $b$ consisting of $n$ integers. You can apply the following operation to the array $a$ an arbitrary number of times: choose some subsegment of $a$ of length $k$ and add the arithmetic progression $1, 2, \ldots, k$ to this subsegment — i. e. add $1$ to the first element of the subsegment, $2$ to the second element, and so on. The chosen subsegment should be inside the borders of the array $a$ (i.e., if the left border of the chosen subsegment is $l$, then the condition $1 \le l \le l + k - 1 \le n$ should be satisfied). Note that the progression added is always $1, 2, \ldots, k$ but not the $k, k - 1, \ldots, 1$ or anything else (i.e., the leftmost element of the subsegment always increases by $1$, the second element always increases by $2$ and so on). Your task is to find the \textbf{minimum} possible number of operations required to satisfy the condition $a_i \ge b_i$ for each $i$ from $1$ to $n$. Note that the condition $a_i \ge b_i$ should be satisfied for all elements at once.
Let's solve the problem greedily. But not from the beginning, because if we solve it from the beginning, we can't be sure what option is more optimal for the next elements (e.g. for the second element it is not clear if we need to add $2$ to it starting our segment from the first position or add $1$ to it starting our segment from the second position). So, let's solve the problem from right to left, then anything becomes clearer. Actually, let's operate with the array $b$ and decrease its elements instead of using some other array. Let's carry some variables: $sum$, $cnt$ and the array $closed$ of length $n$ (along with the answer). The variable $sum$ means the value we need to subtract from the current element from currently existing progressions, $cnt$ is the number of currently existing progressions, and $closed_i$ means the number of progressions that will end at the position $i+1$ (i.e. will not add anything from the position $i$ and further to the left). When we consider the element $i$, firstly let's fix $sum$ (decrease it by $cnt$). Then, let's fix $cnt$ (decrease it by $closed_i$). Then, let's decrease $b_i$ by $sum$, and if it becomes less than or equal to zero, just proceed. Otherwise, the number by which we can decrease the $i$-th element with one progression, equals to $el = min(k, i + 1)$ (zero-indexed). Then the number of progressions we need to satisfy this element is $need = \lceil\frac{b_i}{el}\rceil$. Let's add this number to the answer, increase $sum$ by $el \cdot need$, increase $cnt$ by $need$, and if $i - el \ge 0$ then we need to end these progressions somewhere, so let's add $need$ to $closed_{i - el}$. Time complexity: $O(n)$.
[ "data structures", "greedy" ]
1,900
#include <bits/stdc++.h> using namespace std; int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); // freopen("output.txt", "w", stdout); #endif int n, k; scanf("%d %d", &n, &k); vector<long long> b(n); for (auto &it : b) { scanf("%lld", &it); } vector<long long> closed(n); long long sum = 0, cnt = 0, ans = 0; for (int i = n - 1; i >= 0; --i) { sum -= cnt; cnt -= closed[i]; b[i] -= sum; if (b[i] <= 0) { continue; } int el = min(i + 1, k); long long need = (b[i] + el - 1) / el; sum += need * el; cnt += need; ans += need; if (i - el >= 0) { closed[i - el] += need; } } printf("%lld\n", ans); return 0; }
1661
E
Narrow Components
You are given a matrix $a$, consisting of $3$ rows and $n$ columns. Each cell of the matrix is either free or taken. A free cell $y$ is reachable from a free cell $x$ if at least one of these conditions hold: - $x$ and $y$ share a side; - there exists a free cell $z$ such that $z$ is reachable from $x$ and $y$ is reachable from $z$. A connected component is a set of free cells of the matrix such that all cells in it are reachable from one another, but adding any other free cell to the set violates this rule. You are asked $q$ queries about the matrix. Each query is the following: - $l$ $r$ — count the number of connected components of the matrix, consisting of columns from $l$ to $r$ of the matrix $a$, inclusive. Print the answers to all queries.
Consider the naive approach to the problem. Cut off the columns directly and count the connected components. There are two main solutions to this problem: either DFS (or BFS) or DSU. I personally found the DSU method easier to adjust to the full problem. So, to count connected components with DSU, you should do the following. Initialize the structure without edges: every free cell is its own connected component. Then add edges one by one. Each edge connects two cells either vertically or horizontally. When an edge connects different components, they merge, and the number of components decreases by one. Thus, the number of components on a range of columns is the number of free cells on it minus the number of meaningful edges on it (the ones that will merge components if the algorithm is performed only on these columns - the spanning forest edges). Let's try to adjust this algorithm to the full problem. It would be great if we could just calculate the spanning forest of the entire matrix, and then print the number of free cells minus the number of its edges on the segment. Unfortunately, it's not as easy as that. For components that lie fully in the segment, it works. However, if a component is split by a border of a segment, it can both stay connected or fall apart. If we determine its outcome, we can fix the answer. There are probably a lot of ways to adjust for that, but I'll tell you the one I found the neatest to code. Let's add the edges into DSU in the following order. Go column by column left to right. First add all vertical edges in any order, then all horizontal edges to the previous column in any order. If you start this algorithm at the first column, you will be able to answer all queries with $l=1$. Since the algorithm adds columns iteratively, the spanning forest it's building is correct after every column. So the answer for each query is indeed the number of cells minus the number of edges on the range. 
Let's investigate the difference between starting at the first column and an arbitrary column $l$. Look at the column $l$. If it contains $1$ or $3$ free cells or $2$ that are adjacent, then the cells are always in the same component, regardless of what has been before column $l$. If there are no free cells, nothing to the left matters, too. This tells us that the spanning forest that the first algorithm has built, is correct for any queries that start in this $l$. The only non-trivial case is when only rows $1$ and $3$ of the $l$-th column contain a free cell. Then we can't tell if the algorithm is correct or not, because these two cells can be in the same component already or not. Let's call this a "101" column. Imagine you started processing from the leftmost column of the query, left to right to the rightmost column. Our previous observations tell us that once we encounter a column that is not a "101", the algorithm onwards will be correct. Until then, we only have some "101" columns to deal with. We can add the part from the first non-"101" column onwards to the answer (the number of cells minus the number of edges). And then handle the prefix with some easy casework: if the leftmost column is not "101", then add nothing; if all columns in the query are "101", then the answer is $2$; if the first non-"101" column is "111", then add nothing (since the "101"s get merged into the component of this column); if the first non-"101" column is "000" or "010", then add $2$ components (since neither row $1$ nor row $3$ is merged anywhere); otherwise, add $1$ component. The number of free cells and edges on a segment can be precalculated with some prefix sums. The closest non-"101" column can also be precalculated with a linear algorithm. Overall complexity: $O(n \cdot \alpha(n) + q)$.
[ "brute force", "data structures", "dp", "dsu", "math", "trees" ]
2,500
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for(int i = 0; i < int(n); i++) int main(){ cin.tie(0); iostream::sync_with_stdio(false); int n; cin >> n; vector<string> s(3); forn(i, 3) cin >> s[i]; vector<int> pr(n + 1); forn(i, n){ pr[i + 1] += pr[i]; forn(j, 3) pr[i + 1] += (s[j][i] == '1'); } vector<int> p(3 * n), rk(3 * n, 1); iota(p.begin(), p.end(), 0); function<int(int)> getp; getp = [&](int a) -> int { return a == p[a] ? a : p[a] = getp(p[a]); }; auto unite = [&](int a, int b) -> bool { a = getp(a), b = getp(b); if (a == b) return false; if (rk[a] < rk[b]) swap(a, b); p[b] = a; rk[a] += rk[b]; return true; }; vector<int> prhe(n + 1), prve(n + 1); forn(j, n){ forn(i, 2) if (s[i][j] == '1' && s[i + 1][j] == '1' && unite(i * n + j, (i + 1) * n + j)) ++prve[j + 1]; forn(i, 3) if (j > 0 && s[i][j] == '1' && s[i][j - 1] == '1' && unite(i * n + j, i * n + (j - 1))) ++prhe[j]; } forn(i, n) prve[i + 1] += prve[i]; forn(i, n) prhe[i + 1] += prhe[i]; vector<int> nxt(n + 1, 0); for (int i = n - 1; i >= 0; --i) nxt[i] = (s[0][i] == '1' && s[1][i] == '0' && s[2][i] == '1' ? (nxt[i + 1] + 1) : 0); int m; cin >> m; forn(_, m){ int l, r; cin >> l >> r; --l, --r; int non101 = l + nxt[l]; if (non101 > r){ cout << "2\n"; continue; } int tot = pr[r + 1] - pr[non101]; int in = (prve[r + 1] - prve[non101]) + (prhe[r] - prhe[non101]); int res = tot - in; if (non101 != l){ if (s[0][non101] == '1' && s[1][non101] == '1' && s[2][non101] == '1'); else if (s[0][non101] == '0' && s[2][non101] == '0') res += 2; else ++res; } cout << res << "\n"; } return 0; }
1661
F
Teleporters
There are $n+1$ teleporters on a straight line, located in points $0$, $a_1$, $a_2$, $a_3$, ..., $a_n$. It's possible to teleport from point $x$ to point $y$ if there are teleporters in \textbf{both} of those points, and it costs $(x-y)^2$ energy. You want to install some additional teleporters so that it is possible to get from the point $0$ to the point $a_n$ (possibly through some other teleporters) spending \textbf{no more} than $m$ energy in total. Each teleporter you install must be located in an \textbf{integer point}. What is the minimum number of teleporters you have to install?
The initial $n+1$ portals divide the path from $0$ to $a_n$ into $n$ separate sections. If we place a new portal between two given ones, it only affects the section between these two portals. Let's suppose we want to place $k$ new portals into a section of length $x$. This will divide it into $(k+1)$ sections, and it's quite easy to prove that these sections should be roughly equal in size (to prove it, we can show that if the sizes of two sections differ by more than $1$, the longer one can be shortened and the shorter one can be elongated so the sum of squares of their lengths decreases). So, a section of length $x$ should be divided into $x \bmod (k+1)$ sections of length $\lceil \frac{x}{k+1} \rceil$ and $(k+1) - x \bmod (k+1)$ sections of length $\lfloor \frac{x}{k+1} \rfloor$. Let's denote the total energy cost of a section of length $x$ divided by $k$ new portals as $f(x, k)$; since we divide it in roughly equal parts, it's easy to see that $f(x, k) = (x \bmod (k+1)) \cdot (\lceil \frac{x}{k+1} \rceil)^2 + ((k+1) - x \bmod (k+1)) \cdot (\lfloor \frac{x}{k+1} \rfloor)^2$. The key observation that we need to make now is that $f(x, k) - f(x, k+1) \ge f(x, k+1) - f(x, k+2)$; i. e. if we add more portals to the same section, the energy cost change from adding a new portal doesn't go up. Unfortunately, we can't give a simple, strict proof of this fact, but we have verified it with stress testing (this would be easy to prove if it were possible to place portals in non-integer points, we could just analyze the derivative, but in the integer case, it's way more difficult). Okay, what should we do with the fact that $f(x, k) - f(x, k+1) \ge f(x, k+1) - f(x, k+2)$ for a section of length $x$? The main idea of the solution is binary search over the value of $f(x, k) - f(x, k+1)$; i. e., we use binary search to find the minimum possible change that a new portal would give us. 
Let's say that we want to check that using the portals that give the cost change $\ge c$ is enough; then, for each section, we want to find the number of new portals $k$ such that $f(x, k-1) - f(x, k) \ge c$, but $f(x, k) - f(x, k+1) < c$; we can use another binary search to do that. For a fixed integer $c$, we can calculate not only the number of new portals that we can add if the cost change for each portal should be at least $c$, but also the total cost of the path after these changes; let's denote $g(c)$ as the total cost of the path if we place new portals until the cost change is less than $c$, and $h(c)$ is the number of portals we will place in that case. We have to find the minimum value of $c$ such that $g(c) \le m$. Now, it looks like $h(c)$ is the answer, but this solution gives WA on one of the sample tests. The key observation we are missing is that, for the value $c$, we don't have to add all of the portals that change the answer by $c$; we might need only some of them. To calculate the answer, let's compute four values: $g(c+1)$; $h(c+1)$; $g(c)$; $h(c)$. If we place $h(c+1)$ portals and add new portals one by one, until the total cost becomes not greater than $m$, the cost change from each new portal will be equal to $\frac{g(c+1) - g(c)}{h(c) - h(c+1)}$ (or just $c$ if we consider the fact that we start using the portals which change the cost by $c$). So, we can easily calculate how many more additional portals we need to add if we start from $h(c+1)$ portals and cost $g(c+1)$. The total complexity of our solution is $O(n \log^2 A)$: we have a binary search over the cost change for each new portal; and for a fixed cost change, to determine the number of portals we place in each section, we run another binary search in every section separately.
[ "binary search", "greedy" ]
2,600
#include <bits/stdc++.h> using namespace std; int n; vector<int> lens; long long sqr(int x) { return x * 1ll * x; } long long eval_split(int len, int k) { return sqr(len / k) * (k - len % k) + sqr(len / k + 1) * (len % k); } pair<int, long long> eval_segment(int len, long long bound) { // only take options with value >= bound if(bound <= 2 || len == 1) return make_pair(len - 1, len); int lf = 0; int rg = len - 1; while(rg - lf > 1) { int mid = (lf + rg) / 2; if(eval_split(len, mid) - eval_split(len, mid + 1) >= bound) lf = mid; else rg = mid; } return make_pair(lf, eval_split(len, lf + 1)); } pair<int, long long> eval_full(long long bound) { pair<int, long long> ans; for(auto x : lens) { pair<int, long long> cur = eval_segment(x, bound); ans.first += cur.first; ans.second += cur.second; } return ans; } int main() { scanf("%d", &n); int pr = 0; for(int i = 0; i < n; i++) { int x; scanf("%d", &x); lens.push_back(x - pr); pr = x; } long long w; scanf("%lld", &w); long long lf = 0ll; long long rg = (long long)(1e18) + 43; if(eval_full(rg).second <= w) { cout << 0 << endl; return 0; } while(rg - lf > 1) { long long mid = (lf + rg) / 2; pair<int, long long> res = eval_full(mid); if(res.second <= w) lf = mid; else rg = mid; } pair<int, long long> resL = eval_full(lf); pair<int, long long> resR = eval_full(rg); assert(resL.second <= w && resR.second > w); long long change = (resR.second - resL.second) / (resR.first - resL.first); cout << resL.first + (w - resL.second) / change << endl; }
1662
A
Organizing SWERC
Gianni, SWERC's chief judge, received a huge amount of high quality problems from the judges and now he has to choose a problem set for SWERC. He received $n$ problems and he assigned a beauty score and a difficulty to each of them. The $i$-th problem has beauty score equal to $b_i$ and difficulty equal to $d_i$. The beauty and the difficulty are integers between $1$ and $10$. If there are no problems with a certain difficulty (the possible difficulties are $1,2,\dots,10$) then Gianni will ask for more problems to the judges. Otherwise, for each difficulty between $1$ and $10$, he will put in the problem set one of the most beautiful problems with such difficulty (so the problem set will contain exactly $10$ problems with distinct difficulties). You shall compute the total beauty of the problem set, that is the sum of the beauty scores of the problems chosen by Gianni.
This is the ice breaker problem of the contest. To solve it one shall implement what is described in the statement. One way to implement it is to keep an array $\texttt{beauty}[1\dots10]$ (initialized to $0$), so that, for $1\le \texttt{diff}\le 10$, the value $\texttt{beauty}[\texttt{diff}]$ corresponds to the maximum beauty of a proposed problem with difficulty equal to $\texttt{diff}$. One can update the entries of $\texttt{beauty}$ while processing the input. In the end, if some entries of $\texttt{beauty}$ are still $0$ then the correct output is $\texttt{MOREPROBLEMS}$, otherwise it is the sum of the entries $\texttt{beauty}[1] + \texttt{beauty}[2] + \cdots + \texttt{beauty}[10]$. Notice that the small size of the input allows for less efficient solutions. For instance, one could iterate $10$ times over the problems, once for each difficulty, and find the maximum beauty associated with difficulty $i$ during the $i$-th iteration (this way, array $\texttt{beauty}$ is not needed).
[ "brute force", "implementation" ]
null
null
1662
B
Toys
Vittorio has three favorite toys: a teddy bear, an owl, and a raccoon. Each of them has a name. Vittorio takes several sheets of paper and writes a letter on each side of every sheet so that it is possible to spell any of the three names by arranging some of the sheets in a row (sheets can be reordered and flipped as needed). The three names do not have to be spelled at the same time, it is sufficient that it is possible to spell each of them using all the available sheets (and the same sheet can be used to spell different names). Find the minimum number of sheets required. In addition, produce a list of sheets with minimum cardinality which can be used to spell the three names (if there are multiple answers, print any).
Toys is one of the most challenging problems of the contest, but no particular knowledge of algorithms and data structures is required to solve it. We propose two greedy solutions that start with a common reformulation of the problem statement. The limit to the length of the three names is very permissive ($\leq 1000$), so there is no need to pay much attention to computational efficiency. Problem reformulation and notations Suppose to have a set of sheets that allows to spell all three names. Once we fix the way all names are spelled, each sheet either contributes to the $i$-th name with a letter $x_i$ or does not contribute to the $i$-th name. In the latter case, we conventionally set $x_i = \star$. The triplet $(x_1, \, x_2, \, x_3)$ consists of three English letters ($\texttt{A}$-$\texttt{Z}$) or $\star$, with the constraint that at most two different English letters can occur (we say that such a triplet is valid). The problem can then be reformulated as follows: find a set of valid triplets $(x_1^1, \, x_2^1, \, x_3^1), \, \dots,$ $(x_1^m, \, x_2^m, \, x_3^m)$ of minimal size $m$ such that, for each $i \in \{1, \, 2, \, 3\}$, the letters $x_i^1, \, \dots, \, x_i^m$ are a permutation of the $i$-th name with some extra $\star$'s. A set of valid triplets satisfying this property (not necessarily of minimal size) will be called an admissible set; if it is also of minimal size, it will be called an optimal admissible set. For instance, in the first sample, the two sheets $\texttt{AG}$ and $\texttt{AM}$ can be represented by the triples $(\texttt{A}, \, \texttt{G}, \, \texttt{A})$ and $(\texttt{A}, \, \texttt{A}, \, \texttt{M})$ to form the three names $\texttt{AA}$, $\texttt{GA}$, $\texttt{MA}$. 
In the second sample (where the three names are $\texttt{TEDDY}$, $\texttt{HEDWIG}$, and $\texttt{RACCOON}$), the eight sheets of the sample output can be represented by the following triplets, one per sheet (the non-$\star$ entries in the first, second, and third positions spell, respectively, permutations of the three names): $(\texttt{T}, \, \star, \, \texttt{A})$, $(\star, \, \texttt{H}, \, \texttt{C})$, $(\texttt{Y}, \, \star, \, \texttt{C})$, $(\texttt{D}, \, \texttt{G}, \, \star)$, $(\texttt{D}, \, \texttt{D}, \, \texttt{O})$, $(\texttt{E}, \, \texttt{E}, \, \texttt{R})$, $(\star, \, \texttt{I}, \, \texttt{N})$, $(\star, \, \texttt{W}, \, \texttt{O})$. Denote by $f_i(x)$ the number of occurrences of the letter $x$ in the $i$-th name. Let $f(x) =$ $(f_1(x),$ $f_2(x),$ $f_3(x))$. For instance, in the second sample, $f(\texttt{D}) = (2, \, 1, \, 0)$. Denote by $l_1, \, l_2, \, l_3$ the lengths of the three names. Comments on the sample inputs/outputs The first sample (where the three input names are $\texttt{AA}$, $\texttt{GA}$, $\texttt{MA}$) is very small but it already shows that one can cleverly construct a solution with only two sheets, by putting the $\texttt{G}$ and the $\texttt{M}$ in two different sheets. If we are not careful and put the $\texttt{G}$ and the $\texttt{M}$ on the two sides of the same sheet, then we are forced to use a total of three sheets which is sub-optimal. Note that $\texttt{AA}$, $\texttt{GA}$, and $\texttt{MA}$ are indeed among Vittorio's early favorite words. A trivial lower bound to the number of sheets needed is $\max(l_1, \, l_2, \, l_3)$. The second sample ($\texttt{TEDDY}$, $\texttt{HEDWIG}$, $\texttt{RACCOON}$) is a case where $\max(l_1, \, l_2, \, l_3) = 7$ sheets are not enough. To become convinced that this is indeed the case, one can try to construct an admissible set of $7$ triplets having the letters of $\texttt{RACCOON}$ in the third positions. 
None of the letters of $\texttt{RACCOON}$ is reusable in the other two names, so the only way to save some space is to have two $\texttt{D}$'s on one sheet and two $\texttt{E}$'s on another sheet (these are the letters in common between $\texttt{TEDDY}$ and $\texttt{HEDWIG}$). Since $l_1 + l_2 = 11$, we would still need $9$ sheets. If we try instead to construct $8$ sheets, one of them is not required to spell $\texttt{RACCOON}$ (so the corresponding triplet has a $\star$ in the third position) and so it can fit two different letters from $\texttt{TEDDY}$ and $\texttt{HEDWIG}$ (for instance, $\texttt{D}$ and $\texttt{G}$ in the sample output). At this point, one could guess that the minimal number of sheets is given by the formula $\max\left(l_1, \, l_2, \, l_3, \, \left\lceil \frac{1}{2} \sum_x \max f(x) \right\rceil \right).$ First solution We start with the following observation. It is relatively simple, but it will turn out to be very useful. Lemma 1. If $f_1(x) \geq f_2(x) \geq f_3(x)$ and $f_1(x), \, f_2(x) > 0$, then there exists an optimal admissible set that includes a triplet of the form $(x, \, x, \, y)$ for some $y$. Proof. Suppose to have an optimal admissible set where no triplet has the form $(x, \, x, \, y)$. There is at least one triplet $s_1 = (x, \, y, \, y')$ where $x$ appears in first position (but not in second) and at least one triplet $s_2 = (z, \, x, \, z')$ where $x$ appears in second position (but not in first). If $s_1$ can be chosen so that $y, \, y' \neq x$, then we can replace $s_1$ and $s_2$ with the valid triplets $(x, \, x, \, z')$, $(z, \, y, \, y')$. If $s_2$ can be chosen so that $z, \, z' \neq x$, then we can replace $s_1$ and $s_2$ with the valid triplets $(x, \, x, \, y')$, $(z, \, y, \, z')$. If none of the previous cases occurs, all triplets containing $x$ are of the form $(t, \, t', \, x)$. Therefore, $f_3(x) \geq f_1(x), \, f_2(x)$. 
This implies that $f_1(x) = f_2(x) = f_3(x)$, so all triplets containing $x$ are equal to $(x, \, x, \, x)$. $\blacksquare$ We now describe the first and most important step in our solution. As long as the hypothesis of Lemma 1 holds for some letter $x$ with $f_1(x) + f_2(x) + f_3(x) \geq 3$, replace one occurrence of the letter $x$ in the first and second name with a new letter $x'$ (not appearing in any of the three names and not necessarily belonging to the English alphabet). This operation does not decrease the minimal number of triplets needed: any set that is admissible for the three new names is also admissible for the old names, after replacing $x'$ with $x$. Thanks to Lemma 1, the minimal number of triplets needed remains the same. When the hypothesis of Lemma 1 does not hold anymore (even after reordering the three names), each letter appears either in at most one name (possibly several times) or exactly twice across all three names. For each letter $x$ that appears in only one name (say, $k$ times), replace all occurrences of $x$ with different new letters $x_1, \, \dots, \, x_k$. This does not impact the optimal size of an admissible set, because all occurrences of $x$ necessarily appear in different triplets. After this operation, every letter appears at most once in each name and at most twice across all three names. For instance, the three names of the second sample could become as follows (we create new letters by adding subscripts to standard English letters): $\texttt{T}_1\texttt{E}_1\texttt{D}_1\texttt{D}_2\texttt{Y}_1$, $\texttt{H}_1\texttt{E}_1\texttt{D}_1\texttt{W}_1\texttt{I}_1\texttt{G}_1$, $\texttt{R}_1\texttt{A}_1\texttt{C}_1\texttt{C}_2\texttt{O}_1\texttt{O}_2\texttt{N}_1$. We say that a letter is double if it appears twice across the three names (such as $\texttt{D}_1$ in the previous example) and single if it appears once (such as $\texttt{D}_2$). After changing the three names as described above, our greedy solution is as follows. 
As long as there are a double letter $x$ and a single letter $y$ that appear in different names (assume without loss of generality that $x$ appears in the first two names and $y$ appears in the third), create a valid triplet $(x, \, x, \, y)$ and erase the occurrences of $x$ and $y$ from the three names. As long as there are three double letters $x, \, y, \, z$ with $f(x) = (1, \, 1, \, 0)$, $f(y) = (0, \, 1, \, 1)$, and $f(z) = (1, \, 0, \, 1)$, create the two valid triplets $(x, \, x, \, z)$ and $(z, \, y, \, y)$, then erase all occurrences of $x, \, y, \, z$ from the three names. As long as there is a double letter $x$ (assume without loss of generality that it appears in the first two names), create a valid triplet $(x, \, x, \, \star)$ and erase the two occurrences of $x$ in the first two names. As long as at least two names are non-empty, take letters $x, \, y$ from the two longest names (assume without loss of generality that they are the first and the second name), create a valid triplet $(x, \, y, \, \star)$, and erase the occurrences of $x$ and $y$. As long as exactly one of the names is non-empty (assume without loss of generality that it is the first), create a triplet $(x, \, \star, \, \star)$ using a letter $x$ of the first name and erase $x$ from the first name. The following lemmas show that the solution we have just described leads to an optimal set of triplets. Lemma 2. Suppose that $x$ is a double letter and $y$ is a single letter with $f(x) = (1, \, 1, \, 0)$ and $f(y) = (0, \, 0, \, 1)$. Then there exists an optimal admissible set that includes the triplet $(x, \, x, \, y)$. Proof. An optimal admissible set necessarily contains one triplet of the form $(z, \, z', \, y)$ with $z, \, z' \neq y$. We can swap $z$ with the only occurrence of $x$ in the first position of some triplet and $z'$ with the only occurrence of $x$ in the second position of some triplet. All triplets remain valid after this operation. $\blacksquare$ Lemma 3. 
Suppose that the hypothesis of Lemma 2 does not apply. If $x, \, y, \, z$ are double letters with $f(x) = (1, \, 1, \, 0)$, $f(y) = (0, \, 1, \, 1)$, and $f(z) = (1, \, 0, \, 1)$, then there is an optimal admissible set that includes the triplets $(x, \, x, \, z)$ and $(z, \, y, \, y)$. Proof. An optimal admissible set necessarily contains one triplet of the form $(x, \, x, \, z')$. Since there are no single letters (otherwise, the hypothesis of Lemma 2 would hold), $z'$ is a double letter, so the solution also contains a valid triplet $(z', \, y', \, y")$ or $(y', \, z', \, y")$ with $y', \, y" \neq z'$. In the first case, swap the two occurrences of $z'$ with the two occurrences of $z$; then swap $y'$ with one occurrence of $y$ and $y"$ with the other occurrence of $y$. We obtain the valid triplets $(x, \, x, \, z)$ and $(z, \, y, \, y)$, as desired. In the second case, we can similarly obtain $(x, \, x, \, y)$ and $(z, \, y, \, z)$. One additional swap of $y$ and $z$ in the third position leads to $(x, \, x, \, z)$ and $(z, \, y, \, y)$. $\blacksquare$ Lemma 4. Suppose that the hypotheses of Lemmas 2 and 3 do not apply. If $x$ is a double letter with $f(x) = (1, \, 1, \, 0)$, then there is an optimal admissible set that includes the triplet $(x, \, x, \, \star)$. Proof. Among all optimal admissible sets, choose one which maximizes the number of triplets with two equal letters $\neq \star$. The chosen set necessarily includes a triplet of the form $(x, \, x, \, y)$ with $y \neq x$. We are going to prove that $y = \star$ (using the maximality assumption). Suppose by contradiction that $y \neq \star$. Then $y$ is a double letter, otherwise, Lemma 2 would apply. Therefore, the set includes a valid triplet of the form $(y, \, z, \, z')$ or $(z, \, y, \, z')$, for some $z, \, z' \neq y$; without loss of generality, assume that the first case occurs. If $z = z'$ is a double letter, then Lemma 3 applies, which is a contradiction. 
This means that at least one of $z$ and $z'$ is $\star$. We can then swap the $y$ and the $z'$ in the third positions, obtaining the valid triplets $(x, \, x, \, z')$ and $(y, \, z, \, y)$. This increases by $1$ the number of triplets with two equal letters, violating the maximality assumption. $\blacksquare$ Lemma 5. Suppose that there are single letters only. If the lengths of the three names satisfy $l_1 \geq l_2 \geq l_3$ and $l_1, \, l_2 > 0$, then there is an optimal admissible set that includes a triplet $(x, \, y, \, \star)$ where $x$ is any (single) letter of the first name and $y$ is any (single) letter of the second name. Proof. Suppose by contradiction to have an optimal set with no triplets of the form $(x, \, y, \, \star)$. For $i \in \{1, \, 2, \, 3\}$ denote by $t_i$ the number of triplets with one single letter at position $i$ and two $\star$; for $1 \leq i < j \leq 3$, denote by $t_{i,j}$ the number of triplets with single letters at positions $i$ and $j$. At most one of $t_1, \, t_2, \, t_3$ is non-zero, otherwise, we could merge two triplets into one, which goes against the optimality assumption. In addition, $t_{1,2} = 0$. By counting the letters occurring in each position of the triplets, we easily get $l_1 = t_1 + t_{1,3}$, $l_2 = t_2 + t_{2,3}$, and $l_3 = t_3 + t_{1,3} + t_{2,3}$. Since at least one of $t_1$ and $t_2$ is zero, we have that $l_3 \geq \min(l_1, \, l_2)$, so $l_2 = l_3$. This forces $t_3 = 0$ and $t_2 = t_{1,3}$. If $t_2 = t_{1,3} > 0$, then there is at least one triplet of the form $(x, \, \star, \, z)$ and one of the form $(\star, \, y, \, \star)$; we can swap $x$ and $\star$ in the first positions to obtain $(x, \, y, \, \star)$ and $(\star, \, \star, \, z)$, as desired. Otherwise, $t_2 = t_{1,3} = 0$, so $t_1 \geq t_{2,3} > 0$ and we conclude in a similar way by transforming two triplets of the form $(x, \, \star, \, \star)$ and $(\star, \, y, \, z)$ into $(x, \, y, \, \star)$ and $(\star, \, \star, \, z)$. 
$\blacksquare$ Second solution The alternative solution we propose does not use the idea of changing the letters. Instead, the key greedy step is the following: as long as one name (say, the first) contains a letter $x$ with $f_1(x) > f_2(x) + f_3(x)$ and the other two names contain a common letter $y$ with $f_1(y) < f_2(y) + f_3(y)$, we create the triplet $(x, \, y, \, y)$ and decrease the length of the three names by $1$. The optimality of this step is proved in Lemma 8 below. Once this can no longer be done, we are left with four possible cases (described by Lemma 9 below) and we show how to conclude greedily in each case: one case is in common with the first solution; in the other three cases, there is an admissible set of size $\max(l_1, \, l_2, \, l_3)$ which is clearly optimal (since we need at least one triplet for each letter of the longest name). Lemma 6. Suppose that $f_1(x) > f_2(x) + f_3(x)$ for some English letter $x$. Then any admissible set includes a triplet $(x, \, y, \, y')$ with $y, \, y' \neq x$, where $y$ and $y'$ can be either English letters or $\star$. Proof. For all $i \in \{1, \, 2, \, 3\}$, any admissible set must have exactly $f_i(x)$ triplets with $x$ in the $i$-th position. Therefore, at least one triplet has $x$ in the first position but not in the second or third position. $\blacksquare$ Lemma 7. Suppose that $f_1(x) < f_2(x) + f_3(x)$, $f_2(x) > 0$, and $f_3(x) > 0$, for some English letter $x$. Then there is an optimal admissible set that includes a triplet $(y, \, x, \, x)$ where $y$ can be either an English letter or $\star$. Proof. Fix an optimal admissible set. If at least one triplet has $x$ both in the second and third position, then we are done. Otherwise, the inequality $f_1(x) < f_2(x) + f_3(x)$ ensures that at least one triplet $s_1$ has $x$ in the second or third position but not in the first position. Suppose without loss of generality that $s_1 = (z, \, z', \, x)$ with $z, \, z' \neq x$. 
Since $f_2(x) > 0$, there exists at least one triplet $s_2$ of the form $(y, \, x, \, y')$. We can then replace the triplets $s_1$ and $s_2$ with the following two valid triplets: $(y, \, x, \, x)$ and $(z, \, z', \, y')$. $\blacksquare$ Lemma 8. Suppose that $f_1(x) > f_2(x) + f_3(x)$, $f_1(y) < f_2(y) + f_3(y)$, and $f_2(y), \, f_3(y) > 0$ for some English letters $x, \, y$. Then there is an optimal admissible set that includes a triplet $(x, \, y, \, y)$. Proof. By Lemma 7, there exists an optimal admissible set that includes a triplet $s_1$ of the form $(z, \, y, \, y)$. If $z = x$, then we are done. Otherwise, by Lemma 6, the set also includes a triplet $s_2$ of the form $(x, \, t, \, t')$ with $t, \, t' \neq x$. We can then replace $s_1$ and $s_2$ with the following two valid triplets: $(x, \, y, \, y)$ and $(z, \, t, \, t')$. $\blacksquare$ Lemma 9. If the hypothesis of Lemma 8 does not apply, we are in at least one of the following cases: For all letters $x$, the numbers $f_1(x), \, f_2(x), \, f_3(x)$ form the sides of a (possibly degenerate) triangle. Up to changing the order of the three names, $f_1(x) \geq f_2(x) + f_3(x)$ for all letters $x$. One of the names is empty. The three names have disjoint letters. Proof. We say that the first name is beautiful if $f_1(x) > f_2(x) + f_3(x)$ for at least one letter $x$, and we analogously define beauty for the second and third name. If none of the three names is beautiful, then we are in case (1). If exactly one name is beautiful (say, the first one), then $f_1(x) > f_2(x) + f_3(x)$ for some letter $x$; if $f_1(y) < f_2(y) + f_3(y)$ for some $y$, then $f_2(y), \, f_3(y) > 0$ (otherwise, the second or third name would be beautiful), so Lemma 8 applies, which is a contradiction. Therefore we are in case (2). If exactly two names are beautiful (say, the first and the second name), we are going to prove that the third name is empty, so we are in case (3). 
Indeed, suppose by contradiction that the third name contains some letter $y$. Then $f_3(y) \leq f_1(y) + f_2(y)$ because the third name is not beautiful, so at least one other name (say, the second) contains the letter $y$. Since the first name is beautiful and Lemma 8 does not apply, we have $f_1(y) \geq f_2(y) + f_3(y)$ and in particular, the first name also contains the letter $y$. Similarly, since the second name is beautiful and Lemma 8 does not apply (after swapping the first and second name), we get $f_2(y) \geq f_1(y) + f_3(y)$. Combining the previous two inequalities, we get $f_1(y) \geq f_2(y) + f_3(y) \geq f_1(y) + 2 f_3(y)$, so $f_3(y) = 0$, a contradiction. Finally, if all three names are beautiful, we are going to prove that we are in case (4). Indeed, suppose by contradiction that two names (say, the second and the third) have a letter $y$ in common. Since the first name is beautiful and Lemma 8 does not apply, we have $f_1(y) \geq f_2(y) + f_3(y)$, so in particular the first name also contains $y$. Since the second name is beautiful and Lemma 8 does not apply, we also have $f_2(y) \geq f_1(y) + f_3(y)$, which is a contradiction because $f_3(y) > 0$. $\blacksquare$ If we are in case (2), we easily conclude with $l_1$ additional triplets. If we are in case (3), assuming without loss of generality that the third name is empty, we conclude with additional $\max(l_1, \, l_2)$ triplets. Case (4) is handled as in Lemma 5 at the end of the first solution (we are left with single letters only). Only case (1) remains, and we are going to show that in such case we can conclude with $\max(l_1, \, l_2, \, l_3)$ additional triplets. We say that the first and second names are friends if there is a letter $x$ such that $f_1(x), \, f_2(x) > 0$ and $f_1(x)-1, \, f_2(x)-1, \, f_3(x)$ form the sides of a triangle (we say that the letter $x$ proves the friendship between the first and second names). We define the friendship for the other two pairs of names analogously. 
Iteratively proceed as follows: As long as there is a letter $x$ such that $f_1(x)-1, \, f_2(x)-1, \, f_3(x)-1$ form the sides of a triangle, we create a triplet $(x, \, x, \, x)$ and erase one occurrence of $x$ from each name. As long as each name is a friend of the other two, then let $x, \, y, \, z$ be three letters that prove the friendships. We create two triplets $(x, \, x, \, y)$ and $(z, \, y, \, z)$, then erase $x$ and $z$ from the first name, $x$ and $y$ from the second name, $y$ and $z$ from the third name. If we are not in case (1) or (2), suppose without loss of generality that the second and third names are not friends. We are going to prove that $f_1(x) \geq f_2(x) + f_3(x)$ for all letters $x$, so we can conclude as in case (2). If $f_2(x) = 0$, then $f_1(x) = f_3(x)$ (because we are in case (1)), and similarly $f_3(x) = 0$ implies $f_1(x) = f_3(x)$. In both these cases, $f_1(x) \geq f_2(x) + f_3(x)$. We can therefore assume that $f_2(x), \, f_3(x) > 0$. Since the second and third names are not friends, we have $f_1(x) > f_2(x) - 1 + f_3(x) - 1$, i.e., $f_1(x) \geq f_2(x) + f_3(x) - 1$. Since we are not in case (a), we necessarily have $f_1(x) \geq f_2(x) + f_3(x)$.
[ "greedy", "strings" ]
null
null
1662
C
European Trip
The map of Europe can be represented by a set of $n$ cities, numbered from $1$ through $n$, which are connected by $m$ bidirectional roads, each of which connects two distinct cities. A trip of length $k$ is a sequence of $k+1$ cities $v_1, v_2, \ldots, v_{k+1}$ such that there is a road connecting each consecutive pair $v_i$, $v_{i+1}$ of cities, for all $1 \le i \le k$. A special trip is a trip that does not use the same road twice in a row, i.e., a sequence of $k+1$ cities $v_1, v_2, \ldots, v_{k+1}$ such that it forms a trip and $v_i \neq v_{i + 2}$, for all $1 \le i \le k - 1$. Given an integer $k$, compute the number of distinct special trips of length $k$ which begin and end in the same city. Since the answer might be large, give the answer modulo $998\,244\,353$.
Warm-up problem Let us consider a related simpler problem first. Let $G$ be the graph that represents the cities and roads in the problem statement and let $A$ be its adjacency matrix. Suppose that instead of special trips we wanted to compute the number of distinct trips of length $k$ that begin and end in the same city. We can use an important property of the adjacency matrix $A$. Lemma 1. For any $k \ge 1$, the $(i, \, j)$-th entry of the $k$-th power of $A$, $(A^k)_{ij}$ is equal to the number of trips of length $k$ that start at $i$ and end at $j$. Proof. For $k = 1$ this is obvious from the definition of the adjacency matrix. Assume the lemma holds for $k \geq 1$ and consider the matrix $A^{k+1} = A^k A$. By the inductive hypothesis, $(A^k)_{ij}$ is equal to the number of trips of length $k$ that start at $i$ and end at $j$. Now, the number of trips of length $k + 1$ between $i$ and $j$ is equal to the number of trips of length $k$ from $i$ to some vertex $l$ adjacent to $j$, but this is given by $A^k A$, so the result follows by induction. $\blacksquare$ Thus, to compute the number of trips of length $k$ that begin and end in the same city we only need to compute $A^k$ and sum the elements in the diagonal of the matrix (this is called the trace of the matrix). To do so efficiently, we can use the binary exponentiation technique, which is summarized in the following recursive approach. Let $\displaystyle I = \begin{bmatrix} 1 & 0 & \cdots & 0 \\ 0 & 1 & \cdots & 0 \\ \vdots & \vdots & \ddots & \vdots \\ 0 & 0 & \cdots & 1 \end{bmatrix}$ be the identity matrix. Then: $A^k = \left\{ \begin{matrix} I & \mathrm{if} \; k = 0, \\ \left(A^{\frac{k}{2}}\right)^2 & \mathrm{if} \; k > 0 \; \mathrm{and} \; k \; \mathrm{even}, \\ \left(A^{\frac{k - 1}{2}}\right)^2 \cdot A & \mathrm{if} \; k > 0 \; \mathrm{and} \; k \; \mathrm{odd}. \end{matrix} \right.$ Since we halve the exponent in each step, this procedure takes $\mathcal O(\log k)$ steps. 
In each step we perform one or two matrix multiplications of two $n \times n$ square matrices, which takes (with the naive algorithm) $\mathcal O(n^3)$ time. Overall this algorithm takes $\mathcal O(n^3 \log k)$ time to compute the answer. Full problem Let us now turn our attention back to the problem of computing the number of special trips that begin and end in the same city. There is no simple matrix whose powers correspond to special trips, but we can try to reason about how to describe these trips in terms of the adjacency matrix. Let $S^{(k)}$ be an $n \times n$ matrix whose $(i, \, j)$ entry is equal to the number of special trips of length $k$ that start in $i$ and end in $j$. Additionally, let $D$ be the degrees matrix, a diagonal matrix where $D_{ii}$ is equal to the degree of vertex $i$ in $G$. Lemma 2. The following equalities hold. $S^{(1)} = A$. $S^{(2)} = A^2 - D$. $S^{(k)} = S^{(k - 1)} \cdot A - S^{(k - 2)} \cdot (D - I)$, for $k > 2$. Proof. For $k = 1$ it is clear that the special trips of length $1$ are equivalent to edges in the graph. For $k = 2$, we know that $A^2$ corresponds to trips of length $2$. Special trips cannot use the same edge twice in a row, but $A^2$ ignores this constraint. However, the length 2 trips that use two edges in a row are exactly the trips that start in a vertex $v$, follow one of the $\deg(v)$ adjacent edges, where $\deg(v)$ is the degree of $v$, and then follow that same edge back. The number of such trips is given exactly by $D$, so by subtracting this matrix from the total number of trips of length $2$ we get $S^{(2)} = A^2 - D$. Now consider $k \ge 3$. Note that $S^{(k - 1)} \cdot A$ is the number of trips of length $k$ that do not use the same road twice in a row, except maybe on the very last step. This is almost what we want, but we need to subtract the number of trips that do not use the same road twice in a row, except on the very last step. 
Applying a similar reasoning as in the $k = 2$ case, this is equal to the number of special trips of length $k - 2$ that end at some vertex $v$, then follow one of its $\deg(v)$ adjacent edges, and finally follow that same edge back. However, we do not need to count the edge that preceded getting to $v$, since that would lead to walking the same edge twice in the second to last step. The number of such trips is given by $S^{(k - 2)} \cdot (D - I)$, where $D - I$ is the "degree minus one" diagonal matrix. So the result follows. $\blacksquare$ We are still not done because it is not obvious how to solve this recurrence efficiently. Note that this is a linear recurrence, so we can use any linear recurrence solving technique. There are several ways of doing this, but let us consider a matrix exponentiation method. Consider the following block matrix: $L = \begin{bmatrix} A & -(D - I)\\ I & 0 \end{bmatrix}.$ Note that $L$ is a $2n \times 2n$ matrix. If we multiply it by some column vector we get: $\begin{bmatrix} A & -(D - I)\\ I & 0 \end{bmatrix} \begin{bmatrix} S_1\\ S_2 \end{bmatrix} = \begin{bmatrix} A S_1 - (D - I) S_2 \\ S_1 \end{bmatrix}.$ In particular, this has the exact form of the recurrence in Lemma 2, which means that the product of $L^{k-2}$ by a column vector with entries $S^{(2)}$ and $S^{(1)}$ will result in a column vector with entries $S^{(k)}$ and $S^{(k - 1)}$. To compute $L^{k-2}$ we can use the same binary exponentiation method from the first section, so overall this method takes $\mathcal O(n^3 \log k)$ time.
[ "dp", "graphs", "math", "matrices" ]
null
null
1662
D
Evolution of Weasels
A wild basilisk just appeared at your doorstep. You are not entirely sure what a basilisk is and you wonder whether it evolved from your favorite animal, the weasel. How can you find out whether basilisks evolved from weasels? Certainly, a good first step is to sequence both of their DNAs. Then you can try to check whether there is a sequence of possible mutations from the DNA of the weasel to the DNA of the basilisk. Your friend Ron is a talented alchemist and has studied DNA sequences in many of his experiments. He has found out that DNA strings consist of the letters A, B and C and that single mutations can only remove or add substrings at any position in the string (a substring is a contiguous sequence of characters). The substrings that can be removed or added by a mutation are AA, BB, CC, ABAB or BCBC. During a sequence of mutations a DNA string may even become empty. Ron has agreed to sequence the DNA of the weasel and the basilisk for you, but finding out whether there is a sequence of possible mutations that leads from one to the other is too difficult for him, so you have to do it on your own.
To solve the problem we need the following observations. Every mutation is reversible. Hence, instead of trying to find a sequence of mutations of the string $u$ to get to the string $v$, we can try to find a sequence of mutations of the string $u$ and a sequence of mutations of the string $v$ such that both of them are the same after the mutations. In all possible mutations, we never change the parity of the occurrence of a character. Thus, if the parity of the occurrences of $\texttt{A}$, $\texttt{B}$ and $\texttt{C}$ is not the same in $u$ and $v$, there does not exist a sequence of mutations to get from $u$ to $v$. For the rest of the solution we assume that the parities are the same in both strings. We can transform the string $\texttt{AB}$ to the string $\texttt{BA}$ via the following sequence of moves: start with $\texttt{A}\texttt{B}$, then insert $\texttt{B}\texttt{B}$ at the back of the string to get $\texttt{ABBB}$, then insert the string $\texttt{ABAB}$ in the second to last position to get $\texttt{ABBABABB}$. Removing the two occurrences of $\texttt{B}\texttt{B}$ we get the string $\texttt{A}\texttt{A}\texttt{B}\texttt{A}$ and then removing $\texttt{A}\texttt{A}$ we get to $\texttt{B}\texttt{A}$. A similar trick can be done to transform the string $\texttt{BC}$ to the string $\texttt{CB}$. While this series of steps might seem magical, viewing the problem as a group theoretic problem (see a later section) gives us this observation almost for free. The observation above tells us that we can move the letter $\texttt{B}$ to any position we want and can essentially ignore it; let $u'$ and $v'$ be the strings we obtain from $u$ and $v$ when removing all occurrences of $\texttt{B}$. 
When ignoring the letter $\texttt{B}$, the strings $\texttt{ABAB}$ and $\texttt{BCBC}$ are the same as the strings $\texttt{AA}$ and $\texttt{CC}$, thus we now have strings consisting of the letters $\texttt{A}$ and $\texttt{C}$ and we can add and remove substrings of the form $\texttt{AA}$ and $\texttt{CC}$. This is a much easier problem. We can iteratively remove occurrences of $\texttt{AA}$ or $\texttt{CC}$ from $u'$ and $v'$ until no removal is possible anymore. If the strings are the same after the removal, a sequence of mutations is possible, otherwise it is not. Optimizing the runtime. If we remove occurrences of $\texttt{AA}$ and $\texttt{CC}$ iteratively the runtime of our algorithm is quadratic (in the length of the strings), which is fast enough to pass. There exists a linear time algorithm: Keep a stack of the substring you visited but could not remove (at the beginning, this is empty). Then iterate through your string. If the letter you currently look at is the same as the last letter in your stack, delete the last letter in your stack. If not, add the letter to your stack. Viewing the problem as a group theoretic problem This is a different perspective on the problem and might require some knowledge about groups and their presentations to understand. In the language of groups, the problem statement can be reformulated as follows: Do the words $u$ and $v$ represent the same element in the group $G=\langle\texttt{A},\texttt{B}, \texttt{C}\mid\texttt{A}^2 = \texttt{B}^2=\texttt{C}^2 = \texttt{ABAB} = \texttt{BCBC} = 1\rangle$? The words $u$ and $v$ represent the same element if and only if $w = uv^{-1}$ represents the identity in $G$. Furthermore $\texttt{A}$ and $\texttt{B}$ commute: $\texttt{A}^2 = 1$ implies that $\texttt{A} = \texttt{A}^{-1}$ and the same holds for $\texttt{B}$ and $\texttt{C}$. Thus, $1 = \texttt{A}\texttt{B}\texttt{A}\texttt{B} = \texttt{A}\texttt{B}\texttt{A}^{-1}\texttt{B}^{-1}$, which directly implies $\texttt{AB} = \texttt{B}\texttt{A}$. 
Analogously, we get that $\texttt{BC} = \texttt{CB}$. Now the only thing left to check is: Does the letter $\texttt{B}$ occur an even number of times in $w$? After deleting all occurrences of $\texttt{B}$ from $w$, can we get from $w$ to the empty word by deleting $\texttt{AA}$ or $\texttt{CC}$? If the answer to one of those questions is no, then $w$ does not represent the identity. If both answers are yes, $w$ does represent the identity. Some background about the problem. The problem of telling whether two words represent the same element in a group is called the word problem, and is a widely studied problem in Mathematics. While it is proven to be unsolvable for some groups, the group $G$ we are dealing with in this problem is a Coxeter group, where the word problem is always solvable using Tits' algorithm.
[ "greedy", "implementation", "strings" ]
null
null
1662
E
Round Table
There are $n$ people, numbered from $1$ to $n$, sitting at a round table. Person $i+1$ is sitting to the right of person $i$ (with person $1$ sitting to the right of person $n$). You have come up with a better seating arrangement, which is given as a permutation $p_1, p_2, \dots, p_n$. More specifically, you want to change the seats of the people so that at the end person $p_{i+1}$ is sitting to the right of person $p_i$ (with person $p_1$ sitting to the right of person $p_n$). Notice that for each seating arrangement there are $n$ permutations that describe it (which can be obtained by rotations). In order to achieve that, you can swap two people sitting at adjacent places; but there is a catch: for all $1 \le x \le n-1$ you cannot swap person $x$ and person $x+1$ (notice that you \textbf{can} swap person $n$ and person $1$). What is the minimum number of swaps necessary? It can be proven that any arrangement can be achieved.
Main assumption After playing a bit with the second sample test case one can notice that there are many sequences of swaps of the same length that achieve the same result, and as long as we always swap two people such that the person with a larger number is on the left before the swap, then we always achieve the goal in the minimum number of swaps. Let us first construct a solution which relies on this assumption, and then prove that the assumption is correct. We need to find any way to arrive at the given permutation from the initial one such that in each swap the person with a larger number is on the left, and count the number of swaps needed efficiently. Algorithm We will construct our permutation in steps. After the $i$-th step, the people with numbers from $1$ to $i$ will be in the correct circular order, and the people with numbers from $i+1$ to $n$ will be sitting in the order of their numbers directly to the right of person $i$. In the starting arrangement the condition above is already satisfied for $i=2$, since there is just one circular order for two people. To perform the $(i+1)$-th step, we need to do the following: person $i+1$ is currently sitting to the right of person $i$, but they need to be sitting to the right of some other person $j_i$ which is determined as the closest person on the left of person $i+1$ that has a number between $1$ and $i$. Since we cannot move the person $i+1$ to the left, as we cannot swap person $i$ and person $i+1$, the only way to achieve this is by moving person $i+1$ to the right. But we can't do this directly because person $i+2$ is there, so what we are going to do is move the entire block of people from $i+1$ to $n$ to the right. Moving the block of people from $i+1$ to $n$ by one position to the right takes $n-i$ swaps, and we need to do this $d_i$ times where $d_i$ is the circular distance from person $i$ to person $j_i$ when going to the right, and only considering people with numbers from $1$ to $i$. 
The total number of swaps can therefore be computed as $\displaystyle f(p)=\sum_{i=2}^{n-1}d_i(n-i)$. The numbers $d_i$ can be computed using a standard data structure such as a segment tree, a Fenwick tree, or a balanced search tree in $\mathcal O(n\log n)$. Proof of the main assumption Now we need to repay our debts and prove the assumption. It suffices to prove the following. Lemma. If a permutation $q$ differs from a permutation $p$ in only one swap of adjacent elements $a$ (on the left) and $b$ (on the right) such that $a>b$, then $f(q)-f(p)=1$, where $f(p)$ is the number of operations used by the above algorithm. Proof. Indeed, almost all terms in $f(p)$ and $f(q)$ sums will be the same. Which terms are going to differ? The quantity $d_{a-1}$ will increase by $1$, since the person $a$ will now need to travel one more step to the right to overtake $b$. Conversely, $d_a$ will decrease by $1$, since the person $a+1$ will now need to travel one less step to the right as they will have already overtaken $b$ earlier. All other values of $d_i$ will stay unchanged. So $f(q)-f(p)=(n-(a-1))-(n-a)=1$. $\blacksquare$ By applying the lemma in reverse, we can also see that performing a swap where the largest element is on the right will decrease $f(p)$ by $1$. So every swap either increases or decreases $f(p)$ by $1$, and $f(\mathrm{id})=0$ ($\mathrm{id}$ stands for the identity permutation), therefore $f(p)$ is indeed the smallest number of swaps needed to reach permutation $p$, and it does not matter in which order we make the swaps as long as we always make swaps where the person with a larger number is on the left. Parting words This problem was inspired by the following paper, where you can learn more about this setup: Abram, Antoine, Nathan Chapelier-Laget, and Christophe Reutenauer. "An Order on Circular Permutations." The Electronic Journal of Combinatorics (2021): P3-31. The paper also mentions that this setup was used for USAMO 2010, Problem 2. 
We hope that none of the participants have seen the paper or the USAMO problem before. We found it quite remarkable that the answer is $\mathcal O(n^3)$ in magnitude but the order of swaps does not matter, just like in the non-circular problem without any restrictions on swaps where one would simply count the number of inversions in a permutation. We hope you enjoyed solving this problem, too!
[ "math" ]
null
null
1662
F
Antennas
There are $n$ equidistant antennas on a line, numbered from $1$ to $n$. Each antenna has a power rating, the power of the $i$-th antenna is $p_i$. The $i$-th and the $j$-th antenna can communicate directly if and only if their distance is at most the minimum of their powers, i.e., $|i-j| \leq \min(p_i, p_j)$. Sending a message directly between two such antennas takes $1$ second. What is the minimum amount of time necessary to send a message from antenna $a$ to antenna $b$, possibly using other antennas as relays?
Let's model the problem as an undirected graph, where the vertex $i$ corresponds to the antenna $i$ and vertices $i$ and $j$ are connected with an edge if and only if the corresponding antennas are able to communicate directly, that is $|i - j| \leq \min(p_i, \, p_j)$. In this formulation, the answer to the question posed in the problem statement is simply the shortest distance between $a$ and $b$. To compute the shortest distance, we would like to be able to construct the graph and run a breadth-first search. Unfortunately, we cannot afford to do that, as the number of edges can be in the order of $n^2$. We need a more efficient approach. We examine two options. Improving the BFS The inefficiency of a breadth-first search on a dense graph lies in the fact that the majority of the traversed edges lead to a vertex that has already been visited. Ideally, for every vertex, we would like to process only one of the incoming edges. Should we achieve this property, the running time would be linear in the number of vertices, not in the number of edges. Let's decompose each edge into two directed edges, one in each direction. As soon as we traverse the first edge incoming to a particular vertex, we remove all other edges incoming to this vertex. This idea leads to the desired property of only processing each vertex once, nevertheless on its own it doesn't affect the time complexity as we still generate the full graph. Let's consider for a brief moment how can we generate the graph in the first place. A naive approach is to consider, for a fixed $i$, all $1 \leq j \leq n$ and check whether $i$ and $j$ can communicate with each other. However, we can observe that we do not need to test all $j$'s up to $n$ - it is sufficient to only iterate up to $\min(n, \, i + p_i)$, because $|i - j| = j - i \leq p_i$ must hold. Similarly, a tighter lower bound for the iteration is $\max(1, \, i - p_i)$ instead of $i$. We can do even better than that. Define $l_j := j - p_j$. 
For $j \in(i, \, i + p_i]$ to be able to communicate with $i$, we just need $l_j \leq i$. Similarly, for $j \in [i - p_i, \, i)$ we need $r_j := j + p_j \geq i$. To summarise, we need to find all $j \in [i - p_i, \, i)$ such that $r_j \geq i$ and all $j \in (i, \, i + p_i]$ such that $l_j \leq i$. We can do this by storing the values $r_j$ and $l_j$ in two segment trees that return the minimum and maximum on a queried interval, respectively, together with the index where this minimum or maximum is achieved, and querying the appropriate interval as long as the inequality $r_j \ge i$ or $l_j \le i$ holds. Now, recall the previous idea - removing all edges incoming to a vertex once we find its distance from $a$. In this implementation, we can achieve this in $\mathcal O(\log n)$ time by simply setting the corresponding $l_j$ to $\infty$ and $r_j$ to $-\infty$. Using these techniques, we can process a vertex in $\mathcal O(d\log n)$, where $d$ is the out-degree of the vertex when we are processing it. Since, for each vertex, we only process at most a single incoming edge, the total running time is $\mathcal O(n\log n)$. Iterating on a restricted problem Let us consider a restricted version of the problem. In this version, antenna $i$ is allowed to send a message to $j$ if $|i - j| \leq \min(p_i, \, p_j)$ and $i < j$. In other words, a message can only be sent to an antenna with a strictly larger index, and never in the opposite direction. This problem can be solved by a single sweep, as follows. Maintain a segment tree $S$ of size $n$, that returns the minimum on an interval, and supports single element update. We process the antennas in order of increasing index. When processing an antenna $i$, we do the following operations: Set $S[i] = \mathrm{dist}[i]$ where $\mathrm{dist}[i]$ is the length of the shortest known path from $a$ to $i$. Initially $\mathrm{dist}[a] = 0$, and all other values are initialised to $\infty$. 
For all antennas $j$ such that $j + p_j = i - 1$ (i.e. $j$ is able to reach $i - 1$ but not $i$), set $S[j] = \infty$. Set $\mathrm{dist}[i] = 1 + S.\mathrm{min}(i-p_i, \, i)$. The purpose of removing an antenna by setting $S[j] = \infty$ is evident - as the antennas are processed in order of increasing indices, we know that none of the antennas to be processed can communicate with $j$. The complexity of this algorithm is $\mathcal O(n \log n)$. Note that it is trivial to modify this algorithm for a version of the problem where all communication goes to antennas with a lower index instead. How does this help to solve our original problem? We can alternate between the two versions - the one that only allows communication towards antennas with larger indices (i.e. to the right), and the one with smaller ones (i.e. to the left). This process is repeated while the array $\mathrm{dist}$ is changing. Once a fixed state is reached, we have a correct solution. We argue that only $\mathcal O(\log n)$ iterations of this outer loop will be performed, in other words there exists a shortest path that "changes direction" at most $\mathcal O(\log n)$ times. The basic idea of the proof is that when two successive direction changes are performed, the distances between antennas at least double, otherwise there would exist a path of a shorter or equal length with fewer direction changes. The details of the proof are left as an exercise to the reader. All things considered, this yields an $\mathcal O(n\log^2 n)$ algorithm, which is sufficient given a careful implementation.
[ "data structures", "dfs and similar", "graphs", "implementation", "shortest paths" ]
null
null
1662
G
Gastronomic Event
SWERC organizers want to hold a gastronomic event. The location of the event is a building with $n$ rooms connected by $n-1$ corridors (each corridor connects two rooms) so that it is possible to go from any room to any other room. In each room you have to set up the tasting of a typical Italian dish. You can choose from $n$ typical Italian dishes rated from $1$ to $n$ depending on how good they are ($n$ is the best possible rating). The $n$ dishes have distinct ratings. You want to assign the $n$ dishes to the $n$ rooms so that the number of pleasing tours is maximal. A pleasing tour is a nonempty sequence of rooms so that: - Each room in the sequence is connected to the next one in the sequence by a corridor. - The ratings of the dishes in the rooms (in the order given by the sequence) are increasing. If you assign the $n$ dishes optimally, what is the maximum number of pleasing tours?
This is a classical problem in which one must find a "simple" characterization of what the optimal solution looks like, and then restrict the search for the maximum to the (much smaller, much more regular) family of instances that satisfy the characterization. In our specific case, the first part of this paradigm is by far the most difficult and the one that requires all the important insights, while the second part can be solved through the application of a standard technique. We are going to split the core of the solution in two parts, the first of which deals with the characterization of optimal solutions. Then, in the second, we will explain how to compute the answer efficiently in the restricted search space and we will discuss some improvements and further considerations. Preliminary observations and notation First, however, let us set up the bases. It goes without saying that we are dealing with a graph - actually, a tree - problem. Also, the statement is deceptively straightforward: assign a permutation of $\{1, \, \dots, \, n\}$ to the vertices so as to maximize the number of increasing paths. And here comes the first non-trivial (however simple) idea: we are actually going to consider ways to orient the edges of the tree, and find an orientation that maximizes the number of directed paths; we forget about the vertex numeration altogether. After all, an assignment of the numbers $1, \, \dots, \, n$ naturally induces an orientation of the edges, where edge $(u, \, v)$ points towards $v$ if $u < v$ and towards $u$ otherwise. This begs the question of whether the converse is also true, i.e., does an orientation of the edges "induce" a numbering of the vertices? The answer is, in general, no. Nevertheless, we would still be happy if it were true that every orientation is induced by at least one numbering (for this would mean that, in switching to the orientation perspective, we wouldn't be considering forbidden configurations). 
Luckily, it is, and it follows immediately from the existence of a topological sorting of the vertices of a directed tree (which is, in fact, a DAG): just number the vertices in their topological order! (Incidentally, note that any topological order works, and since it is generally not unique, neither is the numbering.) From now on, we shall call a path any path in the undirected tree, as opposed to directed path which refers to the underlying orientation of the edges (supposing we have fixed one in the context of the sentence). A single vertex is both a path and a directed path. Given an orientation of the tree, we call its value the number of directed paths it produces; therefore, the problem asks to maximize the value of an orientation. We say that a (directed) path has length $\ell$ if it is made up of $\ell$ vertices. Finally, we denote $d(u, \, v)$ the distance between $u$ and $v$ in the undirected tree. Characterizing the optimal solutions The next two results are crucial. Lemma 1. Fix an orientation of the edges. Suppose there exist four distinct vertices $a$, $b$, $c$ and $d$ such that (refer to the figure below for clarity): there are edges $a \rightarrow b$ and $c \rightarrow d$; there is a directed path from $c$ to $b$. Then the value of the orientation can be increased, i.e., the orientation is not optimal. Proof. Let's see what happens, in terms of value, when we reverse edge $a \rightarrow b$ and the subtree rooted at $a$. Let: $A$ be the number of directed paths (in the initial orientation) ending at $a$, including the path of length $1$; $B_{\mathrm{out}}$ be the number of directed paths of length $\ge 2$ starting at $b$; $B_{\mathrm{in}}$ be the number of directed paths of length $\ge 2$ ending at $b$ that do not go through edge $a \rightarrow b$. The directed paths destroyed by the reversal are the $A(B_{\mathrm{out}} + 1)$ paths passing through the edge $a \rightarrow b$, while the ones created are the $A(B_{\mathrm{in}} + 1)$ paths of the form $v \rightarrow \cdots \rightarrow b \leftarrow a \leftarrow \cdots \leftarrow w,$ which become directed once the edge and the subtree are reversed. Hence the value increases (or decreases) by $\Delta_1 = A(B_{\mathrm{in}} - B_{\mathrm{out}})$. With similar reasoning and notation, if we invert the edge $c \rightarrow d$ and the subtree rooted at $d$, the value increases (or decreases) by $\Delta_2 = D(C_{\mathrm{out}} - C_{\mathrm{in}})$. 
Now, since every directed path beginning at $b$ can be extended to the left until we get to $c$ (i.e. we can pre-append the path $c \rightarrow \cdots \rightarrow b$), and since $c \rightarrow \cdots \rightarrow b$ is itself a path beginning at $c$, it holds $C_{\mathrm{out}} \ge B_{\mathrm{out}} + 1$. Likewise, we can prove $B_{\mathrm{in}} \ge C_{\mathrm{in}} + 1$. Thus, $(B_{\mathrm{in}} - B_{\mathrm{out}}) + (C_{\mathrm{out}} - C_{\mathrm{in}}) \ge 2$ and without loss of generality $B_{\mathrm{in}} - B_{\mathrm{out}} > 0$, which implies, since $A > 0$, that $\Delta_1 > 0$ as desired. $\blacksquare$ From Lemma 1 it follows that, in an orientation that yields the optimal solution, every path "changes direction" at most once. Formally: given a path $(v_1, \, v_2, \, \dots, \, v_k)$, there exists at most one index $2 \le i \le k - 1$ such that the path $(v_{i - 1}, \, v_i, \, v_{i + 1})$ is not directed. This condition is equivalent to a very simple property which opens up the way to an efficient algorithm to find the solution. Lemma 2. Suppose a directed tree satisfies the aforementioned condition. Then, there exists a (not necessarily unique) vertex $v^*$ such that all paths having $v^*$ as an endpoint are also directed paths. Proof. We shall start by fixing an arbitrary vertex $v$ and rooting the tree at $v$. Call a vertex $u \ne v$ evil if it has a child $w$ such that the path $(v, \, \dots, \, u, \, w)$ changes direction at $u$. If there are no evil vertices, $v$ is a valid candidate for $v^*$ and we are done. Otherwise, all evil vertices must lie in the same $v$-subtree. Indeed, if $u$ and $u'$ were two evil vertices belonging to different subtrees, and $w$, $w'$ the respective children, the path $(w, \, u, \, \dots, \, v, \, \dots, \, u', \, w')$ would change direction at least twice (see figure below, (a)). 
Moreover, if $u$ is an evil vertex and $r$ is the root of its subtree, the edge $(v, \, r)$ must point in a different direction than all other edges incident on $v$. To see why, suppose without loss of generality $v \rightarrow r$ and $v \rightarrow r' \ne r$; then the path $(w, \, u, \, \dots, \, r, \, v, \, r')$ changes direction twice (see figure below, (b)). Thanks to the previous observations, if we replace $v$ with $r$ the set of evil vertices decreases or stays the same. If we repeat this argument while there are evil vertices, we will produce a downward path in the tree rooted at $v$, which must stop eventually, at which point the number of evil vertices must vanish. $\blacksquare$ Thanks to Lemmas 1 and 2 combined, we know that any optimal orientation admits the existence of a vertex $v^*$ satisfying the property of Lemma 2. This makes us finally able to tackle the problem algorithmically. Computing the maximum Call a vertex $v^*$ that satisfies the property of Lemma 2 a crossroad. We are going to iterate over all $n$ vertices and find the maximum value of an orientation where said vertex is a crossroad. Thus, fix a vertex $v^*$ and consider the $\deg(v^*)$ subtrees the tree gets split into if we erase $v^*$. In an optimal orientation, in each subtree all edges point either "inwards" or "outwards": let $S$ be the sum of the sizes of subtrees of the first kind, and $T$ be the sum of the sizes of the subtrees of the second kind. Of course, $S + T = n - 1$. Let's calculate the value of such an orientation. There are $n$ directed paths having $v^*$ as an endpoint. The directed paths lying entirely in one subtree are $\displaystyle \sum_{v \ne v^*} d(v^*, \, v)$. Finally, the paths starting in a subtree of the first kind and ending in a subtree of the second kind contribute with an additional $S \cdot T$ term. Observe that the first two terms are independent of $S$ and $T$. 
It is trivial to compute the sizes of the subtrees in $\mathcal O(\deg(v^*))$ after preprocessing. Furthermore, the sum of distances can be updated in $\mathcal O(1)$ when moving from $v^*$ to one of its neighbors. As for the third term, it is clear that it is maximized when $S$ and $T$ are as close as possible to $\frac{n - 1}{2}$. If one of the subtrees has size $k \ge \frac{n}{2}$ (there can only be one of those), the way of doing so is trivial: just orient the edges of the big subtree one way, and all other edges the other way, so that $ST = k(n - 1 - k)$. The matter is more complicated if all subtrees have sizes smaller than $\frac{n}{2}$. In this case, $v^*$ is a centroid of the tree. Recall that any tree has exactly one or two centroids - and the latter can only occur if $n$ is even. What we are dealing with is an instance of the knapsack problem: we are given positive integers $k_1, \, \dots, \, k_{\deg(v^*)}$ - the sizes of the subtrees - whose sum is $n - 1$, and we shall find a subset whose sum is as close as possible to $\frac{n - 1}{2}$. The naive dynamic programming solution runs in $\mathcal O(n^2)$ at worst (with $\mathcal O(n)$ space), way too slow for our purposes. We can optimize the runtime, reducing it by a factor $32$, via the use of bitsets. This is still not enough. There is a standard, yet somewhat advanced, trick to speed the algorithm up to $\mathcal O\left(\frac{n\sqrt{n}}{\mathrm{sizeof}(\mathrm{int})}\right)$. We won't go over it here, but you can find a complete tutorial in this Codeforces blog entry (under Subset Sum Speedup 1), together with a number of other interesting related techniques. Since we only need to run this algorithm at most twice, this final optimization, carefully implemented, does the trick. Finally, let's comment further on the nature of the characterization. 
So far, we know very little about the crossroad $v^*$, but intuitively it makes sense that it should be "in the middle" of the tree in order to yield an optimal solution. Lemma 3. There is an optimal solution where at least one centroid of the tree is a crossroad. Proof. Consider an optimal orientation where a vertex $v^*$, which is not a centroid, is a crossroad. Let $\tau$ be the subtree of size $\ge \frac{n}{2}$. We have already observed that all edges of $\tau$ must point (WLOG) toward $v^*$, and all other edges away from $v^*$. Now let $v$ be the neighbor of $v^*$ belonging to $\tau$. If we make $v$ a crossroad, by inverting edge $v \rightarrow v^*$ and keeping the orientations of all other edges, a simple computation shows that the value of the new configuration has not decreased. We can therefore iterate until we reach a centroid. $\blacksquare$ Although this result is not necessary to solve the problem, it is relatively easy to guess and prove, and it simplifies the implementation. One can even prove this slightly stronger conclusion: for each centroid, there is an optimal orientation in which it is a crossroad. [It was briefly considered whether to give this problem with $n \le 10^7$. It turns out that the "slow" part of the algorithm is not the optimized knapsack, but rather the tree traversal needed to find the centroid. To fit the new constraint, one must come up with a way to do this without any traversal. Taking a look at the input format might be a good starting point...]
[ "dp", "greedy", "trees" ]
null
null
1662
H
Boundary
Bethany would like to tile her bathroom. The bathroom has width $w$ centimeters and length $l$ centimeters. If Bethany simply used the basic tiles of size $1 \times 1$ centimeters, she would use $w \cdot l$ of them. However, she has something different in mind. - On the interior of the floor she wants to use the $1 \times 1$ tiles. She needs exactly $(w-2) \cdot (l-2)$ of these. - On the floor boundary she wants to use tiles of size $1 \times a$ for some positive integer $a$. The tiles can also be rotated by $90$ degrees. For which values of $a$ can Bethany tile the bathroom floor as described? Note that $a$ can also be $1$.
First, observe that we can tile any rectangle with tiles of size $1 \times 1$. From now on, we will consider a fixed $a > 1$. Let's investigate what happens in the corners of the rectangle. Each corner is covered either by a tile that is placed horizontally, or by a tile that is placed vertically. Below is an example of one of $2^4 = 16$ possibilities, with the corner tiles in gray. Once we place those tiles (up to $4$, fewer if the tile length is greater than $\frac{w}{2}$ or $\frac{l}{2}$) the positions of the rest are uniquely determined. We can now put four constraints on the value of $a$, one for each side of the rectangle. These constraints are of form $a \mid w - x$ or $a \mid l - x$, where $\mid$ is the "divides without remainder" relation, and $x$ is the number of positions on that side that are covered by a tile placed in a perpendicular direction, e.g. a vertical tile on a horizontal side. Clearly, this can only happen in the corners of the rectangle, hence $x$ is between $0$ and $2$ for all sides. In the above figure, the top side yields the constraint $a \mid w - 0$, the bottom one $a \mid w - 2$, and both the left and right sides $a \mid l - 1$. Put together, we have a necessary condition $a \mid \gcd(w - 0, \, w - 2, \, l - 1, \, l - 1)$, where $\gcd$ is the greatest common divisor. Clearly, the condition is also sufficient, i.e. for an $a$ fulfilling this divisibility there is a rectangle tiling. To summarise, we consider all $16$ possibilities for the tile orientations in the corners, find the $\gcd$ of the constraints using Euclidean algorithm, and then compute its divisors by a trial division. Finally, we return the sorted set of all the divisors. The total time complexity is $\mathcal O(\sqrt {\min(w, \, l)})$ per test case, dominated by the trial division. Note that some of the $16$ cases are the same up to mirroring or rotation, and some are clearly impossible. 
Hence we only need to factor the following values: $\gcd(w - 1, \, l - 1)$ $\gcd(w, \, l - 2)$ $\gcd(w - 2, \, l)$ $\gcd(w - 1, \, l - 2, \, l)$ $\gcd(w - 2, \, w, \, l - 1)$ The last two can only be $1$ or $2$, and if they are both $1$, this implies that one of the first three $\gcd$s is divisible by $2$. Therefore $a = 2$ is always a solution - we can simply insert it to the solution set, and only compute and factor the first three $\gcd$s. This does not change the asymptotic time complexity, nevertheless it is a useful optimisation.
[ "brute force", "math" ]
null
null
1662
I
Ice Cream Shop
On a beach there are $n$ huts in a perfect line, hut $1$ being at the left and hut $i+1$ being $100$ meters to the right of hut $i$, for all $1 \le i \le n - 1$. In hut $i$ there are $p_i$ people. There are $m$ ice cream sellers, also aligned in a perfect line with all the huts. The $i$-th ice cream seller has their shop $x_i$ meters to the right of the first hut. All ice cream shops are at distinct locations, but they may be at the same location as a hut. You want to open a new ice cream shop and you wonder what the best location for your shop is. You can place your ice cream shop anywhere on the beach (not necessarily at an integer distance from the first hut) as long as it is aligned with the huts and the other ice cream shops, even if there is already another ice cream shop or a hut at that location. You know that people would come to your shop only if it is strictly closer to their hut than any other ice cream shop. If every person living in the huts wants to buy exactly one ice cream, what is the maximum number of ice creams that you can sell if you place the shop optimally?
Suppose there are ice cream shops located $a$ and $b$ meters to the right of the first hut, such that $a < b$. If we place our ice cream shop in any location in the interval $[a, \, b]$ it can only be strictly closer to huts located in the interval $(a, \, b)$, since any hut located at or before $a$ will be closer to the ice cream shop located at $a$ and any hut located at or after $b$ will be closer to the ice cream shop located at $b$. We now present two approaches to solve the problem starting from this observation. First solution Let $a < b$ be the positions of two ice cream shops, and let us further assume that there is no other ice cream shop between $a$ and $b$. Of course, it is never convenient to put the new shop at $a$ or $b$. What happens if we place our new shop at $s \in (a, \, b)$? It is easy to see that the only huts whose people will buy ice creams are those in the open interval $\left(\frac{a + s}{2}, \, \frac{s + b}{2}\right)$. Notice that this interval has length $\frac{s + b}{2} - \frac{a + s}{2} = \frac{b - a}{2}$, i.e., half the length of $(a, \, b)$, which is independent of $s$. On the other hand, consider an interval $(l, \, r)$ such that $a \le l < r \le b$ and $r - l = \frac{b - a}{2}$. If we choose $s = 2l - a$, the interval $\left(\frac{a + s}{2}, \, \frac{s + b}{2}\right)$ is exactly $(l, \, r)$. This means that for every interval $(l, \, r)$ of length $\frac{b - a}{2}$ which lies entirely in $(a, \, b)$, it is possible to achieve a total number of ice creams given by $f(l, \, r) := \sum_{100i \, \in \, (l, \, r)} p_{i + 1}.$ Thus the answer is the maximum of $f(l, \, r)$, over all intervals such that there exist $a$ and $b$ as above and $r - l \le \frac{b - a}{2}$. Also notice that we can restrict our search to the intervals with $l = 100k - \frac{1}{2}$ for some $0 \le k \le n - 1$. We can therefore proceed as follows. Iterate over $k = 0, \, \dots, \, n - 1$. For each $k$, find the greatest $x_j < 100k$. 
This can be done in amortized constant time - or via binary search - after sorting the $x_j$'s. Compute $f(l, \, r)$, where $l = 100k - \frac{1}{2}$ and $r = \min\left(x_{j + 1}, \, l + \frac{x_{j + 1} - x_j}{2}\right)$ (assume the $x_j$'s are sorted). This is easy to do having precomputed the prefix sums of $p_1, \, \dots, \, p_n$. Return the maximum of all these values. One must be careful with "boundary conditions", that is, values of $k$ such that $100k$ lies to the left or to the right of all ice cream shops. These are best handled by introducing two dummy shops at positions $-\infty$ and $+\infty$. The solution has $\mathcal O(n + m\log m)$ time complexity and $\mathcal O(n + m)$ space complexity. Second solution Pick again two ice cream shops at $a < b$, with no other ice cream shops in between, and consider a hut located at $h$ such that $a < h < b$. If we place our shop in $s$ such that $s$ is between $a$ and $h$, then we will always be closer to $h$ than the ice cream shop located at $a$. However, we are only closer than the ice cream shop at $b$ if the distance from $s$ to $h$ is smaller than the distance from $h$ to $b$. Formally this means $h - s < b - h$. If we rearrange this expression, we conclude that we will be closer to $h$ if $s > 2h - b$. Analogously, if we place our shop in $s$ such that $s$ is between $h$ and $b$, we will be closer to $h$ if $s < 2h - a$. Hence, we conclude that if we want to be closer to $h$ then we have to place our shop in the interval $(2h - b, \, 2h - a)$ (note that this is an open interval since we want to be strictly closer to $h$). Using this idea, we can now compute for every hut $h$ an interval such that if we place our shop in any point in that interval then our shop will be the closest to hut $h$. Let us assume we have all these intervals, how do we determine the optimal solution from this information? 
Notice that the optimal solution is a point that maximizes the sum of the $p_i$ of all the intervals it is contained in. We can find this maximum sum efficiently using a sweep-line approach. Picture a vertical line that is going to sweep through all of these intervals from left to right. As this line moves we want to keep the sum of the $p_i$ corresponding to the intervals it currently intersects, so we store that value in a counter and whenever the line reaches the starting point of an interval we add its value to the counter, conversely whenever it reaches the endpoint of an interval we subtract its value from the counter. The solution is the maximum value the counter achieves during this process. To implement this efficiently, the only thing we need to observe is that the only relevant "events" to this procedure are the start- and endpoints of intervals. So our sweep-line algorithm can be implemented as follows: for every interval $(l_i, \, r_i)$ corresponding to hut $p_i$ add a point $(l_i, \, p_i)$ and a point $(r_i, \, -p_i)$ to a list of points. Sort this list according to the first component. Iterate through the sorted points and store a counter (initially $0$). Whenever we reach a point $(a, \, b)$ add $b$ to the counter. This requires time $\mathcal O(n \log n)$ since we have to sort all of the $2n$ "event" points and then iterate through them once. Before doing that, however, we need to compute each hut's interval. To do so we have to determine the closest ice cream shops $a$ and $b$ such that $a < h$ and $b > h$ and then the interval is given by $(2h - b, \, 2h - a)$. If we sort the $x_i$ locations of all the ice cream shops we can find this using binary search or by iterating through the huts and ice cream shops simultaneously (this is also known as a two pointers technique). The time complexity of the binary search method is $\mathcal O((m + n) \log m)$, since we first sort the $x_i$ and then for each hut we perform one binary search. 
The time complexity of the two pointers method is $\mathcal O(m \log m + n)$, since we first sort the $x_i$ and then iterate through all the $n$ huts and $m$ ice cream shops once. Combining all of this we get a solution that runs in $\mathcal O(m\log m + n \log n)$ time.
[ "brute force", "implementation", "sortings" ]
null
null
1662
J
Training Camp
You are organizing a training camp to teach algorithms to young kids. There are $n^2$ kids, organized in an $n$ by $n$ grid. Each kid is between $1$ and $n$ years old (inclusive) and any two kids who are in the same row or in the same column have different ages. You want to select exactly $n$ kids for a programming competition, with exactly one kid from each row and one kid from each column. Moreover, kids who are not selected must be either older than both kids selected in their row and column, or younger than both kids selected in their row and column (otherwise they will complain). Notice that it is always possible to select $n$ kids satisfying these requirements (for example by selecting $n$ kids who have the same age). During the training camp, you observed that some kids are good at programming, and the others are not. What is the maximum number of kids good at programming that you can select while satisfying all the requirements?
In this task, you are given the ages of $n^2$ kids as a Latin square $S$ ($n \times n$ grid such that each row and each column contains all the integers from $1$ to $n$), and you are asked to find a subset containing exactly one kid from each row/column, satisfying a certain "stability" constraint, and maximizing the number of kids who are good at programming. Definitions A solution $\mu \subseteq \{1, \, \dots, \, n\}^2$ is a subset of kids containing exactly one kid from each row/column. For convenience, we will denote $\mu_i$ the unique $j$ such that $(i, \, j) \in \mu$, and $\mu^j$ the unique $i$ such that $(i, \, j) \in \mu$. We say that a kid $(i, \, j)$ is blocking a solution $\mu$ if the age of $(i, \, j)$ is between the ages of $(i, \, \mu_i)$ and $(\mu^j, \, j)$, that is if $(S_{i,j} - S_{i,\mu_i})(S_{i,j} - S_{\mu^j,j}) < 0$. We say that a solution is stable if there is no blocking kid. To understand what stability means, we will have a closer look at the second sample. As a first attempt, we can try to draw the graph $G$ containing pairs of kids who cannot belong to the same solution, either because they are on the same row/column (black edges), or because they would create a blocking kid (gray edges). Kids who are good at programming are colored in gray. Stable solutions correspond to maximum (cardinality $n$) independent sets in $G$, which (unfortunately) are difficult to compute without additional structure, especially because $G$ may contain $\Theta(n^4)$ edges. Reducing the problem to maximum-weight antichain We came to the conclusion that we need to understand the structure of $G$. For that matter, define the directed acyclic graph $\vec G$, which has an edge from kid $a$ to kid $b$ iff they are on the same row/column and $a$ is younger than $b$. The graph $\vec G$ has $\Theta(n^3)$ edges, and an important observation is that edges of $G$ correspond to paths of length $\leq 2$ in $\vec G$. 
After looking at length-$2$ paths, we may try to look at longer paths. For that matter, denote $\vec G^{(k)}$ the $k$-th power of $\vec G$, that is the graph which contains an edge from kid $a$ to kid $b$ iff $\vec G$ contains a path of length $\leq k$ from $a$ to $b$. In particular, $\vec G^{(2)}$ is an orientation of the graph $G$ defined in the previous section. Lemma 1. For all $k \geq 2$, the following properties are satisfied. Any independent set of size $n$ in $\vec G^{(k)}$ is a stable solution. Maximum independent sets of $\vec G^{(k)}$ have size $n$. Stable solutions are independent sets in $\vec G^{(k)}$. Proof. To prove (1), observe that $\vec G \subseteq \vec G^{(k)}$ (independent sets contain exactly one kid per row/column) and $\vec G^{(2)} \subseteq \vec G^{(k)}$ (independent sets are stable). To prove (2), observe that a choice of kids having the same age is an independent set, and no set of size $> n$ can be independent (by the pigeonhole principle, some row/column would contain $2$ kids). We prove (3) by induction on $k$. It is true when $k = 2$ by construction. Now choose the smallest $k > 2$ such that there is a path $a_1 \rightarrow a_2 \rightarrow \cdots \rightarrow a_k$ in $\vec G$ where $a_1 \in \mu$ and $a_k \in \mu$ for some stable solution $\mu$. By minimality of $k$, we may assume (without loss of generality) that $a_1 = (i, \, j') \in \mu$, $a_2 = (i, \, j) \notin \mu$ and $a_3 = (i', \, j) \notin \mu$. Let $a_1' = (\mu^j, \, j) \in \mu$ be the kid selected in row $j$. By stability of $\mu$, kid $a_2$ is older than both $a_1$ and $a_1'$, therefore there is an edge $a_1' \rightarrow a_3$, which contradicts the minimality of $k$. $\blacksquare$ An antichain of a partially ordered set is a subset of elements such that no pair of elements are comparable (using transitivity). As a corollary of Lemma 1, stable solutions are exactly the maximum antichains of the partial order induced by $\vec G$. 
Because antichains are defined using the transitive closure, we can simplify the definition of $\vec G$ while keeping the same transitive closure, so that there is an edge from kid $a$ to kid $b$ iff they are on the same row/column and $a$ is exactly $1$ year younger than $b$. This new definition of $\vec G$ has $\Theta(n^2)$ edges, while keeping the property that stable solutions correspond to maximum antichains. Finally, remember that our goal was to find the "best" stable solution in terms of number of kids who are good at programming. Thus, we give each kid a weight between $0$ and $M=1$, and our goal is to find a maximum-weight antichain, under the constraint that it is a maximum-cardinality antichain. We deal with the cardinality constraint by adding a large weight $\omega > n\cdot M$ to each kid, which reduces our problem to finding the maximum-weight antichain. Reducing maximum-weight antichain to maximum flow Finally, we are going to reduce the maximum-weight antichain problem to the maximum flow problem. Define a flow graph $\vec F$, with two special nodes $s$ (source) and $t$ (target), and two copies $v_1\in V_1$ and $v_2\in V_2$ of each node $v$ from $\vec G$. For each node $v$ of weight $w(v)$ in $\vec G$, add two edges $s \rightarrow v_1$ and $v_2 \rightarrow t$ with capacity $w(v)$. For each edge $u \rightarrow v$ in $\vec G$, add three edges $u_1 \rightarrow v_1$, $u_2 \rightarrow v_2$ and $u_1 \rightarrow v_2$ with infinite capacity. Using the max-flow min-cut theorem, the maximum $s$-$t$ flow is equal to the minimum $s$-$t$ cut. Our reduction is easier to understand when looking at cuts (see picture below). Recall that a cut is partition $(\mathcal S, \, \mathcal T)$ of the nodes, where $s\in \mathcal S$ and $t\in \mathcal T$. Given an antichain $A$ in $\vec G$, denote $X$ the set of vertices before $A$ and $Y$ the set of vertices after $A$. 
We define $\mathcal S := \{s\} \cup \{v_1 : v\in A\cup Y\} \cup \{v_2 : v\in Y\}$ and $\mathcal T := \{t\} \cup \{v_1 : v\in X\} \cup \{v_2 : v\in X\cup A\}$. The weight of this cut is exactly equal to $\sum_{v\notin A} w(v)$. Conversely, let $(\mathcal S, \, \mathcal T)$ be a cut of finite weight. We first show that $\mathcal S\cap V_1$ must be downward closed (if $u_1 \in \mathcal S$ and $u_1 \rightarrow v_1$, then $v_1\in \mathcal S$) and $\mathcal T \cap V_2$ must be upward closed (if $v_2 \in \mathcal T$ and $u_2 \rightarrow v_2$ then $u_2\in \mathcal T$). Then, $A := \{v : v_1\in \mathcal S \: \mathrm{and} \: v_2\in \mathcal T\}$ is an antichain, and the cut has weight $\sum_{v\notin A} w(v)$. Therefore, computing a maximum weight antichain in $\vec G$ is equivalent to computing a minimum weight cut in $\vec F$, which can be done by computing a maximum flow in $\vec F$. In terms of complexity, $\vec F$ has $2+n^2$ nodes and $\Theta(n^2)$ edges, which should make any reasonable flow algorithm (such as Edmonds-Karp or Dinic's algorithm) efficient enough. Dilworth's theorem The reduction from the previous section can be seen as an application of Dilworth's theorem. In the unweighted case, the theorem states that the maximal size of an antichain is equal to the minimum number of paths necessary to cover all elements. Moreover, minimum path cover in a directed acyclic graph can be reduced to maximum cardinality matching in a bipartite graph, which in turn can be reduced to maximum flow. In the weighted case, a generalization of Dilworth's theorem states that the maximal weight of an antichain is equal to the minimum number of paths needed to cover each element as many times as its weight. Then, this weighted path cover problem can be reduced to maximum flow. The resulting flow instance is similar to $\vec F$, however, there is no edge within sets $V_1$ and $V_2$, and edges from $V_1$ to $V_2$ correspond to the transitive closure of $\vec G$. 
This gives a flow instance with $\Theta(n^4)$ edges, which is too slow for this problem. Observing that we can avoid computing the transitive closure of $\vec G$ by adding edges within $V_1$ (and $V_2$) yields the solution described in the previous section. Stable matchings In this editorial, we defined the notions of blocking kids and stable solutions. These choices of terms are deliberate, because this task is in fact a (hopefully well) hidden stable marriage problem. Lemma 2. Stable solutions are exactly solution of the stable marriage problem where: rows have preferences over columns: row $i$ ranks column $j$ in $M_{i,j}$-th position; columns have preferences over rows: column $j$ ranks row $i$ in $(n + 1 - M_{i,j})$-th position; a row-column pair is blocking if both prefer each other to their respective partners; a matching is stable if it contains no blocking pair. Proof. The only difference between the task statement and the definition of stability given in Lemma 2 is that a kid $(i, \, j)$ is blocking iff $M_{\mu^j,j} < M_{i,j} < M_{i,\mu_i}$ (type 1) or $M_{i,\mu_i} < M_{i,j} < M_{\mu^j,j}$ (type 2), whereas a row-column pair is blocking (in the stable marriage instance) iff $M_{\mu^j,j} < M_{i,j} < M_{i,\mu_i}$ (type 1). Interestingly, one can show that if a solution has a type 2 blocking kid, then it also has a type 1 blocking kid (left as an exercise). $\blacksquare$ In the stable marriage problem, one stable matching is optimal for the rows (kids of age $1$) and one stable matching is optimal for the columns (kids of age $n$). The set of stable matchings has a distributive lattice structure, which can be represented by closed subsets of a directed graph (closely related to $\vec G$). The problem of finding a maximum-weight stable matching can be reduced to the closure problem, which can itself be reduced to maximum flow. Let us finish this editorial with a few fun facts. 
In the second sample used in this task, $n$ is a power of $2$, each kid $(i, \, j)$ has age $1 + (i - 1) \wedge (j - 1)$, and the number of stable matchings is given by A005154. This family of instance was first proposed by Donald Knuth, who conjectured that it maximizes the number of stable matchings (still open).
[ "flows", "graphs" ]
null
null
1662
K
Pandemic Restrictions
After a long time living abroad, you have decided to move back to Italy and have to find a place to live, but things are not so easy due to the ongoing global pandemic. Your three friends Fabio, Flavio and Francesco live at the points with coordinates $(x_1, y_1), (x_2, y_2)$ and $(x_3, y_3)$, respectively. Due to the mobility restrictions in response to the pandemic, meetings are limited to $3$ persons, so you will only be able to meet $2$ of your friends at a time. Moreover, in order to contain the spread of the infection, the authorities have imposed the following additional measure: for each meeting, the sum of the lengths travelled by each of the attendees from their residence place to the place of the meeting must not exceed $r$. What is the minimum value of $r$ (which can be any nonnegative real number) for which there exists a place of residence that allows you to hold the three possible meetings involving you and two of your friends? Note that the chosen place of residence need not have integer coordinates.
Let us start with some definitions that will make the presentation clearer. Given three points $X, Y$ and $Z$ in the plane, let $f(X, \, Y, \, Z)$ be the minimum of $\overline{XP}+\overline{YP}+\overline{ZP}$ over all points $P$. It is well known that this minimum actually exists and is achieved at a unique point called the Fermat point of the triangle $XYZ$. For fixed $X$ and $Y$, let $f_{XY}(Z) := f(X, \, Y, \, Z)$ and define $g(W) := \max(f_{AB}(W), \, f_{AC}(W), \, f_{BC}(W)),$ For a given parameter $r$, the condition that $Z$ is a valid residence point is equivalent to $g(Z) \leq r$. Thus, no valid residence point exists if and only if $\min g > r$, and therefore the minimum value of $r$ that works is equal to the minimum of $g$ over all points of the plane. We claim that $g$ is convex, hence this minimum exists and we can find it efficiently. This relies on the following two observations: The pointwise maximum of a finite set of convex functions is convex. The function $f_{XY}$ is convex. The first observation is a standard fact. For the second observation, let $X, \, Y$ be two points, $0 \leq t \leq 1$, and let $P, \, Q$ be the respective Fermat points of the triangles $ABX$ and $ABY$. Then, denoting $Z = tX + (1-t)Y$ and $R = tP + (1-t)Q$, we have that $\overline{AR} = |A - tP - (1-t)Q| \leq t \overline{AP} + (1-t)\overline{AQ},$ $\overline{BR} = |B - tP - (1-t)Q| \leq t \overline{BP} + (1-t)\overline{BQ},$ $\overline{ZR} = |t(X-P) + (1-t)(Y-Q)| \leq t \overline{XP} + (1-t)\overline{YQ},$ $f_{AB}(Z) \leq \overline{AR}+\overline{BR}+\overline{ZR} \leq t f_{AB}(X) + (1-t) f_{AB}(Y).$ In view of these facts, if we know how to compute $f(X, \, Y, \, Z)$ efficiently we can solve the problem with a double ternary search on $g$. There are ways to achieve this in $\mathcal O(1)$ time by distinguishing two cases: If one of the angles of triangle $XYZ$ is greater than $2 \pi / 3$, then the corresponding vertex is the Fermat point. 
Otherwise the Fermat point can be computed using the construction of the Napoleon triangle, or the sum of its distances can be computed more straightforwardly using the following formula: $f(X, \, Y, \, Z) = \sqrt{\frac{a^2+b^2+c^2+4\sqrt{3}S}{2}}.$ (Here $a, \, b, \, c$ are the sidelengths and $S$ is the area of the triangle.) $f(X, \, Y, \, Z) = \sqrt{\frac{a^2+b^2+c^2+4\sqrt{3}S}{2}}.$
[ "geometry", "ternary search" ]
null
null
1662
L
Il Derby della Madonnina
The derby between Milan and Inter is happening soon, and you have been chosen as the assistant referee for the match, also known as linesman. Your task is to move along the touch-line, namely the side of the field, always looking very carefully at the match to check for offside positions and other offences. Football is an extremely serious matter in Italy, and thus it is fundamental that you keep very close track of the ball for as much time as possible. This means that you want to maximise the number of kicks which you monitor closely. You are able to monitor closely a kick if, when it happens, you are in the position along the touch-line with minimum distance from the place where the kick happens. Fortunately, expert analysts have been able to accurately predict all the kicks which will occur during the game. That is, you have been given two lists of integers, $t_1, \ldots, t_n$ and $a_1, \ldots, a_n$, indicating that $t_i$ seconds after the beginning of the match the ball will be kicked and you can monitor closely such kick if you are at the position $a_i$ along the touch-line. At the beginning of the game you start at position $0$ and the maximum speed at which you can walk along the touch-line is $v$ units per second (i.e., you can change your position by at most $v$ each second). What is the maximum number of kicks that you can monitor closely?
Let $x_i := v t_i - a_i$ and $y_i := v t_i + a_i$ for $i = 1, \, \ldots, \, n$. The main observation to solve this problem is that a sequence of kicks with indices $i_1, \, \ldots, \, i_k$ can be seen (in this order and starting from the first one) if and only if both sequences $x_{i_1}, \, \ldots, \, x_{i_k}$ and $y_{i_1}, \, \ldots, \, y_{i_k}$ are nondecreasing. To see this, observe that $x_i \leq x_j \iff v t_i - a_i \leq v t_j - a_j \iff a_j - a_i \leq v(t_j - t_i)$ $y_i \leq y_j \iff v t_i + a_i \leq v t_j + a_j \iff a_i - a_j \leq v(t_j - t_i).$ $t_i = \frac{x_i + y_i}{2 v},$ In order to impose the condition that the events can be reached starting from position $0$ at time $0$, it is enough to remove all points which cannot be reached from the origin, that is, with $|a_i| > v t_i$. We will assume that these events have been eliminated and still denote by $n$ the total number of events. We have thus reduced the problem to finding the longest increasing subsequence of the $y$-values when ordered by increasing $x$-value. More precisely, let $(p_1, \, \ldots, \, p_n)$ be the permutation of $(1, \, \ldots, \, n)$ such that $i < j \implies x_{p_i} < x_{p_j} \text{ or } (x_{p_i} = x_{p_j} \text{ and } y_{p_i} < y_{p_j}).$ There are classical algorithms to solve this efficiently in $\mathcal O(n \log n)$ time. For instance, this complexity is achieved by an approach that processes the elements from left to right and uses binary searches to update the value of the smallest possible last element of a length-$k$ increasing subsequence in every prefix for every $k$.
[ "data structures", "dp", "math" ]
null
null
1662
N
Drone Photo
Today, like every year at SWERC, the $n^2$ contestants have gathered outside the venue to take a drone photo. Jennifer, the social media manager for the event, has arranged them into an $n\times n$ square. Being very good at her job, she knows that the contestant standing on the intersection of the $i$-th row with the $j$-th column is $a_{i,j}$ years old. Coincidentally, she notices that no two contestants have the same age, and that everyone is between $1$ and $n^2$ years old. Jennifer is planning to have some contestants hold a banner with the ICPC logo parallel to the ground, so that it is clearly visible in the aerial picture. Here are the steps that she is going to follow in order to take the perfect SWERC drone photo. - First of all, Jennifer is going to select four contestants standing on the vertices of an axis-aligned rectangle. - Then, she will have the two younger contestants hold one of the poles, while the two older contestants will hold the other pole. - Finally, she will unfold the banner, using the poles to support its two ends. Obviously, this can only be done if the two poles are parallel and \textbf{do not cross}, as shown in the pictures below. Being very indecisive, Jennifer would like to try out all possible arrangements for the banner, but she is worried that this may cause the contestants to be late for the competition. How many different ways are there to choose the four contestants holding the poles in order to take a perfect photo? Two choices are considered different if at least one contestant is included in one but not the other.
For each $i = 1, \, \ldots, \, n^2$, let: $s_i$ denote the $i$-year-old contestant; $u_i$ be the number of contestants on the same row as $s_i$ who are (strictly) under $i$ years old; $v_i$ be the number of contestants on the same column as $s_i$ who are (strictly) under $i$ years old. Let $A$ be the answer to the problem, i.e. the number of ways to choose four contestants so that the poles do not cross. We claim that $2A=\sum_{i=1}^{n^2} \: [u_i(n - 1 - v_i) + (n - 1 - u_i)v_i].$ Given an axis-aligned rectangle, we say that a contestant $s$ standing on one if its vertices is intermediate if exactly one of the two contestants on vertices adjacent to $s$ is younger than $s$. Rectangles in which the poles don't cross can be characterised by the fact that exactly two of the contestants on their vertices are intermediate (see figure (a) below for an example). On the contrary, rectangles in which the poles do cross have no intermediate contestants (figure (b)). The right-hand side of the equation is the sum over all contestants $s$ of the number of rectangles for which $s$ is intermediate. Therefore, by the previous remark, this summation equals twice the number of rectangles with non-crossing poles, i.e. twice the answer to the problem.
[ "combinatorics", "math", "sortings" ]
null
null
1662
O
Circular Maze
You are given a circular maze such as the ones shown in the figures. Determine if it can be solved, i.e., if there is a path which goes from the center to the outside of the maze which does not touch any wall. The maze is described by $n$ walls. Each wall can be either circular or straight. - Circular walls are described by a radius $r$, the distance from the center, and two angles $\theta_1, \theta_2$ describing the beginning and the end of the wall in the clockwise direction. Notice that swapping the two angles changes the wall. - Straight walls are described by an angle $\theta$, the direction of the wall, and two radii $r_1 < r_2$ describing the beginning and the end of the wall. Angles are measured in degrees; the angle $0$ corresponds to the upward pointing direction; and angles increase clockwise (hence the east direction corresponds to the angle $90$).
In this task, you are given circular mazes described by collections of walls, which can be either circular (constant radius) or straight (constant angle). The goal is to determine if a maze can be solved, that is, if there is a path from the center to the outside which does not cross any wall. Reducing the problem to a grid maze The first remark to be made is that this task can be reformulated into the much simpler problem of finding a path in a grid maze (with the left and right sides of the grid being connected by some portal). The image below illustrates the grid obtained from the first input sample, where each row corresponds to a radius (between $1$ and $20$) and each column corresponds to an angle (between $0$ and $360$). A possible path solving the maze is displayed in orange. The first part of the solution consists of building the grid maze, and filling 2D arrays with the walls. Because walls do not overlap, it is sufficient to iterate over each wall (number of operations is in the order of $t \cdot 20 \cdot 360 \leq 10^6$). Observe that with overlapping walls, the naive implementation might be too slow (worst case is $t \cdot n \cdot 360 \geq 10^8$), but one could compute cumulative sums of arrays containing the endpoints of walls. Finally, we build the undirected graph where each cell is connected to the neighbouring cells such that there is no wall in between. Solving the grid maze Now that we have a grid maze, where each cell is connected to up to $4$ neighbours, we need to determine if there is a path between the inside and the outside of the maze. The only remaining detail is to describe how we deal with the inside (below radius $r=1$) and the outside of the maze (above radius $r=20$). Multiple approaches exist: Adding two "sentinel" rows of cells to the maze, one at the bottom (inside) and one at the top (outside). 
Because there are no walls in sentinel rows, it is enough to pick (arbitrarily) one cell from each row and check if there is a path between them. Adding two special cells, one for the inside and one for the outside, connected (when there is no wall) to cells from the bottom and the top row. It is now enough to check if there is a path between these two special cells. Finally, checking if there is a path between two cells can be done with various algorithms computing the connected components of a graph. Depth-first search: the procedure builds the component of a starting cell by exploring (recursively) adjacent cells, making sure it does not explore the same cell twice. Breadth-first search: the procedure builds the component of a starting cell by exploring (iteratively) cells at distance $1$, then $2$, etc. Disjoint-Set Union data structure: starting from a collection of singleton sets (each containing one cell), the data structure can merge sets (containing two neighbouring cells). The resulting collection of sets corresponds to connected components.
[ "brute force", "dfs and similar", "graphs", "implementation" ]
null
null
1665
A
GCD vs LCM
You are given a positive integer $n$. You have to find $4$ \textbf{positive} integers $a, b, c, d$ such that - $a + b + c + d = n$, and - $\gcd(a, b) = \operatorname{lcm}(c, d)$. If there are several possible answers you can output any of them. It is possible to show that the answer always exists. In this problem $\gcd(a, b)$ denotes the greatest common divisor of $a$ and $b$, and $\operatorname{lcm}(c, d)$ denotes the least common multiple of $c$ and $d$.
In this problem it is enough to print $n - 3$, $1$, $1$, $1$. It is easy to see that this answer is correct for any $n \ge 4$.
[ "constructive algorithms", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// Solution for "GCD vs LCM": split n into a + b + c + d with gcd(a, b) = lcm(c, d).
// The tuple (n - 3, 1, 1, 1) always works for n >= 4, because
// gcd(n - 3, 1) = 1 and lcm(1, 1) = 1.
static void solveCase() {
    int n;
    cin >> n;
    cout << n - 3 << ' ' << 1 << ' ' << 1 << ' ' << 1 << '\n';
}

int main() {
    int testCount;
    cin >> testCount;
    for (int tc = 0; tc < testCount; ++tc) {
        solveCase();
    }
    return 0;
}
1665
B
Array Cloning Technique
You are given an array $a$ of $n$ integers. Initially there is only one copy of the given array. You can do operations of two types: - Choose any array and clone it. After that there is one more copy of the chosen array. - Swap two elements from \textbf{any} two copies (maybe in the same copy) on any positions. You need to find the minimal number of operations needed to obtain a copy where all elements are equal.
We will use a greedy technique. Let's find the most common element in the array. Let it be $x$ and let it occur $k$ times in the array. Then let's make a copy where all elements are $x$. To do that we can make a copy of the given array and put all $x$ in one array. Now we will repeat the algorithm for the new array until we get a copy with $n$ numbers $x$.
[ "constructive algorithms", "greedy", "sortings" ]
900
#include <bits/stdc++.h>
using namespace std;

// 1665B: greedy. Let the most frequent value occur `have` times. Each round
// costs 1 operation to clone plus min(have, n - have) swaps to move copies
// of that value into one array, doubling `have` until it reaches n.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    int tests;
    cin >> tests;
    for (; tests > 0; --tests) {
        int n;
        cin >> n;
        map<int, int> freq;
        for (int i = 0; i < n; ++i) {
            int value;
            cin >> value;
            ++freq[value];
        }
        // Frequency of the most common element.
        int have = 0;
        for (const auto &[val, cnt] : freq) have = max(have, cnt);
        int ops = 0;
        while (have < n) {
            int moved = min(n - have, have);
            ops += 1 + moved; // one clone + `moved` swaps
            have += moved;
        }
        cout << ops << '\n';
    }
    return 0;
}
1665
C
Tree Infection
A tree is a connected graph without cycles. A rooted tree has a special vertex called the root. The parent of a vertex $v$ (different from root) is the previous to $v$ vertex on the shortest path from the root to the vertex $v$. Children of the vertex $v$ are all vertices for which $v$ is the parent. You are given a rooted tree with $n$ vertices. The vertex $1$ is the root. Initially, all vertices are healthy. Each second you do \textbf{two} operations, the spreading operation and, after that, the injection operation: - Spreading: for \textbf{each} vertex $v$, if at least one child of $v$ is infected, you can spread the disease by infecting at most one other child of $v$ of your choice. - Injection: you can choose any healthy vertex and infect it. This process repeats each second until the whole tree is infected. You need to find the minimal number of seconds needed to infect the whole tree.
Firstly, we can see that for any two different vertices, their children are independent. It means that infection can not spread from children of one vertex to children of another. Also it does not matter how the infection spreads among the children of some vertex, so we only need to know the amount of vertices with the same parent. Using this knowledge we can reduce the problem to this one: You are given an array of $k$ positive integers, each integer denotes the amount of healthy vertices with the same parent. Each second you can infect an integer in this array (by injection). Also each second all infected integers decrease by 1 (because of spreading). Let's now use a greedy algorithm. We will sort this array in the decreasing order and infect all integers one by one. These injections are always needed because the integers are independent. After that each second all numbers decrease by 1 and we can choose one number to be decreased once more in the same second. This should be the max number. This problem can be solved by simulating the whole process, because the sum of all integers in the beginning is $n$.
[ "binary search", "greedy", "sortings", "trees" ]
1,600
#include <bits/stdc++.h>
using namespace std;

// 1665C: total seconds used so far (shared between main and proc).
int ans;

// Simulate the remaining process on the non-increasing list of still-healthy
// sibling-group sizes: each second every started group shrinks by 1
// (spreading) and one extra unit is removed from a largest group (injection).
void proc(vector<int>& a) {
    if (a.empty()) return;
    int n = a.size();
    // Find the last position still tied with the maximum a[0]; decrementing
    // that one keeps the vector non-increasing.
    int last = 0;
    for (int i = 0; i < n; ++i) {
        if (a[i] == a[0]) {
            last = i;
        } else {
            break;
        }
    }
    --a[last];                          // injection into a largest group
    for (int i = 0; i < n; ++i) --a[i]; // spreading: every group loses one
    ++ans;                              // one second elapsed
    // Drop groups that are now fully infected.
    while (!a.empty() && a.back() <= 0) {
        a.pop_back();
    }
    proc(a);
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    int T;
    cin >> T;
    while (T --> 0) {
        int n;
        cin >> n;
        vector<int> a(n);
        ans = 0;
        // a[p] = number of children of vertex p+1; children of one vertex
        // form an independent group (infection can't cross groups).
        for (int i = 1; i < n; ++i) {
            int x;
            cin >> x;
            ++a[--x];
        }
        a.emplace_back(1); // the root itself is its own group of size 1
        sort(a.rbegin(), a.rend());
        while (!a.empty() && a.back() <= 0) a.pop_back();
        n = a.size();
        // Phase 1: inject one vertex into each group, largest first. While
        // injections proceed, group i (0-based) spreads for the remaining
        // (n - i) seconds of this phase, hence the subtraction.
        for (int i = 0; i < n; ++i) {
            a[i] = a[i] - (n - i);
            ++ans;
        }
        sort(a.rbegin(), a.rend());
        while (!a.empty() && a.back() <= 0) a.pop_back();
        // Phase 2: finish off whatever is left.
        proc(a);
        cout << ans << '\n';
    }
    return 0;
}
1665
D
GCD Guess
\textbf{This is an interactive problem.} There is a positive integer $1 \le x \le 10^9$ that you have to guess. In one query you can choose two positive integers $a \neq b$. As an answer to this query you will get $\gcd(x + a, x + b)$, where $\gcd(n, m)$ is the greatest common divisor of the numbers $n$ and $m$. To guess one hidden number $x$ you are allowed to make no more than $30$ queries.
Solution 1 Let's iteratively find the remainder of $x \bmod$ each power of $2$. Initially, we know that $x \bmod 2^0 = x \bmod 1 = 0$. If we know that $x \bmod 2^k = r$, then how do we find $x \bmod 2^{k + 1}$? To do that let's ask $\gcd(x + 2^k - r, 2^{k + 1}) = \gcd(x + 2^k - r, x + 2^k - r + 2^{k + 1})$. If $\gcd = 2^{k + 1}$, then $x \bmod 2^{k + 1} = r + 2^{k - 1}$ else $x \bmod 2^{k + 1} = r$. Using this algorithm we will find $x \bmod 2^{30}$ which is just $x$. It takes exactly $30$ queries. Solution 2 Let's consider a set of pairwise coprime numbers ${23, 19, 17, 13, 11, 9, 7, 5, 4}$. Their $\text{lcm} > 10^9$ that's why $x \bmod \text{lcm} = x$. Let's find $x \bmod$ each of these numbers. To do that, for each $1 \le i \le 23$ we can ask $\gcd(x + i, x + \text{lcm} + i)$ (the query is $(i, \text{lcm} + i)$). If the $\gcd$ is a multiple of some number from our set then $x \bmod$ this number is $-i$. After that we can use the chinese remainder theorem to find $x$ that gives the same remainders for numbers from the set. This solution asks only $23$ queries. Observation 1: It's enough to make only $22$ queries, because if we did not find anything for $1 \le i \le 22$ then we can guarantee that $i = 23$ will do. Observation 2: All moduli are small, that's why it is possible to use a simplified CRT (check the implementation).
[ "bitmasks", "chinese remainder theorem", "constructive algorithms", "games", "interactive", "math", "number theory" ]
2,000
#include <bits/stdc++.h> using namespace std; #define nl "\n" #define nf endl #define ll long long #define pb push_back #define _ << ' ' << #define INF (ll)1e18 #define mod 998244353 #define maxn 110 #define lc 1338557220 ll i, i1, j, k, k1, t, n, m, res, flag[10], a, b; ll x, rs[maxn], p; vector<ll> pw = {23, 19, 17, 13, 11, 9, 7, 5, 4}; ll ask(ll a, ll b) { cout << "?" _ a _ b << nf; ll x; cin >> x; return x; } void clm(ll x) { cout << "!" _ x << nf; } int main() { ios::sync_with_stdio(0); cin.tie(0); /* #if !ONLINE_JUDGE && !EVAL ifstream cin("input.txt"); ofstream cout("output.txt"); #endif */ // kudos for automatic wa cin >> t; while (t--) { for (i = 1; i <= 23; i++) { k = ask(x + i, lc + i); for (j = 0; j < 9; j++) { if (k % pw[j] == 0) rs[j] = i % pw[j]; } } k = 1; p = 1; for (j = 0; j < 9; j++) { // cout << "p =" _ p << nf; while (p % pw[j] != rs[j]) p += k; k *= pw[j]; } clm(lc - p); } return 0; }
1665
E
MinimizOR
You are given an array $a$ of $n$ non-negative integers, numbered from $1$ to $n$. Let's define the cost of the array $a$ as $\displaystyle \min_{i \neq j} a_i | a_j$, where $|$ denotes the bitwise OR operation. There are $q$ queries. For each query you are given two integers $l$ and $r$ ($l < r$). For each query you should find the cost of the subarray $a_{l}, a_{l + 1}, \ldots, a_{r}$.
The key idea for the solution is that the answer always lies among no more than 31 minimal numbers. According to this idea, it is possible to build a segment tree for minimum on a segment. After that we only need to find no more than 31 minimums on the segment (each time we find one we change it to $\infty$) and, finally, we can find all $OR$s pairwise among these 31 numbers. It is also possible to use the Merge Sort Tree and the same idea. Now let's prove the key idea: let's prove by induction that if all numbers are less than $2^k$ then it's enough to consider $k + 1$ minimal numbers. Base case: $k=1$, all numbers are from $0$ to $1$ and the proof is obvious. Inductive step: Let's show that for any $k \ge 1$ if for $k$ the statement is true then it's true for $k + 1$. If all numbers have 1 in $k$-th bit then the $k$-th bit of the answer is also 1, that's why we only have to minimize the remaining bits. For these bits we can apply the induction hypothesis that $k + 1$ minimal numbers are enough. If at least two numbers have 0 in their $k$-th bit then the $k$-th bit in the answer is also 0. That's why we only consider only numbers with 0 in $k$-th bit and we have to minimize the remaining bits. Again applying the induction hypothesis, $k + 1$ minimal numbers are enough. If there is exactly one number with 0 in $k$-th bit then the $k$-th bit in the answer is 1 and we have to find $k + 1$ minimal numbers over $k$ bits. They are among $k + 2$ minimal numbers over $k + 1$ bits, so $k + 2$ minimal numbers are enough. 
Problem A Idea: shishyando Polygon: shishyando Idea: shishyando Polygon: shishyando Problem B Idea: shishyando Polygon: shishyando Idea: shishyando Polygon: shishyando Problem C Idea: shishyando Polygon: shishyando Idea: shishyando Polygon: shishyando Problem D Idea: Artyom123 Polygon: shishyando Idea: Artyom123 Polygon: shishyando Problem E Idea: I_love_teraqqq Polygon: shishyando Idea: I_love_teraqqq Polygon: shishyando English translation: shishyando Special thanks: KAN for coordinating the coordinator and double checking everything Another special thanks: NEAR for supporting the Codeforces Community! Yet another special thanks: everyone who participated and tested!
[ "bitmasks", "brute force", "data structures", "divide and conquer", "greedy", "implementation", "two pointers" ]
2,500
#include <bits/stdc++.h>
#define F first
#define S second
#define all(a) a.begin(), a.end()
using namespace std;
using ll = long long;
// Template helpers (unused in this solution, author boilerplate).
template<class T> bool ckmin(T &a, T b) { return a > b ? a = b, true : false; }
template<class T> bool ckmax(T &a, T b) { return a < b ? a = b, true : false; }

// 1665E: answer each query by building the minimal OR bit by bit from the
// most significant bit. cnt[j][p] stores the sorted positions i whose value
// a[i] has prefix p over the top j+1 bits, so "how many values on [l, r]
// continue the current answer prefix with a 0 bit" is two binary searches.
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (auto &x : a) cin >> x;
    vector<unordered_map<int, vector<int>>> cnt(30);
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < 30; ++j) {
            cnt[29-j][a[i]>>j].push_back(i); // positions grouped by bit prefix
        }
    }
    int q;
    cin >> q;
    while (q--) {
        int l, r;
        cin >> l >> r;
        l--; r--;
        // pref = answer prefix built so far; tmp carries values that
        // diverged alone at some earlier bit but may still form the pair.
        int x = 0, pref = 0; // NOTE(review): x looks like unused leftover
        vector<int> tmp;
        for (int j = 0; j < 30; ++j) {
            pref*=2;
            vector<int> newtmp;
            // k = values on [l, r] whose top j+1 bits equal pref (bit = 0).
            int k = upper_bound(all(cnt[j][pref]), r) - lower_bound(all(cnt[j][pref]), l);
            int k0 = k;
            for (auto y : tmp) {
                if (((y>>(29-j))&1) == 0) k++, newtmp.push_back(y);
            }
            if (k0 == 1) {
                // Exactly one value continues with a 0 bit: remember it as
                // a carried candidate for later bits.
                int id = lower_bound(all(cnt[j][pref]), l) - cnt[j][pref].begin();
                tmp.push_back(a[cnt[j][pref][id]]);
            }
            if (k < 2) pref++;   // fewer than two zeros: answer bit must be 1
            else tmp.swap(newtmp);
        }
        cout << pref << "\n";
    }
}

int main() {
    ios::sync_with_stdio(0);
    cin.tie(0);
    int t;
    cin >> t;
    while (t--) solve();
    return 228/1337; // integer division: evaluates to 0
}
1667
A
Make it Increasing
You are given an array $a$ consisting of $n$ positive integers, and an array $b$, with length $n$. Initially $b_i=0$ for each $1 \leq i \leq n$. In one move you can choose an integer $i$ ($1 \leq i \leq n$), and add $a_i$ to $b_i$ or subtract $a_i$ from $b_i$. What is the minimum number of moves needed to make $b$ increasing (that is, every element is strictly greater than every element before it)?
If the final array is $b_1$, $b_2$ ... $b_n$, then the solution is surely suboptimal if there is an index $2 \le i \le n$ with $b_i>0$ and $b_i-a_i>b_{i-1}$, or if $b_1>0$, because there was one unnecessary move on $b_i$ or on $b_1$. Similarly it is suboptimal if $b_i<0$ and $b_i+a_i<b_{i+1}$, or if $b_n<0$. We can see that there will be a $0$ in the final array. If we fix the position of the $0$ element, then we can set the other values greedily: find the smallest value for each element which is bigger than the previous one, and similarly before that element. We can fix each element and calculate the answer for that in $O(n)$ time. The minimum of these values will be the final answer. So the final complexity is $O(n^2)$.
[ "brute force", "greedy", "math" ]
1,300
#include <bits/stdc++.h>
using namespace std;

// 1667A: some b[pos] stays 0 in an optimal solution; try every pos.
long long n, a[5005], ans=1e18;

int main() {
    cin >> n;
    for (int i=1; i<=n; i++) {
        cin >> a[i];
    }
    // Fix the index `pos` whose final value is 0, then greedily extend
    // outwards: to the left each b must be strictly smaller (more negative),
    // to the right strictly larger. By symmetry the same update works for
    // both directions on absolute values.
    for (int pos=1; pos<=n; pos++) {
        long long prev=0, sum=0;
        // prev = |b| of the previously placed element; raise it to the
        // smallest multiple of a[i] strictly greater than the old prev.
        for (int i=pos-1; i>=1; i--) {
            prev+=a[i]-prev%a[i];
            sum+=prev/a[i];  // moves spent on element i = |b_i| / a_i
        }
        prev=0;
        for (int i=pos+1; i<=n; i++) {
            prev+=a[i]-prev%a[i];
            sum+=prev/a[i];
        }
        ans=min(ans, sum);
    }
    cout << ans << "\n";
    return 0;
}
1667
B
Optimal Partition
You are given an array $a$ consisting of $n$ integers. You should divide $a$ into continuous non-empty subarrays (there are $2^{n-1}$ ways to do that). Let $s=a_l+a_{l+1}+\ldots+a_r$. The value of a subarray $a_l, a_{l+1}, \ldots, a_r$ is: - $(r-l+1)$ if $s>0$, - $0$ if $s=0$, - $-(r-l+1)$ if $s<0$. What is the maximum sum of values you can get with a partition?
Let $dp_i$ be the answer for the first $i$ elements, and $v_{(i, j)}$ the value of the subarray $[i, j]$. With prefix sums it is easy to calculate $v_{(i, j)}$ quickly. With this we can get a $n^2$ solution: $dp_i=max(dp_j+v_{(j+1, i)})$ for $j<i$. Lets call a segment winning, drawing, or losing, if the value of it is positive, $0$, or negative respectively. There is an optimal solution if the length of the drawing and losing segments are $1$. (The task is solvable without this observation, but it is harder to implement.) Proof: For a losing segment in the worst case we can get two losing segments with the same total length (the same value). For a drawing segment with length $k$ if $k$ is even than the answer is the same if we split it into two segments with length $k/2$. For odd $k$ if the sum in the first $(k-1)/2$ or last $(k-1)/2$ elements is negative, than it is possible to increase the answer, otherwise one can split the segment into $(k-1)/2$, $1$, and $(k-1)/2$ long segments, and the answer for the new partition can't lessen. So there is an optimal solution when only winning segments might be longer than $1$. It is easy to handle the $1$ long segments. For each $i$ ($1 \le i \le n$) we have to find $j$, $0<=j<i$, where $v_{(j+1, i)}>0$, and $dp_j+v_{(j+1, i)}$ is maximal ($dp_0=0$). If we store the prefix sums, and assign a permutation according to the prefix sums, than we can get all the positions $1 \le j<i$, where $v_{(j+1, i)}>0$. Than $v_{(j+1, i)}=i-j$. So when we calculate $dp_i$, we should update with $dp_i-i$. This way, finding the optimal $j$ for each $i$ is just a prefix maximum. One can solve the problem with Fenwick tree or segment tree. Final complexity is $O(n \cdot log(n))$.
[ "data structures", "dp" ]
2,100
#include <bits/stdc++.h> using namespace std; const int max_n=500005, inf=10000000; int t, n, a[max_n], dp[max_n], ord[max_n], fen[max_n]; long long pref[max_n]; // Fenwick tree with prefix maximum int lsb(int a) { return (a & -a); } void add(int pos, int val) { while (pos<=n) { fen[pos]=max(fen[pos], val); pos+=lsb(pos); } } int ask(int pos) { int val=-inf; while (pos) { val=max(fen[pos], val); pos-=lsb(pos); } return val; } int main() { ios_base::sync_with_stdio(false); cin.tie(0); cin >> t; while (t--) { cin >> n; vector<pair<long long, int> > v; for (int i=1; i<=n; i++) { cin >> a[i]; pref[i]=pref[i-1]+a[i]; v.push_back({pref[i], -i}); } sort(v.begin(), v.end()); for (int i=0; i<n; i++) { ord[-v[i].second]=i+1; } // smaller prefix sum, smaller ord[i] // if j<i they have equal prefix sums, than ord[i]<ord[j], this way we cannot count [j+1, ... i] as a winning segment for (int i=1; i<=n; i++) { fen[i]=-inf; } for (int i=1; i<=n; i++) { dp[i]=(dp[i-1]+(a[i]<0 ? -1 : a[i]>0 ? 1 : 0)); // The last segment is 1 long. dp[i]=max(dp[i], ask(ord[i])+i); if (pref[i]>0) dp[i]=i; // Segment [1, ... i] is winning, so dp[i]=i; add(ord[i], dp[i]-i); } cout << dp[n] << "\n"; for (int i=0; i<=n; i++) { a[i]=0, dp[i]=0, ord[i]=0, fen[i]=0, pref[i]=0; } } return 0; }
1667
C
Half Queen Cover
You are given a board with $n$ rows and $n$ columns, numbered from $1$ to $n$. The intersection of the $a$-th row and $b$-th column is denoted by $(a, b)$. A half-queen attacks cells in the same row, same column, and on one diagonal. More formally, a half-queen on $(a, b)$ attacks the cell $(c, d)$ if $a=c$ or $b=d$ or $a-b=c-d$. \begin{center} {\small The blue cells are under attack.} \end{center} What is the minimum number of half-queens that can be placed on that board so as to ensure that each square is attacked by at least one half-queen?Construct an optimal solution.
Let's assume that there is a solution with $k$ half-queens. Then there are at least $n-k$ rows and at least $n-k$ columns which contain no half-queen. If the uncovered rows are $r_1, r_2, \ldots, r_a$ and the uncovered columns are $c_1, c_2, \ldots, c_b$ (in increasing order), each diagonal (where the difference of the coordinates is constant) contains at most one of the following $a+b-1$ squares: $(r_a, c_1), (r_{a-1}, c_1), \ldots, (r_1, c_1), (r_1, c_2), \ldots, (r_1, c_b)$. So each of these squares must be attacked along a diagonal, hence by a different half-queen. We know that $a+b-1 \le k$, $n-k \le a$, $n-k \le b$, so $2 \cdot n \le 3 \cdot k+1$. We have a lower bound for $k$. It turns out that there is a construction achieving this $k$. For $n=3 \cdot x+2$, $k=2 \cdot x+1$: we can place $x+1$ half-queens diagonally in the top left corner and $x$ half-queens diagonally in the bottom right corner. For $n=8$ an optimal construction could be: $(1, 3)$, $(2, 2)$, $(3, 1)$, $(7, 8)$, $(8, 7)$. If $n=3 \cdot x$ or $n=3 \cdot x+1$, we can put one or two half-queens in the bottom right corner and then use the previous construction.
[ "constructive algorithms", "math" ]
2,400
#include <bits/stdc++.h>
using namespace std;

// 1667C: place the proven-minimal number of half-queens,
// k = ceil((2n-1)/3), written here as n/3 + (n+2)/3.
int main() {
    int n;
    cin >> n;
    cout << n/3+(n+2)/3 << "\n";
    if (n==1) {
        cout << 1 << " " << 1 << "\n"; // single cell: one half-queen suffices
        return 0;
    }
    // Reduce to the n = 3x+2 case: drop one or two rows/columns by placing
    // a half-queen on the bottom-right corner cell of each dropped layer.
    while (n%3!=2) {
        cout << n << " " << n << "\n";
        n--;
    }
    int a=(n+1)/3;
    // x+1 half-queens on an anti-diagonal in the top-left corner ...
    for (int i=1; i<=a; i++) {
        cout << i << " " << a+1-i << "\n";
    }
    // ... and x more on an anti-diagonal in the bottom-right corner.
    for (int i=1; i<a; i++) {
        cout << n-a+i+1 << " " << n-i+1 << "\n";
    }
    return 0;
}
1667
D
Edge Elimination
You are given a tree (connected, undirected, acyclic graph) with $n$ vertices. Two edges are adjacent if they share exactly one endpoint. In one move you can remove an arbitrary edge, if that edge is adjacent to an even number of remaining edges. Remove all of the edges, or determine that it is impossible. If there are multiple solutions, print any.
When an edge is removed, its two neighbouring vertices must have the same parity of remaining incident edges. We say that an edge is odd if this parity is odd, and even otherwise. One can see that a vertex with even degree will have the same number of odd and even incident edges, while a vertex with odd degree will have exactly one more odd edge. Starting from the leaves, we can determine the parity of each edge (an edge connected to a leaf is odd). If there is a contradiction somewhere, then the answer is NO. Otherwise, there is a construction. At each vertex, decide the removal order of its incident edges: any order is good as long as it always alternates parity and ends with an odd edge. Consider the directed graph imposed by these conditions. One can see that this graph is acyclic, so there is a topological order of that graph which satisfies all the conditions. Alternatively, it is possible to solve it recursively.
[ "constructive algorithms", "dfs and similar", "dp", "trees" ]
2,900
#include <bits/stdc++.h>
using namespace std;

const int c=200005;
// up[v] = parent of v in the DFS tree; parity[v] = parity class (odd/even)
// assigned to the edge (v, up[v]); no_sol set when the parities contradict.
int t, n, up[c];
bool parity[c], vis[c], no_sol;
vector<int> edges[c];

// Pass 1: fix the parity of every parent edge bottom-up. At each vertex the
// counts of odd (cnt[1]) and even (cnt[0]) incident edges must satisfy
// cnt[1] - cnt[0] in {0, 1} (0 for even degree, 1 for odd degree).
void dfs(int a) {
    vis[a]=true;
    int cnt[2]={0, 0};
    for (auto x:edges[a]) {
        if (!vis[x]) {
            up[x]=a;
            dfs(x);
            cnt[parity[x]]++;
        }
    }
    if (a!=1) {
        // NOTE: assignment inside `if` (empty body) is intentional — pick
        // the parent edge's parity that rebalances cnt[0]/cnt[1] here.
        if (parity[a]=(cnt[0]>=cnt[1]));
        cnt[parity[a]]++;
    }
    if (cnt[1]-cnt[0]<0 || cnt[1]-cnt[0]>1) {
        no_sol=1;
    }
}

// Pass 2: print a valid removal order. Around each vertex the incident
// edges must alternate parity and finish with an odd edge; iterating the
// two parity buckets back-to-front, starting from parity (degree mod 2),
// achieves this. Recursing into a child removes that whole subtree first.
void solve(int a) {
    vector<int> p[2];
    for (auto x:edges[a]) {
        if (x!=up[a]) {
            p[parity[x]].push_back(x);
        } else {
            p[parity[a]].push_back(a); // sentinel standing for the parent edge
        }
    }
    int si=edges[a].size(), id=si%2;
    for (int i=0; i<si; i++) {
        int val=p[id].back();
        if (val==a) {
            cout << a << " " << up[a] << "\n"; // remove the edge to the parent
        } else {
            solve(val); // remove everything hanging on this child edge
        }
        p[id].pop_back();
        id=1-id; // alternate parity classes
    }
}

int main() {
    ios_base::sync_with_stdio(false);
    cin >> t;
    for (int test=0; test<t; test++) {
        cin >> n;
        for (int i=1; i<n; i++) {
            int a, b;
            cin >> a >> b;
            edges[a].push_back(b), edges[b].push_back(a);
        }
        dfs(1);
        if (no_sol) {
            cout << "NO\n";
        } else {
            cout << "YES\n";
            solve(1);
        }
        // Reset global state for the next test case.
        for (int i=1; i<=n; i++) {
            parity[i]=0, up[i]=0, vis[i]=0;
            edges[i].clear();
        }
        no_sol=0;
    }
    return 0;
}
1667
E
Centroid Probabilities
Consider every tree (connected undirected acyclic graph) with $n$ vertices (\textbf{$n$ is odd}, vertices numbered from $1$ to $n$), and for each $2 \le i \le n$ the $i$-th vertex is adjacent to exactly one vertex with a smaller index. For each $i$ ($1 \le i \le n$) calculate the number of trees for which the $i$-th vertex will be the centroid. The answer can be huge, output it modulo $998\,244\,353$. A vertex is called a centroid if its removal splits the tree into subtrees with at most $(n-1)/2$ vertices each.
Let $S=\frac{n+1}{2}$, $binom_{i, j}=\frac{i!}{j! \cdot (i-j)!}$, $dp_i$ the result of some precalculation (see below) and $ans_i$ the final answer for the $i$-th vertex. Root the tree in vertex $1$. It is easy to see that in the possible trees the parent of vertex $2 \le i \le n$ is smaller than $i$. The cetroid will be the largest vertex, where the size of its subtree is at least $S$. For each $i$ first calculate: how many times the subtree of vertex $i$ will be at least $S$ ($dp_i$). If $i=1$, then $dp_i=(n-1)!$. If $i>S$ then the $dp_i=0$. Otherwise ($2 \le i \le S$) $dp_i = \sum_{j=S-1}^{n-i} binom_{n-i, j} \cdot j! \cdot (n-j-2)! \cdot (i-1)$ Proof: Let's assume that the size of the subtree is $j+1$. Color the subtree of vertex $i$ except $i$ red. Color every other vertex except the first and the $i$-th one to blue. We have $binom_{n-i, j}$ differnt colorings, because we have to choose $j$ values between $i+1$ and $n$. ($binom_{n-i, j}$) There are $k$ possibilities for the parent of the $k$-th smallest blue or the $k$-th smallest red vertex. ($j! \cdot (n-j-2)!$) The parent of the $i$-th vertex can be anything. ($i-1$) If we multiply all of these, we get the described formula. Then $ans_i=dp_i-\sum_{j=i+1}^{n} \frac{ans_j}{i}$ Proof: if the subtree of $i$ is at least $S$, and the centroid is not $i$, than the centroid is in the subtree of $i$. If $j$ is the centroid, there is exactly $\frac{1}{i}$ chance, that the path from $j$ to $1$ will cross vertex $i$. So we have to subract $\frac{ans_j}{i}$ for each $j>i$. This gives us an $O(n^2)$ solution, which is slow because of the two sum formulas. $\sum_{j=S-1}^{n-i} binom_{n-i, j} \cdot j! \cdot (n-j-2)! \cdot (i-1) = \sum_{j=S-1}^{n-i} \frac {(n-i)! \cdot j! \cdot (n-j-1)! \cdot (i-1)}{j! \cdot (n-i-j)!} = \sum_{j=S-1}^{n-i} \frac {(n-i)! \cdot (n-j-2)! \cdot (i-1)}{(n-i-j)!}$ $(n-i)! 
\cdot (i-1)$ is a constant (for fixed $i$), and the difference between $n-j-2$ and $n-i-j$ is $i-2$, is also a constant for fixed $i$. If we reverse the inv array, then we can calculate $\sum_{j=S-1}^{n-i} \frac{(n-j-2)!}{(n-i-j)!}$ in $n \cdot log(n)$ time for $2 \le i \le S$ with ntt. We can do the calculation of $ans_i$ in linear time if we store the suffix sums of the latter values. This gives us the final complexity: $O(n \cdot log(n))$.
[ "combinatorics", "dp", "fft", "math" ]
3,000
#include <bits/stdc++.h>
using namespace std;

// ---- NTT (number theoretic transform) - this code is not mine ----
const int _ = 1 << 20 , mod = 998244353 , G = 3;
// Fold a possibly-negative value back into [0, mod).
int upd(int x) { return x + (x >> 31 & mod); }
int add(int x , int y) { return upd(x + y - mod); }
int sub (int x , int y){ return upd(x - y); }
int mul (int a, int b) { return 1ll*a*b%mod; }
// Modular exponentiation: a^b mod `mod`.
int poww(long long a , int b) {
    int tms = 1;
    while (b) {
        if(b & 1) tms = tms * a % mod;
        a = a * a % mod;
        b >>= 1;
    }
    return tms;
}
int dir[_] , need , invnd , w[_];
// Prepare the bit-reversal permutation and twiddle factors for transforms
// of size >= len (w[] is filled incrementally across calls via static L).
void init(int len){
    static int L = 1;
    need = 1;
    while (need < len) need <<= 1;
    invnd = poww(need , mod - 2);
    for (int i = 1 ; i < need ; ++i)
        dir[i] = (dir[i >> 1] >> 1) | (i & 1 ? need >> 1 : 0);
    for (int &i = L ; i < need ; i <<= 1) {
        w[i] = 1;
        int wn = poww(G , mod / i / 2);
        for(int j = 1 ; j < i ; ++j) w[i + j] = 1ll * w[i + j - 1] * wn % mod;
    }
}
// In-place transform; tmod == -1 performs the inverse transform.
void dft(vector < int > &arr , int tmod){
    arr.resize(need);
    for (int i = 1 ; i < need ; ++i) {
        if (i < dir[i]) swap(arr[i] , arr[dir[i]]);
    }
    for(int i = 1 ; i < need ; i <<= 1) {
        for (int j = 0 ; j < need ; j += i << 1) {
            for (int k = 0 ; k < i ; ++k) {
                int x = arr[j + k] , y = 1ll * arr[i + j + k] * w[i + k] % mod;
                arr[j + k] = add(x , y);
                arr[i + j + k] = sub(x , y);
            }
        }
    }
    if(tmod == -1) {
        reverse(arr.begin() + 1 , arr.end());
        for(auto &t : arr) {
            t = 1ll * t * invnd % mod;
        }
    }
}
// Polynomial product of a and b modulo `mod`.
vector<int> multiply(vector<int> const& a, vector<int> const& b) {
    init(a.size()+b.size());
    vector<int> fa(a.begin(), a.end()), fb(b.begin(), b.end());
    dft(fa, 1);
    dft(fb, 1);
    for (int i = 0; i < need; i++) {
        fa[i] = 1ll * fa[i] * fb[i] % mod;
    }
    dft(fa, -1);
    return fa;
}

// ---- 1667E problem-specific part ----
const int max_n=200005;
long long n, fact[max_n], inv[max_n], dp[max_n], ans[max_n], suf, s;
vector<int> v1, v2, v3;
// Modular exponentiation on long long (used for modular inverses).
long long po(long long a, long long b) {
    long long res=1;
    while (b) {
        if (b%2) res=res*a%mod;
        a=a*a%mod;
        b/=2;
    }
    return res;
}
int main() {
    cin >> n;
    s=(n+1)/2; // the centroid's subtree must contain at least s vertices
    fact[0]=1, inv[0]=1;
    for (int i=1; i<=n; i++) {
        fact[i]=fact[i-1]*i%mod;
        inv[i]=po(fact[i], mod-2); // inverse factorials
    }
    // Convolve factorials with reversed inverse factorials so that
    // v3[i+s-4] = sum over the tutorial's range of (n-j-2)!/(n-i-j)!.
    for (int i=0; i<s-1; i++) {
        v1.push_back(fact[i]);
        v2.push_back(inv[i]);
    }
    reverse(v2.begin(), v2.end());
    v3=multiply(v1, v2);
    // dp[i] = number of trees in which the subtree of vertex i has size >= s.
    dp[1]=fact[n-1];
    for (int i=2; i<=s; i++) {
        dp[i]=fact[n-i]*v3[i+s-4]%mod*(i-1)%mod;
    }
    // ans[i] = dp[i] - (sum of ans[j], j > i) / i, computed with a running
    // suffix sum `suf` and modular inverse of i.
    for (int i=s; i>=1; i--) {
        ans[i]=(dp[i]-suf*po(i, mod-2)%mod+mod)%mod;
        suf=(suf+ans[i])%mod;
    }
    for (int i=1; i<=n; i++) {
        cout << ans[i] << " "; // vertices > s have ans = 0
    }
    cout << "\n";
    return 0;
}
1667
F
Yin Yang
You are given a rectangular grid with $n$ rows and $m$ columns. $n$ and $m$ are divisible by $4$. Some of the cells are already colored black or white. It is guaranteed that no two colored cells share a corner or an edge. Color the remaining cells in a way that both the black and the white cells becomes orthogonally connected or determine that it is impossible. Consider a graph, where the black cells are the nodes. Two nodes are adjacent if the corresponding cells share an edge. If the described graph is connected, the black cells are orthogonally connected. Same for white cells.
Border: cells in the first or last row or the first or last column. One can see that on the border both the black part and the white part must be connected, so there is no solution if there is a BWBW subsequence on the border. Otherwise there is a solution. Let us solve an easier task first: assume that there is no colored cell on the border. Then there is a nice construction. Color white all cells in the first column, and all cells of the $(4k+2)$-nd and $(4k+3)$-rd rows which are not in the last column. One can see that the stripes ($2$ consecutive white rows) are connected, because no two initially colored cells share a corner. Different stripes are also connected to each other through the left column. If there is a white cell in the middle of a black stripe, then it must be orthogonally adjacent to a white stripe. Similarly, the black cells will be connected too. If there are colored cells on the border, then it is impossible to do exactly this. But most of the cells can stay the same: the white stripes, and the white column on the left side. This way the white part is connected. For the black part, there might be $3$ issues: the border may not be connected to the black stripes, there may be isolated black cells in the $2$-nd or $(n-1)$-th row, and different black stripes might be unconnected. All of these issues are solvable locally by changing the color of some cells from white to black and vice versa.
[ "implementation" ]
3,500
#include <bits/stdc++.h>
using namespace std;

const int c=505;
// fix[i][j]: pre-colored cell (1 = B, 2 = W, 0 = free); ans[i][j]: coloring
// under construction. rotcnt counts 90-degree rotations applied (undone
// before printing); change counts color switches along the border.
int t, n, m, fix[c][c], ans[c][c], rotcnt, change, old_cl, new_cl;
int fix2[c][c], ans2[c][c]; // scratch buffers for rotation

// Propagate colors around the border (two laps so the values wrap around
// consistently) and count how many color changes the border contains.
void color_boundary() {
    for (int cnt=1; cnt<=2; cnt++) {
        for (int j=2; j<=m; j++) {
            if (!ans[1][j]) ans[1][j]=ans[1][j-1];
            if (ans[1][j]!=ans[1][j-1]) change++;
        }
        for (int i=2; i<=n; i++) {
            if (!ans[i][m]) ans[i][m]=ans[i-1][m];
            if (ans[i][m]!=ans[i-1][m]) change++;
        }
        for (int j=m-1; j>=1; j--) {
            if (!ans[n][j]) ans[n][j]=ans[n][j+1];
            if (ans[n][j]!=ans[n][j+1]) change++;
        }
        for (int i=n-1; i>=1; i--) {
            if (!ans[i][1]) ans[i][1]=ans[i+1][1];
            if (ans[i][1]!=ans[i+1][1]) change++;
        }
        if (!ans[1][1]) ans[1][1]=1; // fully free border: pick black
        if (cnt==1) change=0;        // only the second lap's count is valid
    }
}

// Rotate both grids by 90 degrees (and swap the dimensions).
void rotate_90() {
    rotcnt++;
    for (int i=1; i<=n; i++) {
        for (int j=1; j<=m; j++) {
            ans2[i][j]=ans[i][j], fix2[i][j]=fix[i][j];
        }
    }
    for (int i=1; i<=n; i++) {
        for (int j=1; j<=m; j++) {
            ans[j][n+1-i]=ans2[i][j];
            fix[j][n+1-i]=fix2[i][j];
        }
    }
    swap(n, m);
}

// Rotate until the left column is single-colored while the right column
// contains the opposite color somewhere strictly inside.
void good_rotation() {
    for (int cnt=1; cnt<=4; cnt++) {
        bool same=1, opposite=0;
        for (int i=1; i<=n; i++) {
            if (ans[i][1]!=ans[1][1]) same=0;
            if (1<i && i<n && ans[1][1]!=ans[i][m]) opposite=1;
        }
        if (!same || !opposite) rotate_90();
    }
}

// Base pattern: rows 4k+2 / 4k+3 get old_cl, the rest new_cl (free cells
// only; pre-colored cells keep their color).
void color_the_stripes() {
    for (int i=2; i<n; i++) {
        for (int j=2; j<m; j++) {
            if (!fix[i][j]) {
                if (i%4==2 || i%4==3) ans[i][j]=old_cl;
                else ans[i][j]=new_cl;
            }
        }
    }
}

// Fix checkerboard-like diagonal touches in the last two columns.
void avoid_touching() {
    for (int i=1; i<n; i++) {
        if (ans[i][m-1]==ans[i+1][m] && ans[i+1][m-1]==ans[i][m] && ans[i][m]!=ans[i+1][m]) {
            if (!fix[i][m]) ans[i][m]=3-ans[i][m];
            else ans[i+1][m]=3-ans[i+1][m];
        }
    }
}

// Ensure the new_cl part of the right border touches a new_cl stripe.
void boundary_stripe_connection() {
    int first=0, last=0;
    for (int i=1; i<=n; i++) {
        if (ans[i][m]==new_cl) {
            if (!first) first=i;
            last=i;
        }
    }
    if (first==0 || (last>3 && first<n-2)) return; // nothing to fix
    if (last<=3 && fix[4][m-1]==old_cl) {
        for (int i=3; i<=5; i++) {
            ans[i][m]=new_cl;
        }
        return;
    }
    if (first>=n-2 && fix[n-3][m-1]==old_cl) {
        for (int i=n-4; i<=n-2; i++) {
            ans[i][m]=new_cl;
        }
        return;
    }
    int x=(last<=3 ? 2 : n-2);
    for (int i=x; i<=x+1; i++) {
        for (int j=m-1; j<=m; j++) {
            if (!fix[i][j]) ans[i][j]=new_cl;
        }
    }
}

// A pre-colored new_cl cell in row 2 or n-1 may be cut off from every
// new_cl neighbour; recolor a short local path to reconnect it.
void connect_isolated_point(int x, int y) {
    if (ans[x-1][y]==new_cl || ans[x+1][y]==new_cl || ans[x][y+1]==new_cl) return;
    // Row indices mirrored for the top (x == 2) and bottom (x == n-1) case.
    int x1=(x==2 ? 1 : n), x2=(x==2 ? 2 : n-1), x3=(x==2 ? 3 : n-2), x4=(x==2 ? 4 : n-3);
    if (y<=m-2 && ans[x1][y+2]==new_cl) {
        ans[x1][y]=new_cl;
        ans[x1][y+1]=new_cl;
        return;
    }
    if (y<=m-2 && ans[x2][y+2]==new_cl) {
        ans[x2][y+1]=new_cl;
        return;
    }
    if (fix[x4][y]!=old_cl) {
        ans[x3][y]=new_cl;
    } else {
        int y2=(y+1<m ? y+1 : y-1);
        ans[x2][y2]=new_cl;
        ans[x3][y2]=new_cl;
    }
}

// Connect the new_cl stripes above and below the old_cl stripe occupying
// rows x, x+1 by recoloring a two-cell bridge near the left edge.
void bridge_through_the_stripe(int x) {
    for (int j=2; j<=4; j++) {
        bool good=1;
        for (int i=x-1; i<=x+2; i++) {
            if (fix[i][j]==old_cl) good=0;
        }
        if (good) {
            ans[x][j]=new_cl;
            ans[x+1][j]=new_cl;
            return;
        }
    }
    // No fully free column in 2..4: patch around the fixed cells in column 3.
    for (int i=x-1; i<=x+2; i++) {
        if (!fix[i][3]) ans[i][3]=new_cl;
    }
    if (fix[x-1][3]) {
        ans[x-1][2]=old_cl;
        ans[x][4]=new_cl;
    }
    if (fix[x+2][3]) {
        ans[x+2][2]=old_cl;
        ans[x+1][4]=new_cl;
    }
    if (fix[x][3] || fix[x+1][3]) {
        ans[x][2]=new_cl;
        ans[x+1][2]=new_cl;
    }
}

int main() {
    cin >> t;
    for (int tc=1; tc<=t; tc++) {
        cin >> n >> m;
        for (int i=1; i<=n; i++) {
            for (int j=1; j<=m; j++) {
                char c;
                cin >> c;
                fix[i][j]=(c=='B' ? 1 : c=='W' ? 2 : 0);
                ans[i][j]=fix[i][j];
            }
        }
        color_boundary();
        // >= 4 color changes means a BWBW subsequence on the border.
        if (change>=4) {
            cout << "NO\n";
        } else {
            good_rotation();
            old_cl=ans[1][1], new_cl=3-ans[1][1];
            color_the_stripes();
            avoid_touching();
            boundary_stripe_connection();
            for (int j=2; j<m; j++) {
                if (fix[2][j]==new_cl) connect_isolated_point(2, j);
                if (fix[n-1][j]==new_cl) connect_isolated_point(n-1, j);
            }
            for (int i=6; i<=n-6; i+=4) {
                if (ans[1][1]==ans[i][m] || ans[1][1]==ans[i+1][m]) bridge_through_the_stripe(i);
            }
            while (rotcnt<4) rotate_90(); // undo rotations before printing
            cout << "YES\n";
            for (int i=1; i<=n; i++) {
                for (int j=1; j<=m; j++) {
                    cout << (ans[i][j]==1 ? "B" : "W");
                }
                cout << "\n";
            }
        }
        // Reset state for the next test case (both orientations, since the
        // grid may have been rotated during processing).
        rotcnt=0, change=0;
        for (int i=1; i<=n; i++) {
            for (int j=1; j<=m; j++) {
                fix[i][j]=0, ans[i][j]=0;
                fix[j][i]=0, ans[j][i]=0;
            }
        }
    }
    return 0;
}
1668
A
Direction Change
You are given a grid with $n$ rows and $m$ columns. Rows and columns are numbered from $1$ to $n$, and from $1$ to $m$. The intersection of the $a$-th row and $b$-th column is denoted by $(a, b)$. Initially, you are standing in the top left corner $(1, 1)$. Your goal is to reach the bottom right corner $(n, m)$. You can move in four directions from $(a, b)$: up to $(a-1, b)$, down to $(a+1, b)$, left to $(a, b-1)$ or right to $(a, b+1)$. You cannot move in the same direction in two consecutive moves, and you cannot leave the grid. What is the minimum number of moves to reach $(n, m)$?
The moves are symmetrical, so we can assume that $n \ge m$. There is no solution if $m=1$ and $n \ge 3$, because one can only move up and down, but two consecutive down moves is required to reach $(n, 1)$. Otherwise, there is a solution. One should move downwards at least $n-1$ times, and it is forbidden to do that twice in a row, so another $n-2$ move is necessary ($1$ between each pair). So at least $n-1+n-2=2 \cdot n-3$ moves required. If $n+m$ is even, then one more, because the parity of $a+b$ changes after every move, and the parity is even before the first and after the last move, so the total number of moves should be even. There is a construction for that lower bound: Move alternately down and right. After reaching the $m$-th column, repeat the following sequence of moves: down, left, down, right. With this $4$ move long sequence, one can move down two times. So we will reach $(n-1, m)$, then one more move is required, or we will reach $(n, m)$. If we add all of these moves, we get the formula: if $n+m$ is even then: $2 \cdot (m-1)+4 \cdot (n-m)/2=2 \cdot n-2$,and if $n+m$ is odd then: $2 \cdot (m-1)+4 \cdot (n-m-1)/2+1=2 \cdot n-3$.
[ "implementation", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// 1668A - Direction Change: minimum number of moves from (1,1) to (n,m)
// when the same direction may never be used twice in a row.
int main() {
    ios_base::sync_with_stdio(false);
    int tests;
    cin >> tests;
    while (tests--) {
        int rows, cols;
        cin >> rows >> cols;
        // The grid is symmetric, so normalize to rows >= cols.
        if (rows < cols) swap(rows, cols);
        if (cols == 1 && rows >= 3) {
            // Only up/down moves exist, but reaching row n would require
            // two consecutive downward moves - impossible.
            cout << -1 << "\n";
        } else {
            // 2n-3 moves suffice when n+m is odd, 2n-2 when it is even
            // (the parity of a+b flips on every move).
            cout << 2 * rows - 2 - (rows + cols) % 2 << "\n";
        }
    }
    return 0;
}
1668
B
Social Distance
$m$ chairs are arranged in a circle sequentially. The chairs are numbered from $0$ to $m-1$. $n$ people want to sit in these chairs. The $i$-th of them wants at least $a[i]$ empty chairs both on his right and left side. More formally, if the $i$-th person sits in the $j$-th chair, then no one else should sit in the following chairs: $(j-a[i]) \bmod m$, $(j-a[i]+1) \bmod m$, ... $(j+a[i]-1) \bmod m$, $(j+a[i]) \bmod m$. Decide if it is possible to sit down for all of them, under the given limitations.
If there is no one between the $i$-th and $j$-th person then $max(a_i, a_j)$ free chairs should be between them. So we should find a permutation $p$ of the array $a$, when $max(p_1, p_2)+max(p_2, p_3) ... +max(p_{n-1}, p_n)+max(p_n, p_1)$ is minimal. We can assume that the array is non-decreasing ($a_i \leq a_{i+1}$). For each $i$ ($1 \le i<n$) the $i$ largest elements from $a$ ($a_{n-i+1}$ ... $a_n$) will appear in the formula at least $i+1$ times. Every element occurs in two segments, and we only can count $i-1$ segments twice. So we get a lower bound for the number of free chairs: $a_2+a_3+...+a_{n-1}+2 \cdot a_n$. This lower bound is reachable for the empty chairs if the permutation of $p$ is sorted. Because $max(p_1, p_2)=p_2$, $max(p_2, p_3)=p_3$, ... $max(p_{n-1}, p_n)=p_n$, and $max(p_n, p_1)=p_n$. They also sit on $n$ chairs. If we add all of these, we get that the answer is YES if: $n+ \sum_{i=1}^{n}(a_i)-min(a_i)+max(a_i) \le m$. ($a_i$={$a_1, a_2, ... a_n$})
[ "greedy", "math", "sortings" ]
900
#include <bits/stdc++.h>
using namespace std;

// 1668B - Social Distance: n people sit on m chairs arranged in a circle,
// person i demands a[i] empty chairs on both sides. Seating is feasible
// iff n + sum(a) - min(a) + max(a) <= m.
int main() {
    ios_base::sync_with_stdio(false);
    int tests;
    cin >> tests;
    while (tests--) {
        long long n, m;
        cin >> n >> m;
        long long total = 0, smallest = 1e9, largest = 0;
        for (long long i = 0; i < n; i++) {
            long long a;
            cin >> a;
            total += a;
            smallest = min(smallest, a);
            largest = max(largest, a);
        }
        // n occupied chairs plus the minimal number of empty chairs.
        cout << (n + total - smallest + largest <= m ? "YES" : "NO") << "\n";
    }
    return 0;
}
1669
A
Division?
Codeforces separates its users into $4$ divisions by their rating: - For Division 1: $1900 \leq \mathrm{rating}$ - For Division 2: $1600 \leq \mathrm{rating} \leq 1899$ - For Division 3: $1400 \leq \mathrm{rating} \leq 1599$ - For Division 4: $\mathrm{rating} \leq 1399$ Given a $\mathrm{rating}$, print in which division the $\mathrm{rating}$ belongs.
For this problem you just need to implement what it asks you. To be able to implement it you need to know about the "if" statement.
[ "implementation" ]
800
#include "bits/stdc++.h"
using namespace std;

// 1669A - Division?: map a Codeforces rating to its division.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int rating;
        cin >> rating;
        int division;
        if (rating >= 1900)      division = 1;
        else if (rating >= 1600) division = 2;
        else if (rating >= 1400) division = 3;
        else                     division = 4;
        cout << "Division " << division << "\n";
    }
}
1669
B
Triple
Given an array $a$ of $n$ elements, print any value that appears at least three times or print -1 if there is no such value.
Approach 1: Sort the array using an efficient sorting algorithm. For every element check if the next two in the array are equal to it. If you find such an element output it. Time complexity is $\mathcal{O}(n \log n)$. Approach 2: Notice that elements have an upper bound of $n$, you can use an auxiliary array to store the count of each value. Go through each value and see if its count is bigger than or equal to $3$. Time complexity is $\mathcal{O}(n)$.
[ "implementation", "sortings" ]
800
#include <bits/stdc++.h>
using namespace std;

// 1669B - Triple: print any value occurring at least three times, or -1.
// Values are bounded by n, so a frequency array gives an O(n) solution.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> freq(n + 1, 0);
        int result = -1;
        for (int i = 0; i < n; i++) {
            int value;
            cin >> value;
            if (++freq[value] >= 3) result = value;
        }
        cout << result << endl;
    }
}
1669
C
Odd/Even Increments
Given an array $a=[a_1,a_2,\dots,a_n]$ of $n$ positive integers, you can do operations of two types on it: - Add $1$ to \textbf{every} element with an \textbf{odd} index. In other words change the array as follows: $a_1 := a_1 +1, a_3 := a_3 + 1, a_5 := a_5+1, \dots$. - Add $1$ to \textbf{every} element with an \textbf{even} index. In other words change the array as follows: $a_2 := a_2 +1, a_4 := a_4 + 1, a_6 := a_6+1, \dots$. Determine if after any number of operations it is possible to make the final array contain only even numbers or only odd numbers. In other words, determine if you can make all elements of the array have the same parity after any number of operations. Note that you can do operations of both types any number of times (even none). Operations of different types can be performed a different number of times.
Note that after doing two operations of the same type, they are "cancelled out" in terms of parity, since we would change the parity of all elements once, then change it back again. So, we know that we will do each operation exactly $0$ or $1$ time. It is possible to check all possible cases just by simulating, or we can notice that all elements on all indices of the same parity must have the same parity, and if they do, we can always find an answer by doing just a single type of operation a single time (in case the array doesn't already contain all elements of the same parity). The time complexity is $\mathcal{O}(n)$.
[ "greedy", "implementation", "math" ]
800
#include "bits/stdc++.h"
using namespace std;

// 1669C - Odd/Even Increments: an operation flips the value-parity of all
// elements at odd (or all at even) positions, so the answer is YES iff,
// within each index-parity class, all elements already share one value-parity.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        // seen[p][q] - some element at an index of parity p has value parity q.
        bool seen[2][2] = {{false, false}, {false, false}};
        for (int i = 0; i < n; ++i) {
            int x;
            cin >> x;
            seen[i % 2][x % 2] = true;
        }
        bool mixedEven = seen[0][0] && seen[0][1];
        bool mixedOdd  = seen[1][0] && seen[1][1];
        if (mixedEven) {
            cout << "NO\n";
        } else if (mixedOdd) {
            cout << "NO\n";
        } else {
            cout << "YES\n";
        }
    }
}
1669
D
Colorful Stamp
A row of $n$ cells is given, all initially white. Using a stamp, you can stamp any two neighboring cells such that one becomes red and the other becomes blue. A stamp can be rotated, i.e. it can be used in both ways: as $\textcolor{blue}{B}\textcolor{red}{R}$ and as $\textcolor{red}{R}\textcolor{blue}{B}$. During use, the stamp must completely fit on the given $n$ cells (it cannot be partially outside the cells). The stamp can be applied multiple times to the same cell. Each usage of the stamp recolors both cells that are under the stamp. For example, one possible sequence of stamps to make the picture $\textcolor{blue}{B}\textcolor{red}{R}\textcolor{blue}{B}\textcolor{blue}{B}W$ could be $WWWWW \to WW\textcolor{brown}{\underline{\color{red}{R}\textcolor{blue}{B}}}W \to \textcolor{brown}{\underline{\color{blue}{B}\textcolor{red}{R}}}\textcolor{red}{R}\textcolor{blue}{B}W \to \textcolor{blue}{B}\textcolor{brown}{\underline{\color{red}{R}\textcolor{blue}{B}}}\textcolor{blue}{B}W$. Here $W$, $\textcolor{red}{R}$, and $\textcolor{blue}{B}$ represent a white, red, or blue cell, respectively, and the cells that the stamp is used on are marked with an underline. Given a final picture, is it possible to make it using the stamp zero or more times?
First note that parts of the picture separated by $\texttt{W}$ are independent. That is, any stamps used on one part doesn't have any impact on the other, since a character $\texttt{W}$ means no stamp has been placed on that cell. So let's split the string by $\texttt{W}$s (for example, with split() method in Python), and consider the resulting strings containing only $\texttt{R}$ and $\texttt{B}$. Call one of these parts $p$. In the final stamp we place on $p$, we must have placed $\texttt{RB}$, so it should have both the characters $\texttt{R}$ and $\texttt{B}$. Therefore, if the string has only $\texttt{R}$ or only $\texttt{B}$, the answer is NO. Otherwise, the answer is YES. Let's show it. As we have just shown, we must have $\texttt{R}$ next to $\texttt{B}$ for the string to be possible. Consider the way to make $\texttt{RBRRBBBB}$. The final stamp can be $\texttt{RBR}\underline{\texttt{RB}}\texttt{BBB}$. For the rest of the cells, we can make them one by one as below. $\texttt{WWWWWWWW} \to \underline{\texttt{RB}}\texttt{WWWWWW} \to \texttt{R}\underline{\texttt{BR}}\texttt{WWWWW} \to \texttt{RB}\underline{\texttt{RB}}\texttt{WWWW}\text{,}$ $\texttt{RBRBWWWW} \to \texttt{RBRBWW}\underline{\texttt{RB}} \to \texttt{RBRBW}\underline{\texttt{RB}}\texttt{B} \to \texttt{RBRB}\underline{\texttt{RB}}\texttt{BB}\text{.}$ Finally, we can put the final stamp to make the whole string. $\texttt{RBRBRBBB} \to \texttt{RBR}\underline{\texttt{RB}}\texttt{BBB}\text{.}$
[ "implementation" ]
1,100
# 1669D - Colorful Stamp: cells separated by 'W' are independent, and every
# maximal W-free segment must contain either both 'R' and 'B' or neither;
# a segment with only one of the two colors is impossible to produce.
for _ in range(int(input())):
    input()  # n - the length is implied by the string itself
    segments = input().split('W')
    possible = all(('R' in seg) == ('B' in seg) for seg in segments)
    print("YES" if possible else "NO")
1669
E
2-Letter Strings
Given $n$ strings, each of length $2$, consisting of lowercase Latin alphabet letters \textbf{from 'a' to 'k}', output the number of pairs of indices $(i, j)$ such that $i < j$ and the $i$-th string and the $j$-th string differ in exactly one position. In other words, count the number of pairs $(i, j)$ ($i < j$) such that the $i$-th string and the $j$-th string have \textbf{exactly} one position $p$ ($1 \leq p \leq 2$) such that ${s_{i}}_{p} \neq {s_{j}}_{p}$. The answer may not fit into 32-bit integer type, so you should use 64-bit integers like long long in C++ to avoid integer overflow.
One solution is to go through all given strings, generate all strings that differ in exactly one position, and count the number of times these strings occur in the array. A possible way to count them is by using either the map/dictionary data structure or even simpler - a frequency array. Depending on the implementation, you may need to divide the answer by $2$ because of overcounting pairs. The solution runs in $\mathcal{O}(n \log n)$ or $\mathcal{O}(n)$ depending on the implementation.
[ "data structures", "math", "strings" ]
1,200
#include <bits/stdc++.h>
using namespace std;

// 1669E - 2-Letter Strings: count pairs of length-2 strings (letters 'a'..'k')
// differing in exactly one position. For each string, add the counts of all
// previously seen strings that differ in one position, then record this one -
// counting against earlier strings only avoids double counting.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        // freq[x][y] = number of strings seen so far equal to {x, y}.
        vector<vector<int>> freq(12, vector<int>(12, 0));
        long long pairs = 0;  // may overflow 32 bits, so use long long
        for (int idx = 0; idx < n; ++idx) {
            string s;
            cin >> s;
            int u = s[0] - 'a', v = s[1] - 'a';
            for (int c = 0; c < 11; ++c) {     // 'a'..'k' inclusive
                if (c != u) pairs += freq[c][v];  // differ in position 1
                if (c != v) pairs += freq[u][c];  // differ in position 2
            }
            ++freq[u][v];
        }
        cout << pairs << "\n";
    }
}
1669
F
Eating Candies
There are $n$ candies put from left to right on a table. The candies are numbered from left to right. The $i$-th candy has weight $w_i$. Alice and Bob eat candies. Alice can eat any number of candies from the left (she can't skip candies, she eats them in a row). Bob can eat any number of candies from the right (he can't skip candies, he eats them in a row). Of course, if Alice ate a candy, Bob can't eat it (and vice versa). They want to be fair. Their goal is to eat the same total weight of candies. What is the most number of candies they can eat in total?
We can solve the problem with a two pointers technique. Let $i$ be the left pointer, initially at $1$, and $j$ be the right pointer, initially at $n$. Let's store Alice and Bob's current totals as $a$ and $b$. Let's iterate $i$ from the left to the right. For each $i$, we should do the following. Increase $a$ by $a_i$ (Alice eats the $i$-th candy). Move $j$ leftwards until Bob's total is at least Alice's total, and update $b$ every time we move. If the two pointers have crossed, then both Alice and Bob took the same candy, which is not possible. So we should exit and output the current answer. Otherwise, if $a=b$ after this step, we should update the current answer to be the value that is equal to Alice and Bob. Both $i$ and $j$ move at most $n$ times in total, so the solution runs in $\mathcal{O}(n)$.
[ "binary search", "data structures", "greedy", "two pointers" ]
1,100
# 1669F - Eating Candies: two pointers. Alice eats a prefix, Bob a suffix;
# repeatedly advance whichever side currently has the smaller total, and
# whenever the totals match, record the number of candies eaten so far.
t = int(input())
for _ in range(t):
    n = int(input())
    w = list(map(int, input().split()))
    left, right = 0, n - 1
    alice, bob = w[0], w[-1]
    best = 0
    while left < right:
        if alice == bob:
            # left+1 candies from the front, n-right candies from the back
            best = max(best, left + 1 + n - right)
        if alice <= bob:
            left += 1
            alice += w[left]
        else:
            right -= 1
            bob += w[right]
    print(best)
1669
G
Fall Down
There is a grid with $n$ rows and $m$ columns, and three types of cells: - An empty cell, denoted with '.'. - A stone, denoted with '*'. - An obstacle, denoted with the lowercase Latin letter 'o'. All stones fall down until they meet the floor (the bottom row), an obstacle, or other stone which is already immovable. (In other words, all the stones just fall down as long as they can fall.) Simulate the process. What does the resulting grid look like?
Note that the columns don't affect each other, so we can solve for each column by itself. For each column, go from the bottom to the top, and keep track of the row of the last obstacle seen; call it $\mathrm{last}$. Note that initially, $\mathrm{last}=n+1$, since we treat the floor as the $n+1$th row of obstacles. Whenever we see a new obstacle, we should update $\mathrm{last}$. Now, if we ever see a stone, we should move it to row $\mathrm{last} - 1$, since it will be one row above the last obstacle seen (it will fall on top of it). Afterwards, we should also decrease $\mathrm{last}$ by $1$, because if any future stones fall on top of it, they will land on the row above this stone. This solution works in $\mathcal{O}(nm)$. We also accepted slower solutions that run in $\mathcal{O}(n^2m)$ that simulate each stone falling.
[ "dfs and similar", "implementation" ]
1,200
#include <bits/stdc++.h>
using namespace std;

// 1669G - Fall Down: stones ('*') fall within their column until they hit the
// floor, an obstacle ('o'), or an already settled stone. Columns are
// independent, so each is processed bottom-up in O(n) -> O(nm) total.
void solve() {
    int n, m;
    cin >> n >> m;
    // vector<string> instead of the original variable-length array
    // `char g[n+7][m+7]`: VLAs are a non-standard C++ extension and can
    // overflow the stack for large grids.
    vector<string> grid(n);
    for (auto &row : grid) cin >> row;
    for (int j = 0; j < m; j++) {
        // 'last' = lowest free row in this column for the next falling stone;
        // initially the bottom row (the floor sits just below row n-1).
        int last = n - 1;
        for (int i = n - 1; i >= 0; i--) {
            if (grid[i][j] == 'o') {
                last = i - 1;  // stones now pile up above this obstacle
            } else if (grid[i][j] == '*') {
                swap(grid[i][j], grid[last][j]);  // drop the stone onto 'last'
                last--;  // the next stone lands one row higher
            }
        }
    }
    for (const auto &row : grid) cout << row << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1669
H
Maximal AND
Let $\mathsf{AND}$ denote the bitwise AND operation, and $\mathsf{OR}$ denote the bitwise OR operation. You are given an array $a$ of length $n$ and a non-negative integer $k$. You can perform \textbf{at most} $k$ operations on the array of the following type: - Select an index $i$ ($1 \leq i \leq n$) and replace $a_i$ with $a_i$ $\mathsf{OR}$ $2^j$ where $j$ is any integer between $0$ and $30$ \textbf{inclusive}. In other words, in an operation you can choose an index $i$ ($1 \leq i \leq n$) and set the $j$-th bit of $a_i$ to $1$ ($0 \leq j \leq 30$). Output the maximum possible value of $a_1$ $\mathsf{AND}$ $a_2$ $\mathsf{AND}$ $\dots$ $\mathsf{AND}$ $a_n$ after performing \textbf{at most} $k$ operations.
The optimal strategy is to greedily take the highest bit we have enough operations to set in every array element. To do this, we maintain a count for each bit with the number of elements that have it set already. The cost to set the $i$-th bit will be $n-\mathrm{count}_i$. We go from the highest bit to the lowest: If we have enough operations left, we set the bit, subtract its cost from the operations and move to the next lower bit. If we don't have enough operations, we move on to the next lower bit and don't modify the operations. The time complexity is $\mathcal{O}(n \log a_i)$.
[ "bitmasks", "greedy", "math" ]
1,300
#include "bits/stdc++.h"
using namespace std;

// 1669H - Maximal AND: greedily set the highest bits first. Setting bit b in
// every element costs n minus the number of elements already having bit b.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n, k;
        cin >> n >> k;
        vector<int> bitCount(31, 0);
        for (int i = 0; i < n; ++i) {
            int value;
            cin >> value;
            for (int b = 30; b >= 0; --b)
                if (value & (1 << b)) ++bitCount[b];
        }
        int answer = 0;
        for (int b = 30; b >= 0; --b) {
            int cost = n - bitCount[b];  // operations needed for bit b everywhere
            if (cost <= k) {
                k -= cost;
                answer += (1 << b);
            }
        }
        cout << answer << "\n";
    }
}
1670
A
Prof. Slim
{One day Prof. Slim decided to leave the kingdom of the GUC to join the kingdom of the GIU. He was given an easy online assessment to solve before joining the GIU. Citizens of the GUC were \sout{happy} sad to see the prof leaving, so they decided to hack into the system and change the online assessment into a harder one so that he stays at the GUC. After a long argument, they decided to change it into the following problem.} Given an array of $n$ integers $a_1,a_2,\ldots,a_n$, \textbf{where $a_{i} \neq 0$}, check if you can make this array sorted by using the following operation any number of times (possibly zero). An array is sorted if its elements are arranged in a non-decreasing order. - select two indices $i$ and $j$ ($1 \le i,j \le n$) such that $a_i$ and $a_j$ have \textbf{different signs}. In other words, one must be positive and one must be negative. - swap the \textbf{signs} of $a_{i}$ and $a_{j}$. For example if you select $a_i=3$ and $a_j=-2$, then they will change to $a_i=-3$ and $a_j=2$. Prof. Slim saw that the problem is still too easy and isn't worth his time, so he decided to give it to you to solve.
We can notice that to make the array sorted we must move all the negative signs to the beginning of the array. So let's say the number of negative elements is $k$. Then we must check that the first $k$ elements are non-increasing and the remaining elements are non-decreasing. Complexity is $O(n)$.
[ "greedy", "implementation", "sortings" ]
800
import java.io.*;
import java.util.StringTokenizer;

/**
 * 1670A - Prof. Slim. Swapping the signs of one positive and one negative
 * element lets us distribute the minus signs arbitrarily, so the array can be
 * sorted iff placing every minus sign on the first k elements (k = number of
 * negatives) yields a non-decreasing array.
 */
public class A {
    public static void main(String[] args) throws IOException {
        Scanner in = new Scanner(System.in);
        PrintWriter out = new PrintWriter(System.out);
        int tests = in.nextInt();
        for (int tc = 0; tc < tests; tc++) {
            int n = in.nextInt();
            int[] a = new int[n];
            int negatives = 0;
            for (int i = 0; i < n; i++) {
                a[i] = in.nextInt();
                if (a[i] < 0) negatives++;
            }
            // Move every minus sign onto the leading elements.
            for (int i = 0; i < n; i++) {
                int abs = Math.abs(a[i]);
                a[i] = i < negatives ? -abs : abs;
            }
            if (nonDecreasing(a)) {
                out.println("YES");
            } else out.println("NO");
        }
        out.flush();
    }

    /** Returns true if arr is sorted in non-decreasing order. */
    private static boolean nonDecreasing(int[] arr) {
        for (int i = 1; i < arr.length; i++)
            if (arr[i] < arr[i - 1]) return false;
        return true;
    }

    /** Minimal tokenizing reader over an input stream. */
    static class Scanner {
        BufferedReader br;
        StringTokenizer st;

        Scanner(InputStream s) {
            br = new BufferedReader(new InputStreamReader(s));
        }

        String next() throws IOException {
            while (st == null || !st.hasMoreTokens())
                st = new StringTokenizer(br.readLine());
            return st.nextToken();
        }

        int nextInt() throws IOException {
            return Integer.parseInt(next());
        }
    }
}
1670
B
Dorms War
Hosssam decided to sneak into Hemose's room while he is sleeping and change his laptop's password. He already knows the password, which is a string $s$ of length $n$. He also knows that there are $k$ special letters of the alphabet: $c_1,c_2,\ldots, c_k$. Hosssam made a program that can do the following. - The program considers the current password $s$ of some length $m$. - Then it finds all positions $i$ ($1\le i<m$) such that $s_{i+1}$ is one of the $k$ special letters. - Then it deletes all of those positions from the password $s$ \textbf{even if $s_{i}$ is a special character}. If there are no positions to delete, then the program displays an error message which has a very loud sound. For example, suppose the string $s$ is "abcdef" and the special characters are 'b' and 'd'. If he runs the program once, the positions $1$ and $3$ will be deleted as they come before special characters, so the password becomes "bdef". If he runs the program again, it deletes position $1$, and the password becomes "def". If he is wise, he won't run it a third time. Hosssam wants to know how many times he can run the program on Hemose's laptop without waking him up from the sound of the error message. Can you help him?
Let's consider the non-special characters as '0' and special characters as '1' since they are indistinguishable. So now the problem is that we have a binary string, where each '1' character removes the character before it each time the program is run. The trivial case is when there is only one '1' character, the answer then is just the number of '0' characters before it. But what if there is more than one '1' character? lets take for example when there are two '1' characters as follows: $00000010001\to 000001001\to 0000101\to 00011\to 001\to 01\to 1$ The observation here is that when the first '1' character from the right reached the second '1', it acts as if it just replaced its place, so we can say that each '1' character replaces another '1' as soon as it reaches it. So we can partition the binary string into small partitions where each partition contains only one '1' character that is the rightmost character in the partition. For example, the string $00010000001011$ can be partitioned into: $(0001),(0000001),(01),(1)$ We first calculate the amount of time each partition requires to remove all the '0' characters before it, which is basically the number of '0' characters before it. Each partition except for the first partition requires one more second to replace the '1' character in the previous partition. So the answer is the maximum time required among all the partitions.
[ "brute force", "implementation", "strings" ]
1,100
import java.io.*;
import java.util.StringTokenizer;

/**
 * 1670B - Dorms War. Each program run deletes the character in front of every
 * special character. Partition the string at special characters: a special
 * character needs one run per preceding non-special character in its
 * partition (plus the runs to consume earlier partitions, which overlap in
 * time). The answer is the maximum gap measured below.
 */
public class B {
    public static void main(String[] args) throws IOException {
        Scanner in = new Scanner(System.in);
        PrintWriter out = new PrintWriter(System.out);
        int tests = in.nextInt();
        for (int tc = 0; tc < tests; tc++) {
            int n = in.nextInt();
            char[] s = in.next().toCharArray();
            int k = in.nextInt();
            boolean[] isSpecial = new boolean[26];
            for (int i = 0; i < k; i++)
                isSpecial[in.next().charAt(0) - 'a'] = true;
            // Rightmost special character: nothing right of it is ever deleted.
            int lastSpecial = -1;
            for (int i = 0; i < n; i++)
                if (isSpecial[s[i] - 'a']) lastSpecial = i;
            int answer = 0;
            // Walk leftwards partition by partition; each step measures the
            // distance from position i back to the previous special character.
            for (int i = lastSpecial - 1; i >= 0; i--) {
                int j = i;
                while (j > 0 && !isSpecial[s[j] - 'a']) j--;
                answer = Math.max(answer, i + 1 - j);
                i = j;  // jump to the start of this partition before i--
            }
            out.println(answer);
        }
        out.flush();
    }

    /** Minimal tokenizing reader over an input stream. */
    static class Scanner {
        StringTokenizer st;
        BufferedReader br;

        Scanner(InputStream s) {
            br = new BufferedReader(new InputStreamReader(s));
        }

        String next() throws IOException {
            while (st == null || !st.hasMoreTokens())
                st = new StringTokenizer(br.readLine());
            return st.nextToken();
        }

        int nextInt() throws IOException {
            return Integer.parseInt(next());
        }
    }
}
1670
C
Where is the Pizza?
While searching for the pizza, baby Hosssam came across two permutations $a$ and $b$ of length $n$. Recall that a permutation is an array consisting of $n$ distinct integers from $1$ to $n$ in arbitrary order. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array) and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array). Baby Hosssam forgot about the pizza and started playing around with the two permutations. While he was playing with them, some elements of the first permutation got mixed up with some elements of the second permutation, and to his surprise those elements also formed a permutation of size $n$. Specifically, he mixed up the permutations to form a new array $c$ in the following way. - For each $i$ ($1\le i\le n$), he either made $c_i=a_i$ or $c_i=b_i$. - The array $c$ is a permutation. You know permutations $a$, $b$, and values at some positions in $c$. Please count the number different permutations $c$ that are consistent with the described process and the given values. Since the answer can be large, print it modulo $10^9+7$. It is guaranteed that there exists at least one permutation $c$ that satisfies all the requirements.
Let's first solve the version where the array $d$ is filled with $0$'s (in other words there is no constrain on the permutation $c$ that needs to be formed). Let's say we have the permutation $[1,2,3,4]$ as $a$ and the permutation $[3,1,2,4]$ as $b$. Suppose that we have chosen the first element of the array $c$ to be the first element of array $a$, this way we can't choose the first element of array $b$. Since we want array $c$ to be a permutation, we will have to get the first element of $b$ from $a$ (which is $3$). If we search for $3$ in array $a$ and add it to array $c$, we wont be able to choose the element of $b$ in the corresponding index (which is $2$), so we again search for $2$ in array $a$ and add it to $c$. This time, the element in $b$ at the corresponding index is $1$, which is already included in the array $c$, so we are not obliged to select another element from array a. We observe that the elements that we were obliged to choose from $a$ along with the initial element we selected $[1,2,3]$ are a permutation of the elements at the corresponding indices of $b$ $[3,1,2]$, and for each group that has a size bigger than one we have $2$ options, either we select the whole group from $a$, or we select the whole group from $b$. So the answer to this version is to just count the number of groups of size bigger than $1$ (let's say the number of groups is $p$) and print $2^p$. Now what if array $d$ is not filled with $0$'s? We just have to make sure that each group we count has $0$'s in all the corresponding indices of the group we are considering, otherwise this group has only one option and we don't count it. This solution can be implemented in many ways, but using DSU to union each group together is the most elegant way to implement it in my opinion.
[ "data structures", "dfs and similar", "dsu", "graphs", "implementation", "math" ]
1,400
import java.io.*;
import java.util.HashSet;
import java.util.StringTokenizer;

/**
 * 1670C - Where is the Pizza? Union a_i with b_i; every DSU component of size
 * greater than one gives two choices (take the whole cycle from a, or the
 * whole cycle from b) unless one of its positions is already fixed by c.
 * The answer is 2^(number of free components of size > 1) mod 1e9+7.
 */
public class C {
    static final int MOD = (int) 1e9 + 7;

    public static void main(String[] args) throws IOException {
        Scanner in = new Scanner(System.in);
        PrintWriter out = new PrintWriter(System.out);
        int tests = in.nextInt();
        for (int tc = 0; tc < tests; tc++) {
            int n = in.nextInt();
            int[] a = new int[n], b = new int[n], c = new int[n];
            for (int i = 0; i < n; i++) a[i] = in.nextInt() - 1;
            for (int i = 0; i < n; i++) b[i] = in.nextInt() - 1;
            for (int i = 0; i < n; i++) c[i] = in.nextInt() - 1;  // -1 == unfixed
            UnionFind dsu = new UnionFind(n);
            for (int i = 0; i < n; i++) dsu.unionSet(a[i], b[i]);
            HashSet<Integer> freeRoots = new HashSet<>();
            for (int i = 0; i < n; i++) freeRoots.add(dsu.findSet(i));
            // A fixed value pins its whole component to one of the two options.
            for (int i = 0; i < n; i++) {
                if (c[i] == -1) continue;
                freeRoots.remove(dsu.findSet(c[i]));
            }
            int exponent = 0;
            for (int root : freeRoots)
                if (dsu.sizeOfSet(root) > 1) exponent++;
            int answer = 1;
            for (int i = 0; i < exponent; i++)
                answer = (int) ((2L * answer) % MOD);
            out.println(answer);
        }
        out.flush();
    }

    /** Disjoint-set union with path compression and union by rank. */
    public static class UnionFind {
        int[] parent, rank, size;
        int numSets;

        UnionFind(int n) {
            parent = new int[numSets = n];
            rank = new int[n];
            size = new int[n];
            for (int i = 0; i < n; i++) {
                parent[i] = i;
                size[i] = 1;
            }
        }

        int findSet(int i) {
            return parent[i] == i ? i : (parent[i] = findSet(parent[i]));
        }

        void unionSet(int i, int j) {
            int x = findSet(i), y = findSet(j);
            if (x == y) return;
            numSets--;
            if (rank[x] > rank[y]) {
                parent[y] = x;
                size[x] += size[y];
            } else {
                parent[x] = y;
                size[y] += size[x];
                if (rank[x] == rank[y]) rank[y]++;
            }
        }

        int sizeOfSet(int i) {
            return size[findSet(i)];
        }
    }

    /** Minimal tokenizing reader over an input stream. */
    public static class Scanner {
        StringTokenizer st;
        BufferedReader br;

        Scanner(InputStream s) {
            br = new BufferedReader(new InputStreamReader(s));
        }

        String next() throws IOException {
            while (st == null || !st.hasMoreTokens())
                st = new StringTokenizer(br.readLine());
            return st.nextToken();
        }

        int nextInt() throws IOException {
            return Integer.parseInt(next());
        }
    }
}
1670
D
Very Suspicious
Sehr Sus is an infinite hexagonal grid as pictured below, controlled by MennaFadali, ZerooCool and Hosssam. They love equilateral triangles and want to create $n$ equilateral triangles on the grid by adding some straight lines. The triangles must all be empty from the inside (in other words, no straight line or hexagon edge should pass through any of the triangles). You are allowed to add straight lines parallel to the edges of the hexagons. Given $n$, what is the minimum number of lines you need to add to create at least $n$ equilateral triangles as described? \begin{center} {\small Adding two red lines results in two new yellow equilateral triangles.} \end{center}
We can notice that there are $3$ different slopes in which we can draw a line, and we can also notice that drawing the lines exactly on the edges of the hexagons will result in the creation of $2$ equilateral triangles at each intersection of $2$ lines, so we can say that: Number of equilateral triangles = 2 *( number of intersections at center of some hexagon). Now we only need to find a way to draw the lines such that it maximizes the number of intersections. The best way to do that is to keep the number of lines on all $3$ slopes as close as possible (the proof will be explained at the bottom). One way to do so is to add the lines once at each slope then repeat. Let's say that slopes are numbered $1$, $2$, and $3$, so we will add the lines as follows $1,2,3,1,2,3,$ and so on. The increase in the intersection will be the number of lines in the other two slopes added together. It will be as follows : $+0, +1, +2, +2, +3, +4, +4, +5, +6, +6, +7, +8, \ldots$ If we separate that into groups of 3 we will get $\{0,1,2\},\{2,3,4\},\{4,5,6\},\ldots$ The sum of the groups is $3,9,15,21,\ldots$ To get the sum of the first $X$ groups it will be $3X^2$. So, to get the number of intersections using $N$ lines we will first find the number of complete groups which is $\lfloor \frac{N}{3} \rfloor$ and then loop over the last group to find the total number of intersections. Now that we have a way to find the number of equilateral triangles created by $N$ lines we can find the number of lines needed to get $X$ equilateral triangles by using binary search. 
The proof that the best way to maximize the number of intersections is to keep the number of lines on all $3$ slopes as close as possible: Imagine a case in which the difference between the numbers of lines in two slopes is more than $1$. Now we can see that if we moved one line from the larger group to the smaller one, we would obtain more intersections, because after moving, the intersections with the $3$-rd slope stay the same and are not affected, while the intersections between the two slopes decrease by the size of the smaller group and increase by the size of the larger group minus $1$; so overall the number of intersections increases by at least $1$. That proves that we can't have a difference of more than $1$, and the groups must be as close as possible.
[ "binary search", "brute force", "geometry", "greedy", "implementation", "math" ]
1,700
import java.util.*;
import java.io.*;

/**
 * Codeforces 1670D — minimum number of lines needed to obtain at least n
 * equilateral triangles on the triangular grid.
 *
 * Lines are distributed round-robin over the three possible slopes; every
 * intersection at a hexagon centre contributes two equilateral triangles.
 * The triangle count is monotone in the number of lines, so the answer is
 * found by binary search.
 */
public class D {

    public static void main(String[] args) throws Exception {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        StreamTokenizer in = new StreamTokenizer(br);
        PrintWriter out = new PrintWriter(System.out);
        in.nextToken();
        int tests = (int) in.nval;
        while (tests-- > 0) {
            in.nextToken();
            out.println(sol((int) in.nval));
        }
        out.close();
    }

    /** Smallest line count whose triangle total reaches n (binary search over calc). */
    public static int sol(int n) {
        int lo = 0, hi = (int) 1e9;
        while (lo <= hi) {
            int mid = lo + (hi - lo) / 2;
            if (calc(mid) < n) {
                lo = mid + 1;
            } else {
                hi = mid - 1;
            }
        }
        return lo;
    }

    /** Triangles produced by n lines split as evenly as possible over the 3 slopes. */
    public static long calc(long n) {
        long m = n - 1;              // intersections are gained by lines 2..n
        long groups = m / 3;         // complete groups of three lines (one per slope)
        long inter = groups * groups * 3;
        for (long i = 0; i <= m % 3; i++) {
            inter += groups * 2 + i; // contribution of the partial last group
        }
        return inter * 2;            // two triangles per centre intersection
    }
}
1670
E
Hemose on the Tree
After the last regional contest, Hemose and his teammates finally qualified to the ICPC World Finals, so for this great achievement and his love of trees, he gave you this problem as the name of his team "Hemose 3al shagra" (Hemose on the tree). You are given a tree of $n$ vertices where $n$ is a power of $2$. You have to give each node and edge an integer value in the range $[1,2n -1]$ (inclusive), where all the values are distinct. After giving each node and edge a value, you should select some root for the tree such that the maximum cost of any simple path starting from the root and ending at any \textbf{node or edge} is minimized. The cost of the path between two nodes $u$ and $v$ or any node $u$ and edge $e$ is defined as the bitwise XOR of all the node's and edge's values between them, including the endpoints (note that in a tree there is only one simple path between two nodes or between a node and an edge).
Let's look at the minimum maximum value that we can get if we have an array of numbers from $[1,2^{(p+1)}-1]$ and we are trying to get any prefix xor, the answer will be $2^p$ because you can stop at the first integer that will have the bit $p$ so the answer will be $\geq 2^p$. We can apply the same concept here, for any arrangement we can start at the root and stop at the first node/edge that has the bit $p$ on. Let's try to find a construction that will make our answer always $2^p$. This is one of the valid ways. Select an arbitrary root. Put $2^p$ at the root. Create $2^p-1$ pairs from the remaining numbers of the form $(x,x+2^p)$ where $x < 2^p$ For every node we will do the following: If its parent has the bit $p$ in its value the node will take the value $x$ and the edge to the parent will take $x+2^p$. If its parent doesn't have the bit $p$ in its value the node will take the value $x+2^p$ and the edge to the parent will take $x$. Using this construction you will find that the xor value form the root will alternate between $0$ and $2^p$ and $x$ which is always $\le 2^p$ . If its parent has the bit $p$ in its value the node will take the value $x$ and the edge to the parent will take $x+2^p$. If its parent doesn't have the bit $p$ in its value the node will take the value $x+2^p$ and the edge to the parent will take $x$. Using this construction you will find that the xor value form the root will alternate between $0$ and $2^p$ and $x$ which is always $\le 2^p$ .
[ "bitmasks", "constructive algorithms", "dfs and similar", "trees" ]
2,200
import java.util.*; import java.io.*; public class E{ static ArrayList<int[]>[] adj; static int[] nodeval, edgeval; static int count, N; public static void main(String[] args) throws IOException { Scanner sc = new Scanner(System.in); PrintWriter pw = new PrintWriter(System.out); int t = sc.nextInt(); while(t-->0){ int n = sc.nextInt(); N = 1 << n; adj = new ArrayList[N]; for (int i = 0; i < N; i++) adj[i] = new ArrayList<>(); for (int i = 0; i < N - 1; i++) { int u = sc.nextInt() - 1; int v = sc.nextInt() - 1; adj[u].add(new int[]{v, i}); adj[v].add(new int[]{u, i}); } nodeval = new int[N]; edgeval = new int[N]; nodeval[0] = N; count =0; dfs(0, -1); pw.println(1); for (int i = 0; i < N; i++) pw.print(nodeval[i] + " "); pw.println(); for (int i = 0; i < N - 1; i++) pw.print(edgeval[i] + " "); pw.println(); } pw.close(); } private static void dfs(int u, int p) { for (int[] nxt : adj[u]) { int v = nxt[0]; int idx = nxt[1]; if (v == p) continue; count++; edgeval[idx] = count + ((nodeval[u] & N) != 0 ? N : 0); nodeval[v] = count + ((nodeval[u] & N) != 0 ? 0 : N); dfs(v, u); } } static class Scanner { BufferedReader br; StringTokenizer st; public Scanner(InputStream s) { br = new BufferedReader(new InputStreamReader(s)); } public Scanner(FileReader f) { br = new BufferedReader(f); } public String next() throws IOException { while (st == null || !st.hasMoreTokens()) st = new StringTokenizer(br.readLine()); return st.nextToken(); } public int nextInt() throws IOException { return Integer.parseInt(next()); } public long nextLong() throws IOException { return Long.parseLong(next()); } public double nextDouble() throws IOException { return Double.parseDouble(next()); } public int[] nextIntArr(int n) throws IOException { int[] arr = new int[n]; for (int i = 0; i < n; i++) { arr[i] = Integer.parseInt(next()); } return arr; } } }
1670
F
Jee, You See?
{During their training for the ICPC competitions, team "Jee You See" stumbled upon a very basic counting problem. After many "Wrong answer" verdicts, they finally decided to give up and \sout{destroy} turn-off the PC. Now they want your help in up-solving the problem.} You are given 4 integers $n$, $l$, $r$, and $z$. Count the number of arrays $a$ of length $n$ containing non-negative integers such that: - $l\le a_1+a_2+\ldots+a_n\le r$, and - $a_1\oplus a_2 \oplus \ldots\oplus a_n=z$, where $\oplus$ denotes the bitwise XOR operation. Since the answer can be large, print it modulo $10^9+7$.
Let's put aside the XOR constraint and only focus on the sum constraint. let $G(X)$ be the number of ways to construct $n$ integers such that their sum is at most $X$. We will construct each bit of the $n$ integers at the same time, we want to guarantee that the contribution of the sum of the bits generated at each position plus the sum of the previous bits wont exceed $X$ we only have to know the difference between the previous bits of $X$ (add 1 if the current bit is on) and the sum of the generated bits. However we know for sure that at each position we can generate at most $n$ bits which will sum to $n$ at most, so for the next position the difference will be the $2$ * (current difference - the sum of the bits at the cur position). we can see that if the current difference has a value $\geq$ $2n$ we can place any number of bits at the remaining positions. Let's define $dp[m][k]$ as the number of ways to construct the first $m$ bits of the $n$ integers such that their sum doesn't exceed $X$, where $k$ is min between (the difference between the previous bits and $X$) and $2n$. We can have $count$ from 0 to $min(k,n)$ ones placed at the current bit and for each $count$ we have $n \choose count$ ways to distribute them. Formally $dp[m][k]= \sum_{count=0}^{\min(n,k)} {n \choose count}\cdot dp[m+1][2(k-count+currentBit)]$ where $currentBit$ is one if the limit have the bit $m$ on. For the XOR constraint we only have to make sure that count is even if the current bit of Z is 0 or odd if the current bit is 1. The answer of the problem will be $G(R)-G(L-1)$.
[ "bitmasks", "combinatorics", "dp" ]
2,400
import java.io.*; import java.util.Arrays; import java.util.StringTokenizer; public class F { static final int mod = (int) 1e9 + 7; static int n; static long l, r, z; static long[][] memo; static long[][] memo2; public static long nCr(int n, int r) { if (r == 0) return 1; if (n == 0) return 0; if (memo[n][r] != -1) return memo[n][r]; return memo[n][r] = (nCr(n - 1, r) + nCr(n - 1, r - 1)) % mod; } public static void main(String[] args) throws IOException { Scanner sc = new Scanner(System.in); PrintWriter pw = new PrintWriter(System.out); int tests = 1; memo = new long[1001][1001]; for (long[] x : memo) Arrays.fill(x, -1); for (int test = 0; test < tests; test++) { n = sc.nextInt(); l = sc.nextLong(); r = sc.nextLong(); z = sc.nextLong(); long ans = (compute(r) - compute(l - 1) + mod) % mod; pw.println(ans); } pw.flush(); } private static long compute(long val) { memo2 = new long[61][2001]; for (long[] x : memo2) Arrays.fill(x, -1); return dp(60, 0, val); } private static long dp(int idx, int rem, long val) { if (rem > 2000) rem = 2000; if (rem < 0) return 0; if (idx == -1) return 1; if (memo2[idx][rem] != -1) return memo2[idx][rem]; long ans = 0; int currentBitXor = (z & 1L << idx)== 0 ? 0 : 1; for (int i = currentBitXor == 1 ? 1 : 0; i <= n; i += 2) { int currentBitSum = (val & 1L << idx) == 0 ? 
0 : 1; int nextRem = 2 * (rem + currentBitSum - i); long toAdd = (nCr(n, i) * dp(idx - 1, nextRem, val)) % mod; ans = (ans + toAdd) % mod; } return memo2[idx][rem] = ans; } public static class Scanner { StringTokenizer st; BufferedReader br; public Scanner(InputStream s) { br = new BufferedReader(new InputStreamReader(s)); } public Scanner(String s) throws FileNotFoundException { br = new BufferedReader(new InputStreamReader(new FileInputStream(s))); } public String next() throws IOException { while (st == null || !st.hasMoreTokens()) st = new StringTokenizer(br.readLine()); return st.nextToken(); } public int nextInt() throws IOException { return Integer.parseInt(next()); } public long nextLong() throws IOException { return Long.parseLong(next()); } } }
1671
A
String Building
You are given a string $s$. You have to determine whether it is possible to build the string $s$ out of strings aa, aaa, bb and/or bbb by concatenating them. You can use the strings aa, aaa, bb and/or bbb any number of times and in any order. For example: - aaaabbb can be built as aa $+$ aa $+$ bbb; - bbaaaaabbb can be built as bb $+$ aaa $+$ aa $+$ bbb; - aaaaaa can be built as aa $+$ aa $+$ aa; - abab cannot be built from aa, aaa, bb and/or bbb.
Every character in strings aa, aaa, bb and bbb has at least one character adjacent to it that is the same. So, if there is an isolated character in our string (a character that has no neighbors equal to it), we cannot build it. It's easy to see that in the other case, we can build the string: we can split it into blocks of consecutive equal characters, and since there are no isolated characters, each block will have at least $2$ characters, so it can be formed from strings of length $2$ and/or $3$ consisting of equal characters. So, the problem is reduced to checking if each character has a neighbor equal to it.
[ "implementation" ]
800
def all_chars_paired(s):
    """Return True if every character of ``s`` has an adjacent equal character.

    The string can be built from "aa"/"aaa"/"bb"/"bbb" exactly when no
    character is isolated, i.e. every maximal run of equal characters has
    length at least 2 (runs of length >= 2 decompose into pieces of 2 and 3).

    :param s: input string (lowercase letters in the original problem).
    :return: bool — buildable or not.  Empty string is vacuously buildable.
    """
    n = len(s)
    for j in range(n):
        left_ok = j > 0 and s[j] == s[j - 1]
        right_ok = j < n - 1 and s[j] == s[j + 1]
        if not (left_ok or right_ok):
            # Isolated character found: bail out early instead of scanning on.
            return False
    return True


def main():
    # t test cases, one string per line; print YES/NO per case.
    t = int(input())
    for _ in range(t):
        print('YES' if all_chars_paired(input()) else 'NO')


if __name__ == '__main__':
    main()
1671
B
Consecutive Points Segment
You are given $n$ points with integer coordinates on a coordinate axis $OX$. The coordinate of the $i$-th point is $x_i$. All points' coordinates are distinct and given in strictly increasing order. For each point $i$, you can do the following operation \textbf{no more than once}: take this point and move it by $1$ to the left or to the right (i..e., you can change its coordinate $x_i$ to $x_i - 1$ or to $x_i + 1$). In other words, for each point, you choose (separately) its new coordinate. For the $i$-th point, it can be either $x_i - 1$, $x_i$ or $x_i + 1$. Your task is to determine if you can move some points as described above in such a way that the new set of points forms a \textbf{consecutive segment} of integers, i. e. for some integer $l$ the coordinates of points should be equal to $l, l + 1, \ldots, l + n - 1$. Note that the resulting points should have \textbf{distinct} coordinates. You have to answer $t$ independent test cases.
We can see that the answer is YES if and only if there are no more than two gaps of length $1$ between the given points. If there is no gap, the answer is obviously YES. If there is only one gap of length $1$, we can just move the left (or the right) part of the set to this gap. When there are two gaps, we can move the part before the first gap to the right and the part after the second gap to the left. Of course, if there is a gap of length at least $3$ (or multiple gaps with the total length $3$), we can't move the points from the left and the right part to satisfy the middle gap. Time complexity: $O(n)$.
[ "brute force", "math", "sortings" ]
1,000
def can_align(n, xs):
    """Return True if the ``n`` strictly increasing points ``xs`` can each be
    shifted by at most 1 to occupy ``n`` consecutive integers.

    Feasible iff the total gap length ``xs[-1] - xs[0] - (n - 1)`` is at most
    2: only the two endpoint moves can shrink the span, by one unit each.

    :param n: number of points (``len(xs)``).
    :param xs: point coordinates in strictly increasing order.
    :return: bool.
    """
    return xs[-1] - xs[0] - n + 1 <= 2


def main():
    # Per test case: n, then the n coordinates on one line; print YES/NO.
    for _ in range(int(input())):
        n = int(input())
        x = list(map(int, input().split()))
        print('YES' if can_align(n, x) else 'NO')


if __name__ == '__main__':
    main()
1671
C
Dolce Vita
Turbulent times are coming, so you decided to buy sugar in advance. There are $n$ shops around that sell sugar: the $i$-th shop sells one pack of sugar for $a_i$ coins, but only \textbf{one pack to one customer} each day. So in order to buy several packs, you need to visit several shops. Another problem is that prices are increasing each day: during the first day the cost is $a_i$, during the second day cost is $a_i + 1$, during the third day — $a_i + 2$ and so on for each shop $i$. On the contrary, your everyday budget is only $x$ coins. In other words, each day you go and buy as many packs as possible with total cost not exceeding $x$. Note that if you don't spend some amount of coins during a day, you can't use these coins during the next days. Eventually, the cost for each pack will exceed $x$, and you won't be able to buy even a single pack. So, how many packs will you be able to buy till that moment in total?
Firstly, note that if we want to buy as many packs as possible, then it's optimal to buy the cheapest packs. In other words, if we sort all packs, we'll always buy a prefix of array $a$. Next, note that each day we buy some number of packs $i \in [1, n]$, so, instead of iterating through the days, we can iterate through the number of packs $i$ and for each $i$ calculate the number of days we'll buy exactly $i$ packs. Since the prices increasing and at day $k + 1$ the price is $a_i + k$, then exists last day $k_i + 1$ such that as days $1, 2, \dots, k_i + 1$ we could buy $i$ packs and at days $k_i + 2, k_i + 3, \dots$ we can't. And we can find $k_i$ as maximum possible integer solution to inequation $(a_1 + k_i) + \dots + (a_i + k_i) \le x$ or $k_i = \left\lfloor \frac{x - (a_1 + \dots + a_i)}{i} \right\rfloor$. We can calculate all $k_i$ using prefix sums $a_1 + \dots + a_i$ in linear time. As a result, we buy $n$ packs in days $(0, k_1 + 1]$; $n \cdot (k_1 + 1)$ in total; $n - 1$ packs in days $(k_1 + 1, k_2 + 1]$; $(n - 1) \cdot (k_2 - k_1)$ in total; $n - 2$ packs in days $(k_2 + 1, k_3 + 1]$; $(n - 2) \cdot (k_3 - k_2)$ in total and so on. The resulting complexity is $O(n \log{n})$ because of sort.
[ "binary search", "brute force", "greedy", "math" ]
1,200
// Codeforces 1671C "Dolce Vita": total packs buyable over all days, where the
// price of every pack rises by 1 per day and the daily budget is x.
fun main() {
    repeat(readLine()!!.toInt()) {
        // n shops, x coins of daily budget.
        val (n, x) = readLine()!!.split(' ').map { it.toInt() }
        // Sort prices: on any day it is optimal to buy the cheapest prefix.
        val a = readLine()!!.split(' ').map { it.toInt() }.sorted()
        // sum = base price of the cheapest i + 1 packs (starts with all n).
        var sum = a.sumOf { it.toLong() }
        var prevDay = -1L
        var ans = 0L
        // For each prefix size i + 1 (from n down to 1), curDay is the last
        // 0-based day on which that many packs is still affordable:
        // sum + (i + 1) * day <= x  =>  day = (x - sum) / (i + 1);
        // -1 marks "never affordable".
        for (i in n - 1 downTo 0) {
            val curDay = if (x - sum >= 0) (x - sum) / (i + 1) else -1
            // On days (prevDay, curDay] exactly i + 1 packs are bought
            // (curDay is non-decreasing as the prefix shrinks).
            ans += (i + 1) * (curDay - prevDay)
            prevDay = curDay
            sum -= a[i]
        }
        println(ans)
    }
}
1671
D
Insert a Progression
You are given a sequence of $n$ integers $a_1, a_2, \dots, a_n$. You are also given $x$ integers $1, 2, \dots, x$. You are asked to insert each of the extra integers into the sequence $a$. Each integer can be inserted at the beginning of the sequence, at the end of the sequence, or between any elements of the sequence. The score of the resulting sequence $a'$ is the sum of absolute differences of adjacent elements in it $\left(\sum \limits_{i=1}^{n+x-1} |a'_i - a'_{i+1}|\right)$. What is the smallest possible score of the resulting sequence $a'$?
Observe the cost of inserting a single element. Notice that inserting any value between the minimum of the sequence and the maximum of the sequence is free. Why is this true? The argument is similar to the algorithm of finding some $x$ such that $f(x) = 0$ for a continous function $f$ if you know some $x_1$ such that $f(x_1) < 0$ and $x_2$ such that $f(x_2) > 0$. As a more general idea, it's free to insert some value $s$ into a segment $[l; r]$ such that $a_l \le s$ and $s \le a_r$ (WLOG assume $a_l \le a_r$). Let's find the position that is free. If $r - l = 1$, then you can insert $s$ between $a_l$ and $a_r$, since it's free. Otherwise, you can choose an arbitrary position $l < i < r$. $s$ will be either between $a_i$ and $a_l$ or between $a_i$ and $a_r$ (or both of them). Descend into the one that holds to continue the search. Since the lenght decreases, at some point you will reach the segment of length $1$. How does that help? Well, you can insert $1$ somewhere, then insert $x$ somewhere. The rest of insertions will be free. Now it's an algorithmic problem. First, consider all options to insert both $1$ and $x$ between the same pair of elements. Next, assume you insert $1$ somewhere before $x$. Iterate from left to right, maintaning the lowest price to insert $1$. Try to insert $x$ at the current position and $1$ into the cheapest position before it. Then update the lowest price for inserting $1$. After you finish, reverse the sequence and solve the problem again - that will be the same as inserting $x$ before $1$. Overall complexity: $O(n)$ per testcase.
[ "brute force", "constructive algorithms", "greedy" ]
1,600
#include <bits/stdc++.h>

// Loop shorthand used throughout.
#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

// Codeforces 1671D "Insert a Progression": insert values 1..x into the
// sequence a, minimizing the sum of absolute adjacent differences.
// Key fact (editorial): inserting any value lying between some adjacent pair
// is free, so only where 1 and x end up matters.  Try "1 placed no later than
// x" on the sequence, then on its reverse (covering "x before 1").
int main() {
    int t;
    scanf("%d", &t);
    forn(_, t){
        int n, x;
        scanf("%d%d", &n, &x);
        vector<int> a(n);
        forn(i, n) scanf("%d", &a[i]);
        long long ans = 1e18;          // best total score found
        long long cur = 0;             // score of the untouched sequence
        forn(i, n - 1){
            cur += abs(a[i] - a[i + 1]);
        }
        forn(_, 2){
            // mn: cheapest extra cost of inserting 1 somewhere in the prefix
            // seen so far (initially: right before a[0]).
            long long mn = abs(a[0] - 1);
            // Option: x at the very front, 1 next to it; the run 1..x costs x - 1.
            ans = min(ans, cur + abs(a[0] - x) + (x - 1));
            forn(i, n - 1){
                // Option: x between a[i] and a[i+1], 1 at the best earlier spot (mn).
                ans = min(ans, cur + mn - abs(a[i] - a[i + 1]) + abs(a[i] - x) + abs(a[i + 1] - x));
                // Option: both 1 and x dropped between a[i] and a[i+1].
                ans = min(ans, cur - abs(a[i] - a[i + 1]) + abs(a[i] - x) + abs(a[i + 1] - 1) + (x - 1));
                // Update the cheapest place to insert 1: between a[i] and a[i+1].
                mn = min(mn, 0ll - abs(a[i] - a[i + 1]) + abs(a[i] - 1) + abs(a[i + 1] - 1));
            }
            // Option: x appended at the end, 1 at the best spot found.
            ans = min(ans, cur + mn + abs(a.back() - x));
            // Second pass handles the mirrored placements.
            reverse(a.begin(), a.end());
        }
        printf("%lld\n", ans);
    }
    return 0;
}
1671
E
Preorder
You are given a rooted tree of $2^n - 1$ vertices. Every vertex of this tree has either $0$ children, or $2$ children. All leaves of this tree have the same distance from the root, and for every non-leaf vertex, one of its children is the left one, and the other child is the right one. Formally, you are given a perfect binary tree. The vertices of the tree are numbered in the following order: - the root has index $1$; - if a vertex has index $x$, then its left child has index $2x$, and its right child has index $2x+1$. Every vertex of the tree has a letter written on it, either A or B. Let's define the character on the vertex $x$ as $s_x$. Let the preorder string of some vertex $x$ be defined in the following way: - if the vertex $x$ is a leaf, then the preorder string of $x$ be consisting of only one character $s_x$; - otherwise, the preorder string of $x$ is $s_x + f(l_x) + f(r_x)$, where $+$ operator defines concatenation of strings, $f(l_x)$ is the preorder string of the left child of $x$, and $f(r_x)$ is the preorder string of the right child of $x$. The preorder string of the tree is the preorder string of its root. Now, for the problem itself... You have to calculate the number of different strings that can be obtained as the preorder string of the given tree, if you are allowed to perform the following operation any number of times before constructing the preorder string of the tree: - choose any non-leaf vertex $x$, and swap its children (so, the left child becomes the right one, and vice versa).
In terms of preorder strings, the operation "swap two children of some vertex" means "swap two substrings of equal length in some specific location". This operation can be inverted by applying it an additional time, so for every positive integer $k$, all of the strings of length $2^k-1$ are split into equivalence classes in such a way that two strings from the same class can be transformed into each other, and two strings from different classes cannot. For each vertex, the set of its possible preorder strings is one of these classes. Let's calculate the answer for the problem recursively: let $dp_v$ be the number of preorder strings for the vertex $v$. For a leaf, the number of its preorder strings is $1$. For a vertex $x$ with children $y$ and $z$, one of the two holds: if the equivalence class for vertex $y$ is different from the equivalence class for vertex $z$, then we have to pick a string from the class of vertex $y$, pick a string from the class of vertex $z$, and choose the order in which we take them. So, $dp_x = dp_y \cdot dp_z + dp_z \cdot dp_y = 2 \cdot dp_y \cdot dp_z$; if the equivalence class for $y$ is the same as the equivalence class for $z$, then swapping $y$ and $z$ doesn't do anything, so we pick a string from the equivalence class of $y$, and then a string from the equivalence class of $z$. So, $dp_x = dp_y \cdot dp_z = dp_y^2$. The only thing we don't know is how to determine if two vertices represent the same equivalence class. The model solution uses hashing for this, but there's a much simpler method: for each vertex $v$, let $t_v$ be the lexicographically smallest string that can be a preorder string of $v$. If a vertex $x$ has children $y$ and $z$, then $t_x = \min(t_y + s_x + t_z, t_z + s_x + t_y)$, and we can calculate these strings recursively since the total length is $O(n 2^n)$ - each of $2^n-1$ characters will be present in $O(n)$ strings.
[ "combinatorics", "divide and conquer", "dp", "dsu", "hashing", "sortings", "trees" ]
2,100
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1671E "Preorder": count distinct preorder strings of a perfect
// binary tree when the children of any vertex may be swapped.  Subtrees are
// compared via a randomized, swap-invariant hash built from K independent
// (base, modulo) pairs; equal hashes are treated as "same equivalence class".
mt19937 rnd(42);
const int MOD = 998244353;
const int K = 3;

// Modular helpers; the modulus is a parameter because each of the K hash
// components lives in its own random prime field.
int add(int x, int y, int mod = MOD) {
    x += y;
    while(x >= mod) x -= mod;
    while(x < 0) x += mod;
    return x;
}
int sub(int x, int y, int mod = MOD) { return add(x, -y, mod); }
int mul(int x, int y, int mod = MOD) { return (x * 1ll * y) % mod; }
int binpow(int x, int y, int mod = MOD) {
    int z = 1;
    while(y > 0) {
        if(y % 2 == 1) z = mul(z, x, mod);
        y /= 2;
        x = mul(x, x, mod);
    }
    return z;
}

// Trial-division primality test (inputs here are at most ~1.1e9).
bool prime(int x) {
    for(int i = 2; i * 1ll * i <= x; i++)
        if(x % i == 0) return false;
    return true;
}
// Random small prime base / large prime modulus for one hash component.
int get_base() { int x = rnd() % 10000 + 4444; while(!prime(x)) x++; return x; }
int get_modulo() { int x = rnd() % int(1e9) + int(1e8); while(!prime(x)) x++; return x; }

// hs: one hash value = K residues, one per (base, modulo) pair.
typedef array<int, K> hs;
hs base, modulo;
void generate_hs() {
    for(int i = 0; i < K; i++) {
        base[i] = get_base();
        modulo[i] = get_modulo();
    }
}

// Componentwise modular arithmetic on hash tuples.
hs operator+(const hs& a, const hs& b) {
    hs c;
    for(int i = 0; i < K; i++) {
        c[i] = add(a[i], b[i], modulo[i]);
    }
    return c;
}
hs operator-(const hs& a, const hs& b) {
    hs c;
    for(int i = 0; i < K; i++) {
        c[i] = sub(a[i], b[i], modulo[i]);
    }
    return c;
}
hs operator*(const hs& a, const hs& b) {
    hs c;
    for(int i = 0; i < K; i++) {
        c[i] = mul(a[i], b[i], modulo[i]);
    }
    return c;
}
// a ^ b: componentwise modular exponentiation (used below as base ^ child_hash).
hs operator^(const hs& a, const hs& b) {
    hs c;
    for(int i = 0; i < K; i++) {
        c[i] = binpow(a[i], b[i], modulo[i]);
    }
    return c;
}
// Hash of a single character ('A' -> 1, 'B' -> 2 in every component).
hs char_hash(char c) {
    hs res;
    for(int i = 0; i < K; i++) res[i] = c - 'A' + 1;
    return res;
}

const int N = 18;
const int V = 1 << N;
string s;
char buf[V + 43];
int n;
// ans[x]: number of distinct preorder strings of the subtree rooted at x (mod MOD).
int ans[V + 43];
hs vertex_hash[V + 43];

// 0-indexed heap layout: children of x are 2x+1 and 2x+2; x is a leaf when
// its left child index falls outside the 2^n - 1 vertices.
bool is_leaf(int x) { return (x * 2 + 1) >= ((1 << n) - 1); }

// Bottom-up: a vertex's hash combines its character with base ^ (child hash)
// for each child; the sum is symmetric in the children, so equal hashes mean
// the two subtrees belong to the same swap-equivalence class.
void rec(int x) {
    vertex_hash[x] = char_hash(s[x]);
    ans[x] = 1;
    if(is_leaf(x)) {
        return;
    }
    rec(x * 2 + 1);
    rec(x * 2 + 2);
    vertex_hash[x] = vertex_hash[x] + (base ^ vertex_hash[2 * x + 1]) + (base ^ vertex_hash[2 * x + 2]);
    ans[x] = mul(ans[2 * x + 1], ans[2 * x + 2]);
    // Distinct child classes: choosing their left/right order doubles the count.
    if(vertex_hash[2 * x + 1] != vertex_hash[2 * x + 2]) ans[x] = mul(ans[x], 2);
}

int main() {
    generate_hs();
    scanf("%d", &n);
    scanf("%s", buf);
    s = buf;
    rec(0);
    cout << ans[0] << endl;
}
1671
F
Permutation Counting
Calculate the number of permutations $p$ of size $n$ with exactly $k$ inversions (pairs of indices $(i, j)$ such that $i < j$ and $p_i > p_j$) and exactly $x$ indices $i$ such that $p_i > p_{i+1}$. Yep, that's the whole problem. Good luck!
A lot of solutions which were written during the contest use Berlekamp-Messey or some other algorithms related to analyzing linear recurrences, but the model solution is based on other principles. First of all, if the number of inversions is at most $11$, it means that most elements of the permutation will stay at their own places, and those which don't stay at their places can't be too far away from them. Let's denote a block $[l, r]$ in a permutation as a segment of indices $[l, r]$ such that: all elements less than $l$ are to the left of the block; all elements greater than $r$ are to the right of the block; all elements from $[l, r]$ belong to the block. Let's say that a block is non-trivial if it contains at least two elements. Suppose we split a permutation into the maximum number of blocks. Then, for each block, we can see that: if its length is $b$, it has at least $b-1$ inversions (to prove it, you can use the fact that the number of inversions is equal to the number of swaps of adjacent elements required to sort the permutation; and if we cannot split the block into other blocks, it means that we have to swap each pair of adjacent elements in it at least once to sort it) if the block is non-trivial, it has at least one $i$ such that $p_i > p_{i+1}$. From these two facts, we can see that: there will be at most $11$ non-trivial blocks; there will be at most $22$ elements in total belonging to non-trivial blocks; the maximum possible length of a block is $12$. The main idea of the solution is to calculate the following dynamic programming: $dp_{i,j,a,b}$ is the number of ways to split $j$ elements into $i$ non-trivial blocks such that there are exactly $b$ inversions in them and exactly $a$ pairs $p_i > p_{i+1}$. Then, to get the answer for the test case "$n$ $k$ $x$", we can iterate on the number of non-trivial blocks and the number of elements in them, and choose the elements belonging to that blocks with a binomial coefficient. 
The only thing that's left is how to calculate this dynamic programming efficiently. There are a few ways to do it, but the model solution uses a table $cnt_{a,b,c}$ - the number of different non-trivial blocks of length $a$ with $b$ elements $p_i > p_{i+1}$ and $c$ inversions - to handle transitions. This table is not very big, so you can run an exhaustive search for $2$-$3$ minutes to calculate it and then just paste its results into the source code of your program. Note that you have to make sure that you consider only the blocks which cannot be split any further.
[ "brute force", "combinatorics", "dp", "fft", "math" ]
2,700
// Counts permutations of length n with exactly k inversions and x descents (k, x <= 11).
// Editorial idea: such a permutation decomposes into "non-trivial blocks" of total length
// <= 22, each block of length <= 12 being an indivisible segment that is permuted in place.
// cnt[a][b][c] = number of indivisible blocks of length a with b descents and c inversions
// (precomputed offline by exhaustive search and pasted in as literals).
// dp[i][j][a][b] = ways to form i non-trivial blocks over j elements with a descents and
// b inversions in total. add/sub/mul/binpow are arithmetic mod 998244353.
#include <bits/stdc++.h> using namespace std; const int K = 13; const int MOD = 998244353; int n, k, x; int cnt[K][K][K]; int dp[K][2 * K][K][K]; int add(int x, int y) { x += y; while(x >= MOD) x -= MOD; while(x < 0) x += MOD; return x; } int sub(int x, int y) { return add(x, -y); } int mul(int x, int y) { return (x * 1ll * y) % MOD; } int binpow(int x, int y) { int z = 1; while(y > 0) { if(y % 2 == 1) z = mul(z, x); y /= 2; x = mul(x, x); } return z; } void precalc() { cnt[2][1][1] = 1; cnt[3][1][2] = 2; cnt[3][2][3] = 1; cnt[4][1][3] = 3; cnt[4][1][4] = 1; cnt[4][2][3] = 1; cnt[4][2][4] = 4; cnt[4][2][5] = 3; cnt[4][3][6] = 1; cnt[5][1][4] = 4; cnt[5][1][5] = 2; cnt[5][1][6] = 2; cnt[5][2][4] = 4; cnt[5][2][5] = 12; cnt[5][2][6] = 12; cnt[5][2][7] = 9; cnt[5][2][8] = 3; cnt[5][3][5] = 2; cnt[5][3][6] = 4; cnt[5][3][7] = 6; cnt[5][3][8] = 6; cnt[5][3][9] = 4; cnt[5][4][10] = 1; cnt[6][1][5] = 5; cnt[6][1][6] = 3; cnt[6][1][7] = 4; cnt[6][1][8] = 3; cnt[6][1][9] = 1; cnt[6][2][5] = 10; cnt[6][2][6] = 28; cnt[6][2][7] = 35; cnt[6][2][8] = 35; cnt[6][2][9] = 30; cnt[6][2][10] = 17; cnt[6][2][11] = 8; cnt[6][3][5] = 1; cnt[6][3][6] = 13; cnt[6][3][7] = 29; cnt[6][3][8] = 41; cnt[6][3][9] = 44; cnt[6][3][10] = 45; cnt[6][3][11] = 30; cnt[6][4][7] = 1; cnt[6][4][8] = 4; cnt[6][4][9] = 7; cnt[6][4][10] = 7; cnt[6][4][11] = 11; cnt[7][1][6] = 6; cnt[7][1][7] = 4; cnt[7][1][8] = 6; cnt[7][1][9] = 6; cnt[7][1][10] = 6; cnt[7][1][11] = 2; cnt[7][2][6] = 20; cnt[7][2][7] = 55; cnt[7][2][8] = 80; cnt[7][2][9] = 95; cnt[7][2][10] = 101; cnt[7][2][11] = 94; cnt[7][3][6] = 6; cnt[7][3][7] = 50; cnt[7][3][8] = 118; cnt[7][3][9] = 186; cnt[7][3][10] = 230; cnt[7][3][11] = 260; cnt[7][4][7] = 3; cnt[7][4][8] = 18; cnt[7][4][9] = 48; cnt[7][4][10] = 85; cnt[7][4][11] = 113; cnt[7][5][10] = 2; cnt[7][5][11] = 4; cnt[8][1][7] = 7; cnt[8][1][8] = 5; cnt[8][1][9] = 8; cnt[8][1][10] = 9; cnt[8][1][11] = 11; cnt[8][2][7] = 35; cnt[8][2][8] = 96; cnt[8][2][9] = 155; cnt[8][2][10] = 207; 
// Continuation of the hard-coded cnt[][][] table, followed by:
//   inv[i] - modular inverse of i (filled in prepare(), used by C);
//   C(n,k) - binomial coefficient computed in O(k) multiplications (0 on invalid args);
//   prepare() - fills inv[], the cnt table, then the dp table by appending one more
//   indivisible block (length add_cnt, add_desc descents, add_inv inversions) per step.
cnt[8][2][11] = 250; cnt[8][3][7] = 21; cnt[8][3][8] = 145; cnt[8][3][9] = 358; cnt[8][3][10] = 616; cnt[8][3][11] = 859; cnt[8][4][7] = 1; cnt[8][4][8] = 26; cnt[8][4][9] = 124; cnt[8][4][10] = 313; cnt[8][4][11] = 567; cnt[8][5][9] = 3; cnt[8][5][10] = 16; cnt[8][5][11] = 53; cnt[9][1][8] = 8; cnt[9][1][9] = 6; cnt[9][1][10] = 10; cnt[9][1][11] = 12; cnt[9][2][8] = 56; cnt[9][2][9] = 154; cnt[9][2][10] = 268; cnt[9][2][11] = 389; cnt[9][3][8] = 56; cnt[9][3][9] = 350; cnt[9][3][10] = 898; cnt[9][3][11] = 1654; cnt[9][4][8] = 8; cnt[9][4][9] = 126; cnt[9][4][10] = 552; cnt[9][4][11] = 1404; cnt[9][5][9] = 4; cnt[9][5][10] = 48; cnt[9][5][11] = 204; cnt[9][6][11] = 1; cnt[10][1][9] = 9; cnt[10][1][10] = 7; cnt[10][1][11] = 12; cnt[10][2][9] = 84; cnt[10][2][10] = 232; cnt[10][2][11] = 427; cnt[10][3][9] = 126; cnt[10][3][10] = 742; cnt[10][3][11] = 1967; cnt[10][4][9] = 36; cnt[10][4][10] = 448; cnt[10][4][11] = 1887; cnt[10][5][9] = 1; cnt[10][5][10] = 43; cnt[10][5][11] = 357; cnt[10][6][11] = 6; cnt[11][1][10] = 10; cnt[11][1][11] = 8; cnt[11][2][10] = 120; cnt[11][2][11] = 333; cnt[11][3][10] = 252; cnt[11][3][11] = 1428; cnt[11][4][10] = 120; cnt[11][4][11] = 1302; cnt[11][5][10] = 10; cnt[11][5][11] = 252; cnt[11][6][11] = 5; cnt[12][1][11] = 11; cnt[12][2][11] = 165; cnt[12][3][11] = 462; cnt[12][4][11] = 330; cnt[12][5][11] = 55; cnt[12][6][11] = 1; } int inv[K]; int C(int n, int k) { if(n < 0 || n < k || k < 0) return 0; int res = 1; for(int i = n; i > n - k; i--) res = mul(res, i); for(int i = 1; i <= k; i++) res = mul(res, inv[i]); return res; } void prepare() { for(int i = 1; i < K; i++) inv[i] = binpow(i, MOD - 2); precalc(); dp[0][0][0][0] = 1; for(int i = 0; i < K; i++) for(int j = 0; j < 2 * K; j++) for(int a = 0; a < K - 2; a++) for(int b = 0; b < K - 2; b++) { if(dp[i][j][a][b] == 0) continue; for(int add_cnt = 2; add_cnt < K; add_cnt++) for(int add_desc = 1; add_desc <= K - 2; add_desc++) for(int add_inv = 1; add_inv <= K - 2; add_inv++) { if(j + 
// (continuation of the dp transition's bounds check)
// solve(): per query "n k x", answer = sum over (#blocks i, elements covered j) of
// dp[i][j][x][k] * C(n - j + i, i) - the binomial chooses where the i non-trivial
// blocks sit among the fixed-point elements. k == x == 0 is the identity-only case.
add_cnt >= 2 * K || a + add_desc > K - 2 || b + add_inv > K - 2) continue; int& nw = dp[i + 1][j + add_cnt][a + add_desc][b + add_inv]; nw = add(nw, mul(dp[i][j][a][b], cnt[add_cnt][add_desc][add_inv])); } } } void solve() { scanf("%d %d %d", &n, &k, &x); if(k == 0 && x == 0) { puts("1"); return; } int ans = 0; for(int i = 1; i < K; i++) for(int j = 1; j < 2 * K; j++) if(dp[i][j][x][k] != 0) { ans = add(ans, mul(dp[i][j][x][k], C(n - j + i, i))); } printf("%d\n", ans); } int main() { prepare(); int t; scanf("%d", &t); for(int i = 0; i < t; i++) solve(); }
1672
A
Log Chopping
There are $n$ logs, the $i$-th log has a length of $a_i$ meters. Since chopping logs is tiring work, errorgorn and maomao90 have decided to play a game. errorgorn and maomao90 will take turns chopping the logs with \textbf{errorgorn chopping first}. On his turn, the player will pick a log and chop it into $2$ pieces. If the length of the chosen log is $x$, and the lengths of the resulting pieces are $y$ and $z$, then $y$ and $z$ have to be \textbf{positive integers}, and $x=y+z$ must hold. For example, you can chop a log of length $3$ into logs of lengths $2$ and $1$, but not into logs of lengths $3$ and $0$, $2$ and $2$, or $1.5$ and $1.5$. The player who is unable to make a chop will be the loser. Assuming that both errorgorn and maomao90 play optimally, who will be the winner?
No matter what move each player does, the result of the game will always be the same. Count the number of moves. Let us consider the ending state of the game. It turns out that at the ending state, we will only have logs of $1$ meter. Otherwise, players can make a move. Now, at the ending state of the game, we will have $\sum\limits_{k=1}^n a_k$ logs. And each move we increase the number of logs by exactly $1$. Since we started with $n$ logs, there has been exactly $(\sum\limits_{k=1}^n a_k) - n$ turns. Alternatively, a log of length $a_k$ will be cut $a_k-1$ times, so there will be $\sum\limits_{k=1}^n (a_k-1)$ turns. If there were an odd number of turns $\texttt{errorgorn}$ wins, otherwise $\texttt{maomao90}$ wins.
[ "games", "implementation", "math" ]
800
// Log Chopping: every chop increases the number of logs by exactly one, and the game
// ends when all logs have length 1, so precisely sum(a_i - 1) moves are made no matter
// how the players play. errorgorn (first to move) wins iff that total is odd.
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tc;
    cin >> tc;
    while (tc--) {
        int n;
        cin >> n;
        long long moves = 0;  // total chops in the whole game
        for (int i = 0; i < n; i++) {
            long long len;
            cin >> len;
            moves += len - 1;  // a log of length len is chopped len-1 times
        }
        cout << (moves % 2 == 0 ? "maomao90" : "errorgorn") << '\n';
    }
    return 0;
}
1672
B
I love AAAB
Let's call a string \textbf{good} if its length is at least $2$ and all of its characters are $A$ except for the last character which is $B$. The good strings are $AB,AAB,AAAB,\ldots$. Note that $B$ is \textbf{not} a good string. You are given an initially empty string $s_1$. You can perform the following operation any number of times: - Choose any position of $s_1$ and insert some good string in that position. Given a string $s_2$, can we turn $s_1$ into $s_2$ after some number of operations?
Pretend that we can only insert $\texttt{AB}$. What if we replaced $\texttt{A}$ and $\texttt{B}$ with $\texttt{(}$ and $\texttt{)}$? Claim: The string is obtainable if it ends in $\texttt{B}$ and every prefix of the string has at least as many $\texttt{A}$ as $\texttt{B}$. An alternative way to think about the second condition is to assign $\texttt{A}$ to have a value of $1$ and $\texttt{B}$ to have a value of $-1$. Then, we are just saying that each prefix must have a non-negative sum. This is pretty similar to bracket sequences. Now, both conditions are clearly necessary, let us show that they are sufficient too. We will explicitly construct the string (in the reverse direction). While there are more than $1$ occurrences of $\texttt{B}$ in the string, remove the first occurrence of $\texttt{AB}$. After doing this process, you will eventually end up with the string $\texttt{AAA...AAB}$.
[ "constructive algorithms", "implementation" ]
800
// I love AAAB: a string is obtainable iff it ends with 'B' and, treating 'A' as +1 and
// 'B' as -1, every prefix sum is non-negative (bracket-sequence argument from the
// editorial: repeatedly remove the first "AB" occurrence).
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tc;
    cin >> tc;
    while (tc--) {
        string s;
        cin >> s;
        bool good = (s.back() == 'B');   // must end with the closing character
        int balance = 0;                 // (#A - #B) over the prefix read so far
        for (char c : s) {
            balance += (c == 'A') ? 1 : -1;
            if (balance < 0) good = false;
        }
        cout << (good ? "YES" : "NO") << '\n';
    }
    return 0;
}
1672
C
Unequal Array
You are given an array $a$ of length $n$. We define the \textbf{equality} of the array as the number of indices $1 \le i \le n - 1$ such that $a_i = a_{i + 1}$. We are allowed to do the following operation: - Select two integers $i$ and $x$ such that $1 \le i \le n - 1$ and $1 \le x \le 10^9$. Then, set $a_i$ and $a_{i + 1}$ to be equal to $x$. Find the minimum number of operations needed such that the equality of the array is less than or equal to $1$.
If the array is $a=[1,1,\ldots,1]$. We will need $0$ moves if $n \leq 2$ and will need $\max(n-3,1)$ moves. The only way to reduce the number of $i$ such that $a_i = a_{i+1}$ is when $a_i = a_{i+1}$ and $a_{i+2} = a_{i+3}$, and you apply the operation on $a_{i+1}$ and $a_{i+2}$. Suppose $l$ is the smallest index where $a_l = a_{l+1}$ and r is the largest index where $a_r = a_{r+1}$. If $l=r$ or $l$ and $r$ does not exist, the condition is already satisfied and we can do 0 operations. Otherwise, the answer is $\max(1, r - l - 1)$. The proof is as follows: Suppose $l+1=r$, then, there are 3 elements that are adjacent to each other. Hence, we can just do one operation with $i=l$ and $x=\infty$ to make the equality of the array 1. Suppose otherwise, then the array will look something like $[..., X, X, ..., Y, Y, ...]$, with $r - l - 2$ elements between the second $X$ and the first $Y$. Then, we can do operations on $i={l+1, l+2, \ldots, r-2, r-1}$ to make the equality of the array 1. To see why we need at least $r - l - 1$ operations, observe that each operation will cause $r - l$ to decrease by at most 1. This is because if we do not do an operation on $i \in \{l-1,l+1,r-1,r+1\}$, then both $a_l = a_{l+1}$ and $a_r = a_{r+1}$ will still hold. We see that $r-l$ only decreases when do we an operation on $i \in {l+1,r-1}$ and it is not too hard to show that it only decreases by $1$ in those cases while $r-l > 2$ Hence, we keep doing the operation until $r - l = 2$, which will only require 1 operation to change both pairs and make the equality 1.
[ "constructive algorithms", "greedy", "implementation" ]
1,100
// Unequal Array: let `first` be the smallest index i with a[i] == a[i-1] and `last`
// the largest. If they coincide (or neither exists) the equality is already <= 1,
// so the answer is 0; otherwise it is max(1, last - first - 1) (see editorial).
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (int &v : a) cin >> v;
        int first = -1, last = -1;  // boundaries of the equal adjacent pairs
        for (int i = 1; i < n; i++) {
            if (a[i] == a[i - 1]) {
                if (first == -1) first = i;
                last = i;
            }
        }
        if (first == last) cout << 0 << '\n';
        else cout << max(1, last - first - 1) << '\n';
    }
    return 0;
}
1672
D
Cyclic Rotation
There is an array $a$ of length $n$. You may perform the following operation any number of times: - Choose two indices $l$ and $r$ where $1 \le l < r \le n$ and $a_l = a_r$. Then, set $a[l \ldots r] = [a_{l+1}, a_{l+2}, \ldots, a_r, a_l]$. You are also given another array $b$ of length $n$ which is a permutation of $a$. Determine whether it is possible to transform array $a$ into an array $b$ using the above operation some number of times.
The operation of cyclic shift is equivalent to deleting $a_i$ and duplicating $a_j$ where $a_i = a_j$. Reverse the process. We will solve the problem by reversing the steps and transform array $b$ to array $a$. We can do the following operation on $b$: pick index $i<j$ such that $b_j=b_{j+1}$ and remove $b_{j+1}$ and insert it after position $i$. Now, for every consecutive block of identical elements in $b$, we can remove all but one element from it and move it left. If we process from right to left, we can imagine it as taking consecutive elements in $b$ out and placing them in a reserve, and using them to match some elements in $a$ towards the left. Using this idea, we can use the following greedy two-pointer algorithm: Let $i$ and $j$ represent the size of $a$ and $b$ respectively (and hence $a_i$ and $b_j$ will refer to the last elements of $a$ and $b$ respectively). We also have an initially empty multiset $S$, which represents the reserve. We will perform the following operations in this order: While $b_{j-1}=b_j$, add $b_j$ to the multiset $S$ and decrement $j$. If $a_i = b_j$, then decrement both $i$ and $j$. Otherwise, we delete an occurrence of $a_i$ in $S$ and decrement $i$. If we cannot find $a_i$ in $S$, it is impossible to transform $b$ to $a$. Let us define an array $c$ where all elements of $c$ are $1$. We can rephrase the problem in the following way: Choose $i<j$ such that $a_i = a_j$ and $c_i>0$. Then update $c_i \gets c_i-1$ and $c_j \gets c_j+1$. The final array $b$ is obtained by the following: Let $b$ be initially empty, then iterate $i$ from $1$ to $n$ and add $c_i$ copies of $a_i$ to $b$. As such, we can consider mapping elements of $b$ into elements of $a$. More specifically, consider a mapping $f$ where $f$ is non-decreasing, $b_i=a_{f_i}$ and we increment $c_{f_i}$ by $1$ for all $i$. All that remains is to determine if we can make such a mapping such that $c$ is valid. 
Notice that if all elements of $a$ are identical, the necessary and sufficient condition for a valid array $c$ is that $c_1+c_2+\ldots+c_i \leq i$ for all $i$. This motivates us to construct an array $pa$ where $pa_i$ is the number of indices $j \leq i$ such that $a_i=a_j$. Analogously, construct $pb$. Then the necessary and sufficient conditions for a mapping $f$ is that $f$ is non-decreasing, $b_i=a_{f_i}$ and $pb_i \leq pa_{f_i}$. A greedy algorithm to construct $f$, if it exists, is trivial by minimizing $f_i$ for each $i$.
[ "constructive algorithms", "greedy", "implementation", "two pointers" ]
1,700
// Cyclic Rotation: editorial's greedy mapping. pa[i] = number of occurrences of a[i]
// among a[1..i]; pb likewise for b. b maps into a via a non-decreasing f with
// b[i] = a[f(i)] and pb[i] <= pa[f(i)]; we greedily take the smallest feasible f(i)
// for each i (the pointer is NOT advanced on a match - one position of a may absorb
// several consecutive equal elements of b).
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tc;
    cin >> tc;
    while (tc--) {
        int n;
        cin >> n;
        vector<int> a(n + 1), b(n + 1), pa(n + 1);
        vector<int> freq(n + 1, 0);  // occurrence counter, reused for a then for b
        for (int i = 1; i <= n; i++) cin >> a[i];
        for (int i = 1; i <= n; i++) cin >> b[i];
        for (int i = 1; i <= n; i++) pa[i] = ++freq[a[i]];
        fill(freq.begin(), freq.end(), 0);
        int ptr = 1;    // smallest candidate position in a not yet ruled out
        bool ok = true;
        for (int i = 1; i <= n && ok; i++) {
            int need = ++freq[b[i]];  // pb[i]
            while (ptr <= n && (a[ptr] != b[i] || pa[ptr] < need)) ptr++;
            if (ptr > n) ok = false;  // no feasible image for b[i]
        }
        cout << (ok ? "YES" : "NO") << '\n';
    }
    return 0;
}
1672
E
notepad.exe
\textbf{This is an interactive problem.} There are $n$ words in a text editor. The $i$-th word has length $l_i$ ($1 \leq l_i \leq 2000$). The array $l$ is hidden and only known by the grader. The text editor displays words in lines, splitting each two words in a line with at least one space. Note that a line does not have to end with a space. Let the height of the text editor refer to the number of lines used. For the given \textbf{width}, the text editor will display words in such a way that the height is minimized. More formally, suppose that the text editor has \textbf{width} $w$. Let $a$ be an array of length $k+1$ where $1=a_1 < a_2 < \ldots < a_{k+1}=n+1$. $a$ is a \textbf{valid} array if for all $1 \leq i \leq k$, $l_{a_i}+1+l_{a_i+1}+1+\ldots+1+l_{a_{i+1}-1} \leq w$. Then the \textbf{height} of the text editor is the minimum $k$ over all valid arrays. Note that if $w < \max(l_i)$, the text editor cannot display all the words properly and will crash, and the height of the text editor will be $0$ instead. You can ask $n+30$ queries. In one query, you provide a width $w$. Then, the grader will return the height $h_w$ of the text editor when its width is $w$. Find the minimum area of the text editor, which is the minimum value of $w \cdot h_w$ over all $w$ for which $h_w \neq 0$. The lengths are fixed in advance. In other words, \textbf{the interactor is not adaptive}.
Find the sum of lengths of words. Given a height, how many "good" widths are there. The first idea that we have is that we want to be able to find the minimum possible width of the text editor for a specific height. We can do this in $n\log (n \cdot 2000)$ queries using binary search for each height. This is clearly not good enough, so let us come up with more observations. First, we can binary search for the minimum possible width for height $1$. This value is $(\sum_{i=1}^n l_i)+n-1$ which we will denote with $S$. Let us consider what we might want to query for height $h$. Suppose that the words are arranged very nicely such that there are no trailing spaces in each line. Then, the total number of characters will be $S - h +1$. This means that the minimum possible area for height $h$ will be $S-h+1$. We also know that if the area is more than $S$, it will not be useful as the area for $h=1$ is already $S$. Now, we know that the range of possible areas that we are interested in is $[S - h +1, S]$. There is a total of $h$ possible areas that it can take, and an area is only possible if $h \cdot w = a$, or in other words, the area is divisible by $h$. Among the $h$ consecutive possible values, exactly one of them will be divisible by $h$, hence we can just query that one value of $w$ which can very nicely be found as $\lfloor \frac{S}{h} \rfloor$. The total number of queries used is $n + \log(n \cdot 2000)$ where $n$ comes from the single query for each height and the $\log(n \cdot 2000)$ comes from the binary search at the start.
[ "binary search", "constructive algorithms", "greedy", "interactive" ]
2,200
// notepad.exe (interactive): first binary search for the smallest width S giving
// height 1 (S = sum(l_i) + n - 1, per the editorial). For each height h, the only
// width worth querying is floor(S / h); the answer is the minimum h_w * w over all
// those candidates. Uses n + O(log) queries.
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;

ll n;

// Send width w to the grader and return the reported height h_w.
// Width 0 is answered locally as 0 (height is 0 by definition); a reply of -1
// means the interaction must stop immediately.
ll query(ll w) {
    if (w == 0) return 0;
    cout << "? " << w << endl;  // std::endl flushes - required for interaction
    ll h;
    cin >> h;
    if (h == -1) exit(0);
    return h;
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    cin >> n;
    // Binary search for the smallest width whose height is exactly 1.
    ll lo = -2, hi = 5e6;
    while (hi - lo > 1) {
        ll mid = (lo + hi) >> 1;
        if (query(mid) == 1) hi = mid;
        else lo = mid;
    }
    ll best = 1e9;
    for (ll h = 1; h <= n; h++) {
        ll w = hi / h;            // candidate width for height h
        ll got = query(w);
        if (got != 0) best = min(best, got * w);
    }
    cout << "! " << best << endl;
    return 0;
}
1672
F2
Checker for Array Shuffling
oolimry has an array $a$ of length $n$ which he really likes. Today, you have changed his array to $b$, a permutation of $a$, to make him sad. Because oolimry is only a duck, he can only perform the following operation to restore his array: - Choose two integers $i,j$ such that $1 \leq i,j \leq n$. - Swap $b_i$ and $b_j$. The \textbf{sadness} of the array $b$ is the minimum number of operations needed to transform $b$ into $a$. Given the arrays $a$ and $b$, where $b$ is a permutation of $a$, determine if $b$ has the maximum sadness over all permutations of $a$.
The number of occurrences of the most frequent element is important. Let $X$ be the number of occurrences of the most common element. The maximal sadness is $N-X$. For every minimal sequence of swaps, we have duality of maximal number of cycles when considering the graph with edges $a_i \to b_i$. Let $N$ be the length of $A$ and $B$. We want to prove that an optimal swapping from $B \to A$ is equivalent to sorting via some cycles. Suppose our swap order is $\{(l_1,r_1),(l_2,r_2),\ldots,(l_K,r_K)\}$. Let's consider a graph $G$ with edges being the swaps. Suppose the number of connected components in $G$ is $CC$, then there is a way to perform the transformation $B \to A$ using $CC$ cycles since we can view the labels of each connected component of $G$ as a permutation of the original vertices. One cycle of length $X$ uses $X-1$ swaps, so we use $N-CC$ swaps in total. Since $CC \geq N-K$, we can always change the swap order to swapping cycles while not performing a bigger number of moves. Now we have changed the problem to maximizing the number of cycles we use. Let $cnt_x$ be the number of occurrences of $x$ in $A$. WLOG $cnt_1 \geq cnt_2 \geq \ldots$. Let $s_A(B)$ denote the sadness of $B$ when the original array is $A$. Claim: $\max(s_A) \leq N-cnt_1$ Proof: By the pigeonhole principle, we know there exists a cycle with $2$ occurrences of the element $1$. Consider a cycle that swaps $i_1 \to i_2 \to \ldots \to i_K \to i_1$ where $A_{i_1}=A_{i_z}=1$. Then we can increase the number of connected components while maintaining $B$ by splitting into $2$ cycles $i_1 \to i_2 \to \ldots \to i_{z-1} \to i_1$ and $i_z \to i_{z+1} \to \ldots \to i_K \to i_z$. Therefore, in an optimal solution, there should not be a cycle that passes through the same value twice. $\blacksquare$ Therefore, we can assume that all occurrences of $1$ belong to different cycles. Therefore, $\#cyc \geq cnt_1$ swaps are used. The number of swaps used is $N-\#cyc \leq N-cnt_1$. 
Therefore, $N-cnt_1$ is an upper bound of $s$. Claim: $s_A(B)<N-cnt_1$ $\Leftrightarrow$ there exists a cycle $i_1 \to i_2 \to \ldots \to i_K \to i_1$ such that all $i_x \neq 1$. Proof: $(\Rightarrow)$ There exists a cycle decomposition of the graph that uses at least $cnt_1+1$ cycles. Since a single element of $1$ can only go to a single cycle, there exists a cycle without $1$. $(\Leftarrow)$ Let's remove this cycle to form arrays $A'$ and $B'$. Then $s_{A'}(B') \leq N-K-cnt_1$. Now, we only needed $K-1$ swaps to remove the cycle, so it must be that $s_A(B) \leq (N-K-cnt_1)+(K-1)=N-cnt_1-1$. $\blacksquare$ To construct a permutation such that $s(B)=N-cnt_1$, let's construct a graph $G_{cnt}$ based on the number of occurrences of each element in $A$. We draw $cnt_{i+1}$ edges from $(i) \to (i+1)$ and $cnt_{i}-cnt_{i+1}$ edges from $(i) \to (1)$. It is obviously impossible to find a cycle that does not contain $1$, since all edges not entering $(1)$ are of the form $(i) \to (i+1)$. Another way to construct this permutation is to assume that $A$ is sorted. Then we perform $cnt_1$ cyclic shifts on $A$ to obtain $B$. Given the graph representation, finding such a cycle $i_1 \to i_2 \to \ldots \to i_K \to i_1$ such that all $i_x \neq 1$ is easy. Let's remove $1$ from the graph and then check if the graph is a DAG.
[ "constructive algorithms", "dfs and similar", "graphs" ]
2,800
// Checker for Array Shuffling: draw an edge a[i] -> b[i] for every position. b has
// maximum sadness iff, after deleting the most frequent value of a from the graph,
// the remainder is acyclic (any cycle avoiding that value would permit one extra
// cycle in the swap decomposition - see editorial). Prints "AC" if maximal, else "WA".
#include <bits/stdc++.h>
using namespace std;

const int MAXV = 200005;
vector<int> adj[MAXV];
bool seen[MAXV], inStack[MAXV];
bool foundCycle;

// DFS with an "on current path" marker: a back edge to the path means a cycle.
void dfs(int u) {
    seen[u] = inStack[u] = true;
    for (int v : adj[u]) {
        if (inStack[v]) foundCycle = true;
        if (!seen[v]) dfs(v);
    }
    inStack[u] = false;
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tc;
    cin >> tc;
    while (tc--) {
        int n;
        cin >> n;
        for (int v = 1; v <= n; v++) {
            adj[v].clear();
            seen[v] = inStack[v] = false;
        }
        vector<int> a(n + 1), b(n + 1);
        for (int i = 1; i <= n; i++) cin >> a[i];
        for (int i = 1; i <= n; i++) cin >> b[i];
        for (int i = 1; i <= n; i++) adj[a[i]].push_back(b[i]);
        int best = 1;  // value of a with the most occurrences (largest out-degree)
        for (int v = 1; v <= n; v++)
            if (adj[v].size() > adj[best].size()) best = v;
        seen[best] = true;  // exclude the most frequent value from the search
        foundCycle = false;
        for (int v = 1; v <= n; v++)
            if (!seen[v]) dfs(v);
        cout << (foundCycle ? "WA" : "AC") << '\n';
    }
    return 0;
}
1672
G
Cross Xor
There is a grid with $r$ rows and $c$ columns, where the square on the $i$-th row and $j$-th column has an integer $a_{i, j}$ written on it. Initially, all elements are set to $0$. We are allowed to do the following operation: - Choose indices $1 \le i \le r$ and $1 \le j \le c$, then replace all values on the same row or column as $(i, j)$ with the value xor $1$. In other words, for all $a_{x, y}$ where $x=i$ or $y=j$ or both, replace $a_{x, y}$ with $a_{x, y}$ xor $1$. You want to form grid $b$ by doing the above operations a finite number of times. However, some elements of $b$ are missing and are replaced with '?' instead. Let $k$ be the number of '?' characters. Among all the $2^k$ ways of filling up the grid $b$ by replacing each '?' with '0' or '1', count the number of grids, that can be formed by doing the above operation a finite number of times, starting from the grid filled with $0$. As this number can be large, output it modulo $998244353$.
We need to consider $4$ cases based on the parity of $r$ and $c$. Let $R_i$ and $C_j$ denote $\bigotimes\limits_{j=1}^c a_{i,j}$ and $\bigotimes\limits_{i=1}^r a_{i,j}$ respectively, or the xor-sum of the $i$-th row and the xor-sum of the $j$-th column respectively. For some cases, $R$ and $C$ will be constant sequences no matter what sequence of operations we perform. The obvious necessary conditions for $R$ and $C$ are actually sufficient. When at least one of $r$ or $c$ is even, counting the valid grids is easy. When both $r$ and $c$ are odd, consider drawing the bipartite graph where edges are cells with $\texttt{?}$. Let $R_i$ and $C_j$ denote $\bigotimes\limits_{j=1}^c a_{i,j}$ and $\bigotimes\limits_{i=1}^r a_{i,j}$ respectively, or the xor-sum of the $i$-th row and the xor-sum of the $j$-th column respectively. We will split the problem into 3 cases. Choose some $(x,y)$ and do an operation on all $(i,j)$ where $i=x$ or $j=y$. The effect of this series of operations is toggling $(x,y)$. All possible grids are reachable. Counting them is easy. If $r$ is odd and $c$ is even, we can treat it as the same case by swapping a few variables. Notice that every operation toggles all elements in $R$. It is necessary that all values in $R$ are the same; let us prove that this is sufficient as well. Now suppose $R$ is all $0$. If $R$ is all $1$, we can perform the operation on $(1,1)$ and now $R$ is all $0$. If we pick $1 \leq x \leq r$ and $1 \leq y < c$ and perform operations on all $(i,j)$ where $i \neq x$ and $j=y$ or $j=c$, then it is equivalent to toggling $(x,y)$ and $(x,c)$. We can perform the following new operation: pick $1 \leq x \leq r$ and $1 \leq y < c$, toggle $(x,y)$,$(x,c)$. Since $R$ is all $0$, each row has an even number of $1$s. If we apply the new operation on all $(x,y)$ where $a_{x,y} = 1$ and $y < c$, then $(x,c)$ will be $0$ in the end. Hence, the whole grid will be $0$. Notice that every operation toggles all elements in $R$ and $C$. 
It is necessary that both $R$ and $C$ have all values the same; let us prove that this is sufficient as well. Suppose $R$ is all $0$ and $C$ is all $0$. If $R$ and $C$ are all $1$, we apply the operation on $(1,1)$ to make $R$ and $C$ both all $0$. Suppose we pick $1 \leq x_1 < x_2 \leq r$ and $1 \leq y_1 < y_2 \leq c$. Let $S=\{(x_1,y_1), (x_1,y_2), (x_2,y_1),(x_2,y_2)\}$. When we perform operations on all cells in $S$, it is equivalent to toggling all cells in $S$. We can perform the following new operation: pick $1 \leq x < r$ and $1 \leq y < c$, toggle $(x,y)$,$(x,c)$,$(r,y)$,$(r,c)$. Since $R$ and $C$ are all $0$, each row and column has an even number of $1$s. If we apply the new operation on all $(x,y)$ where $a_{x,y} = 1$ and $x < r$ and $y < c$, then $(x,c)$ will be $0$ for $0 < x < r$ and $(r,y)$ will be $0$ for $0 < y < c$ in the end. And hence, $a_{r,c} = 0$ too since $R$ and $C$ are all $0$. Hence, the whole grid will be $0$. Thanks to dario2994 for writing this. Let $V = Z_2^{nm}$. $V$ is endowed with the natural scalar product, which induces the concept of orthogonality. Let $M$ be the subspace generated by the moves. Let $M^{\perp}$ be the space orthogonal to $M$. It is a basic result in linear algebra that $(M^{\perp})^{\perp} = M$. One can see that $\{(x1, y1), (x1, y2), (x2, y1), (x2, y2)\}$ belongs to $M$ (it is a combination of 4 moves). Thus one deduces that if $u \in M^{\perp}$ then $u_{x,y} = a_x \oplus b_y$ for two vectors $a\in Z_2^r, b \in Z_2^c$. Given $a, b$; the scalar product between $u$ and the move centered at $(x, y)$ is: $xor(a) \oplus xor(b) \oplus (c+1)a_x \oplus (r+1)b_y$. Assume that $u$ is in $M^{\perp}$: If $r, c$ are both even, then $a_x$ and $b_y$ must be constant and equal each other. Thus $M^{\perp}$ is only the $0$ vector. If $r$ is even and $c$ is odd, then $b_y$ is constant. Hence $M^{\perp}$ is generated by any two rows. If $r$ is odd and $c$ is even, analogous. 
If $r$ and $c$ are both odd, then the only condition is $xor(a) \oplus xor(b) = 0$. This is necessary and sufficient for the orthogonality. And it implies that $M^{\perp}$ is generated by any two rows and any two columns. Since we determined $M^{\perp}$, we have determined also $M$. Case 1 and 2 are the easy cases while counting case 3 is more involved. All grids are obtainable. Let $\#?$ denote the number of $\texttt{?}$s in the grid. Then the answer is $2^{\#?}$ since all grid are obtainable. If $r$ is odd and $c$ is even, we can treat it as the same case by swapping a few variables. Let us fix whether we want $R=[0,0,\ldots,0]$ or $R=[1,1,\ldots,1]$. We will count the number of valid grids for each case. Let $\#?_i$ denote the number of $\texttt{?}$s in the $i$-th row. If $\#?_i>0$, then then number of ways to set the $i$-th row is $2^{\#?_i-1}$. Otherwise, the number of ways is either $0$ to $1$ depending on the initial value of $R_i$. Let us define a bipartite graph with vertices $r+c$ vertices, labelled $V_{R,i}$ for $1 \leq i \leq r$ and $V_{C,j}$ for $1 \leq j \leq c$. If $a_{i,j}=\texttt{?}$, then we will add an (undirected) edge $V_{R,i} \leftrightarrow V_{C,j}$. Now we assume that each $\texttt{?}$ is set to $\texttt{0}$ at first. We will choose a subset of them to turn into $\texttt{1}$. When we do this on $a_{i,j}$, the value of $R_i$ and $C_j$ will toggle. In terms of the graph, this corresponds to assigning $0$ or $1$ to each edge. When we assign $1$ to the edge connecting $V_{R,i}$ and $V_{C,j}$, then $R_i$ and $C_j$ will toggle. We can consider $R_i$ and $C_j$ to be the weight of the vertices $V_{R,i}$ and $V_{C,j}$ respecitvely. Consider a connected component of this bipartite graph. Choose an arbitrary spanning tree of this connected component. By assinging the weights of the edges in the spanning tree, we can arbitrarily set the weights of all but one vertex. 
We cannot arbitarily set the weight of all vertices as the xor-sum of the weight of vertices is an invariant. Let us show that we can arbitarily choose the weights of all but one vertex on this connected component using the spanning tree. Let us arbitrarily root the tree. Choose some arbitrary leaf of the tree, if the weight of the leaf is correct, assign the edge connected to that vertex weight $0$. Otherwise, assign it weight $1$. Then remove the leaf and its corresponding edge. Actually, this shows that there is a one-to-one correspondents between the possible weights of the edges and the possible weights of the vertices. For the edges not in the spanning tree we have chosen, we can arbitarily set their weights while we are still able to choose the weights of all but one vertex on this connected component by properly assigning weights of the edges in the spanning tree. Suppose we want this constant value of $R$ and $C$ to be $v$, where $v$ is either $0$ or $1$. Suppose that the connected component has size $n$, has $m$ edges and the xor of all the initial vertex weights is $x$. If $n$ is even: If $x=0$, then there are $2^{m-n+1}$ ways to assign weights to edges. If $x=1$, then there are $0$ ways to assign weights to edges. If $n$ is odd: If $x=v$, then there are $2^{m-n+1}$ ways to assign weights to edges. If $x\neq v$, then there are $0$ ways to assign weights to edges.
[ "constructive algorithms", "graphs", "math", "matrices" ]
3,200
// Banner comments were Chinese song lyrics ("热爱105°C的你"); translated:
// "Super Idol's smile / isn't as sweet as yours / The August noon sun /
//  isn't as dazzling as you / Loving you at 105°C / drops of pure distilled water"
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
#include <ext/rope>
using namespace std;
using namespace __gnu_pbds;
using namespace __gnu_cxx;
#define int long long
#define ll long long
#define ii pair<ll,ll>
#define iii pair<ii,ll>
#define fi first
#define se second
#define endl '\n'
#define debug(x) cout << #x << ": " << x << endl
#define pub push_back
#define pob pop_back
#define puf push_front
#define pof pop_front
#define lb lower_bound
#define ub upper_bound
// rep(x,a,b): iterates x over [a,b) when a<b, and downwards over (b,a] when a>b.
#define rep(x,start,end) for(auto x=(start)-((start)>(end));x!=(end)-((start)>(end));((start)<(end)?x++:x--))
#define all(x) (x).begin(),(x).end()
#define sz(x) (int)(x).size()
#define indexed_set tree<ll,null_type,less<ll>,rb_tree_tag,tree_order_statistics_node_update>
//change less to less_equal for non distinct pbds, but erase will bug
mt19937 rng(chrono::system_clock::now().time_since_epoch().count());
const int MOD=998244353;
// Modular exponentiation: returns (b^p) mod m.
ll qexp(ll b,ll p,int m){
	ll res=1;
	while (p){
		if (p&1) res=(res*b)%m;
		b=(b*b)%m;
		p>>=1;
	}
	return res;
}
int n,m;                  // grid dimensions: n rows, m columns
char grid[2005][2005];    // cells: '0', '1' or '?'
// Bipartite graph for the odd/odd case: vertices 0..n-1 are rows,
// n..n+m-1 are columns; each '?' cell is an edge, fixed cells xor into w[].
int w[4005];
vector<int> al[4005];
bool vis[4005];
int ss,par,edges;         // per-component: vertex count, xor of vertex weights, degree sum
// DFS over one connected component, accumulating ss / par / edges.
void dfs(int i){
	if (vis[i]) return;
	vis[i]=true;
	ss++;
	par^=w[i];
	edges+=sz(al[i]);               // counts each edge twice; halved by the caller
	for (auto it:al[i]) dfs(it);
}
signed main(){
	ios::sync_with_stdio(0); cin.tie(0); cout.tie(0);
	cin.exceptions(ios::badbit | ios::failbit);
	cin>>n>>m;
	rep(x,0,n) cin>>grid[x];
	// Normalize: if rows are odd and columns even, transpose so that the
	// "even rows, odd columns" branch below covers both mixed-parity cases.
	if (n%2>m%2){
		swap(n,m);
		rep(x,0,2005) rep(y,0,2005) if (x<y) swap(grid[x][y],grid[y][x]);
	}
	// rep(x,0,n){
	// 	rep(y,0,m) cout<<grid[x][y]<<" "; cout<<endl;
	// }
	if (n%2==0 && m%2==0){
		// Both even: every assignment of the '?' cells is reachable,
		// so the answer is 2^(#'?').
		int cnt=0;
		rep(x,0,n) rep(y,0,m) if (grid[x][y]=='?') cnt++;
		cout<<qexp(2,cnt,MOD)<<endl;
	}
	else if (n%2==0 && m%2==1){
		// Even rows, odd columns: all row xors R_i must be equal (all 0 or all 1).
		// cnt0 / cnt1 count the grids for each target value of R.
		int cnt0=1,cnt1=1;
		rep(x,0,n){
			int val=0;                 // xor of the fixed digits in this row
			int cnt=0;                 // number of '?' in this row
			rep(y,0,m){
				if (grid[x][y]=='?') cnt++;
				else val^=grid[x][y]-'0';
			}
			if (cnt==0){
				// Fully fixed row: it rules out one of the two targets.
				if (val==0) cnt1=0;
				else cnt0=0;
			}
			else{
				// With k '?', exactly 2^(k-1) fillings give each row parity.
				cnt0=(cnt0*qexp(2,cnt-1,MOD))%MOD;
				cnt1=(cnt1*qexp(2,cnt-1,MOD))%MOD;
			}
		}
		cout<<(cnt1+cnt0)%MOD<<endl;
	}
	else{
		// Both odd: build the bipartite graph (rows/columns as vertices,
		// '?' cells as edges) and count per connected component.
		rep(x,0,n) rep(y,0,m){
			if (grid[x][y]!='?'){
				w[x]^=grid[x][y]-'0';
				w[y+n]^=grid[x][y]-'0';
			}
			else{
				al[x].pub(y+n);
				al[y+n].pub(x);
			}
		}
		int cnt0=1,cnt1=1;             // counts for target value v = 0 / 1 of R and C
		rep(x,0,n+m) if (!vis[x]){
			ss=0,par=0,edges=0;
			dfs(x);
			edges/=2;                  // each edge was counted from both endpoints
			edges-=ss-1; //extra edge  // edges beyond a spanning tree are free choices
			int mul=qexp(2,edges,MOD); // 2^(m - n + 1) assignments per component
			if (ss%2==0){
				// Even component: vertex-weight xor is invariant and must be 0.
				if (par) mul=0;
				cnt0=(cnt0*mul)%MOD;
				cnt1=(cnt1*mul)%MOD;
			}
			else{
				// Odd component: its xor must match the chosen target value v.
				if (par==0){
					cnt0=(cnt0*mul)%MOD;
					cnt1=0;
				}
				else{
					cnt0=0;
					cnt1=(cnt1*mul)%MOD;
				}
			}
		}
		cout<<(cnt0+cnt1)%MOD<<endl;
	}
}
1672
H
Zigu Zagu
You have a binary string $a$ of length $n$ consisting only of digits $0$ and $1$. You are given $q$ queries. In the $i$-th query, you are given two indices $l$ and $r$ such that $1 \le l \le r \le n$. Let $s=a[l,r]$. You are allowed to do the following operation on $s$: - Choose two indices $x$ and $y$ such that $1 \le x \le y \le |s|$. Let $t$ be the substring $t = s[x, y]$. Then for all $1 \le i \le |t| - 1$, the condition $t_i \neq t_{i+1}$ has to hold. Note that $x = y$ is always a valid substring. - Delete the substring $s[x, y]$ from $s$. For each of the $q$ queries, find the minimum number of operations needed to make $s$ an empty string. Note that for a string $s$, $s[l,r]$ denotes the subsegment $s_l,s_{l+1},\ldots,s_r$.
We will always remove a maximal segment, or in other words we will select $l$ and $r$ such that $A_{l-1} = A_l$ and $A_r = A_{r+1}$. Try to find an invariant. We can first split string $A$ into the minimum number of sections of $\texttt{010101}\ldots$ and $\texttt{101010}\ldots$. Let the number of sections be $K$. Since we can simply delete each section individually, the worst answer that we can get is $K$. Also, there is no reason to only delete part of a segment, so from here on we only assume that we delete maximal segments. Now, we can decompose $A$ based on its $K$ sections and write it as a string $D$. The rules for the decomposition is as follows: $10\ldots01 \to x$ $01\ldots10 \to x'$ $10\ldots10 \to y$ $01\ldots01 \to y'$ For example, the string $A=[0101][1][1010]$ becomes $D=y'xy$. Now, let us look at what our operation does on $D$. When we remove a section of even length ($y$ or $y'$) that is not on the endpoint of the string, the left and right sections will get combined. This is because the two ends of an even section are opposite, allowing the left and right sections to merge. Otherwise, it results in no merging. When some sections get combined, the length of string $D$ gets reduced by $2$, while the length of $D$ gets reduced by $1$ otherwise. Clearly, we want to maximize deleting the number of sections of even length that are not on the endpoints of the string. We will call such a move a power move. Let us classify strings that have no power moves. They actually come in $8$ types: $x x \ldots x$ $y' x x \ldots x$ $x x \ldots x y$ $y' x x \ldots x y$ $x' x' \ldots x'$ $y x' x' \ldots x'$ $x' x' \ldots x' y'$ $y x' x' \ldots x' y'$ We can prove that for any string not of this form, there will be always be character $y$ or $y'$ that is not on the ends of the string. Suppose that the string contains both $x$ and $x'$, then $xyx'$ or $x'y'x$ must be a substring. Also, the number of $y$ or $y'$s on each side cannot be more than $1$. 
Note that strings such that $y$ or $yy'$ may fall under multiple types. Furthermore, for strings of these types, the number of moves we have to make is equal to the length of the string. Let us define the balance of $x$ as the number of $x$ minus the number of $x'$. We will define the balance of $y$ similarly. When we perform a power move, notice that the balance of the string is unchanged. Indeed, each power move either removes a pair of $x$ and $x'$ or $y$ and $y'$ from the string. With this, we can easily find which type of ending string we will end up with based on the previously mentioned invariants, except for the cases of differentiating between the string $x x \ldots x$ and $y' x x \ldots x y$ (and the case for $x'$). To differentiate between these $2$ cases, we can note that the first character of our string does not change when we perform power moves. And indeed, $x$ and $y'$ have different starting characters. Note that we have to be careful when the balance of $x$ and the balance of $y$ is $0$ in the initial string as for strings such as $yy'$, the final string is not $\varnothing$ but $yy'$. With this, we can answer queries in $O(1)$ since we can query the balance of $x$, the balance of $y$ and the total length of the decomposed string in $O(1)$. Furthermore, there is an implementation trick here. Notice that if $a_{l-1}\neq a_l$, then the answer for $s[l-1,r]$ will be equal to the answer for $s[l,r]$. So in implementation, it is easier to "extend" $l$ and $r$ to find the balance of $x$ and $y$.
[ "constructive algorithms", "data structures", "greedy" ]
2,700
#include <bits/stdc++.h> using namespace std; int n,q; string s; int l[200005]; int r[200005]; int psum[200005]; int balance[200005]; signed main(){ ios::sync_with_stdio(0); cin.tie(0); cout.tie(0); cin.exceptions(ios::badbit | ios::failbit); cin>>n>>q; cin>>s; s=s[0]+s+s[n-1]; for (int x=1;x<=n;x++){ if (s[x-1]==s[x]) l[x]=x; else l[x]=l[x-1]; } for (int x=n;x>=1;x--){ if (s[x]==s[x+1]){ r[x]=x; psum[x]=1; if ((x-l[x])%2==0){ balance[x]=(s[x]=='1'?1:-1); } } else r[x]=r[x+1]; } for (int x=1;x<=n;x++){ psum[x]+=psum[x-1]; balance[x]+=balance[x-1]; } int a,b; while (q--){ cin>>a>>b; a=l[a],b=r[b]; int bl=balance[b]-balance[a-1]; int sum=psum[b]-psum[a-1]; int ans=(sum+abs(bl))/2; if ((sum+abs(bl))%2==1) ans++; else if (abs(bl)==0) ans++; else if (bl>0 ^ s[a]=='1') ans++; cout<<ans<<"\n"; } }
1672
I
PermutationForces
You have a permutation $p$ of integers from $1$ to $n$. You have a strength of $s$ and will perform the following operation some times: - Choose an index $i$ such that $1 \leq i \leq |p|$ and $|i-p_i| \leq s$. - For all $j$ such that $1 \leq j \leq |p|$ and $p_i<p_j$, update $p_j$ to $p_j-1$. - Delete the $i$-th element from $p$. Formally, update $p$ to $[p_1,\ldots,p_{i-1},p_{i+1},\ldots,p_n]$. It can be shown that no matter what $i$ you have chosen, $p$ will be a permutation of integers from $1$ to $|p|$ after all operations. You want to be able to transform $p$ into the empty permutation. Find the minimum strength $s$ that will allow you to do so.
When removing an element, consider how the costs of other elements change? When removing an element, consider the elements whose cost increase. What are their properties? Is there a greedy algorithm? Yes there is. How to make it run fast? Use monotonicity to reduce one dimension Let us rephrase the problem. Let $x$ and $y$ be arrays where $x_i=p_i$ and $y_i=i$ initially. For brevity, let $c_i = |x_i - y_i|$. We want to check if we can do the following operation $n$ times on the array: Choose an index $i$ such that and $c_i \leq s$. For all $j$ where $x_i < x_j$, update $x_j \gets x_j-1$. For all $j$ where $y_i < y_j$, update $y_j \gets y_j-1$. Set $x_i \gets \infty$ and $y_i \gets -\infty$ Let us fix $s$ and solve the problem of checking whether a value of $s$ allows us to transform the permutation into the empty permutation. Let $(x,y,c)$ be the arrays before some arbitrary operation and $(x',y',c')$ be the arrays after that operation. If we only perform moves with $c_i \leq s$, then $c_j \leq s$ implies that $c'_j \leq s$ i.e. if something was removable before, it will be removable later if we only use valid moves. Proof: Note that $x'_j = x_j$ or $x'_j=x_j-1$. The case for $y$ is same. We can see that $c'_j \leq c_j+1$. So the only case where $c'_j > s$ is when $c_j=s$. Case $1$: $x_j \leq y_j$ Then it must be that $x'_j=x_j$ and $y'_j=y_j-1$. By the definition of our operation, we have the following inequality: $x_i < x_j \leq y_j < y_i$. This implies that $c_i>s$, which is a contradiction. Case $2$: $x_j \geq y_j$ By similar analysis we see that $c_i>s$. $\blacksquare$ Suppose that we only remove points with $c_i \leq s$ for some fixed $s$. This greedy algorithm works here - at each step, choose any point $c_i \leq s$ with and remove it. - if no such point exists, the $s$ does not work Proof: Given any permutation, let any point with $c_a \leq s$ be $a$. Consider any optimal sequence of moves $[b_1,b_2,\ldots,b_w,a,\ldots]$. 
We can transform it into another optimal solution by moving $a$ to the front. Let the element before $a$ be $b_w$. We will swap $a$ and $b_w$. $a$ is already removable at the start so it will be removable after removing $b_1,b_2,\ldots,b_{w-1}$ by lemma $1$. After removing everything before $b_1,b_2,\ldots,b_{w-1}$, $b_w$ is removable, so it will be removable after removing $a$ by lemma $1$. Hence we can move $a$ to the front of the sequence of moves by repeatedly swapping elements. By the exchange argument, the greedy solution of removing any point with $c_a \leq s$ is an optimal solution. By extension, the following greedy algorithm works: Set $s \gets 0$. At each step, choose index $i$ with minimal $c_i$. Update $s \gets \max(s,c_i)$. Remove point $i$. Let's start with $s=0$ and remove things while we can. If we reach a state where we are stuck, increment $s$. When we increment $s$, the moves that we have done before will still be a valid choice with this new value of $s$. We simply increment $s$ until we can remove the entire permutation. Now the only difficult part about this is maintaining the array $c_i$ (the cost) for the points we have not removed. Let's define a point as good as follows: If $y < x$, the point is good if there exists no other point $(x',y')$ such that $y < y' \leq x' < x$. Otherwise, the point is good if there exists no other point $(x',y')$ such that $x < x' \leq y' < y$. We maintain only the good elements, because only good elements are candidates for the minimum $c_i$. Suppose an element is not good and minimal, then the point that causes it to be not good has a strictly smaller cost, an obvious contradiction. Now we will use data structures to maintain $c_i$ of good points. We will split the good points into the left good and right good points which are those of $x_i \leq y_i$ and $y_i \leq x_i$ respectively. Notice that if $x_i = y_i$, then it is both left good and right good. We will focus on the left good points. 
Suppose $i$ and $j$ are both left good with $x_i < x_j$, then $y_i < y_j$. Suppose otherwise, then we have $x_i < x_j \leq y_j < y_i$, making $i$ not good. As such $x$ and $y$ of the left good points are monotone. To find this monotone chain of left good points, we can maintain a max segment tree which stores max $y$ for all alive $x$. Using binary search on segment tree to find the unique point with $x' > x$ such that $y'$ is minimized. Where $(x,y)$ is a point on the chain, and $(x',y')$ is the next point. We can repeatedly do this to find the entire chain of left good elements We can store a segment tree where $i$ is the key and $c_i$ is the value. If an element is left good, it will always be left good until it is removed. The following two operations are simply range updates on the segment tree since $y_i$ is monotone. - For all $j$ such that $x_j>x_i$, set $x_j \leftarrow x_j-1$. - For all $j$ such that $y_j<y_i$, set $y_j \leftarrow y_j-1$. Now, when we remove some left good point, some other points will become left good, and we will need to add them. We do this by starting from the previous element of the left good chain, and then keep repeating the same algo using descend on the segment tree. When we add a new left good point, we need to know the cost at the current point in time. If we consider a point which is initially $(x,y)$, and all other previously removed $(x',y')$, $x$ decreases by 1 per $x' < x$ and $y$ decreases by 1 per $y' < y$. Hence, we can maintain a fenwick tree of the removed point's $x$ and $y$, and using that we can determine the $x$ and $y$ at the time when we add it to the left good chain (and hence to the segment tree). Time Complexity: $O(n \log n)$ Thanks to dario2994 for pointing this out. Surprisingly quad trees are provably $O(n \sqrt n)$ here. Take the $k$-th layer of the quad tree. The $n \cdot n$ grid will be split into $4^k$ squares in the $k$-th layer. 
Since we are doing half plane covers, our query range will only touch $2^k$ squares. At the same time, the width of those $2^k$ squares is $\frac{n}{2^k}$. Since each column only has a single element, our query range will also by bounded by $\frac{n}{2^k}$. The time complexity for a single update is given by $\sum\limits_{k=1}^{\log n} \min(2^k,\frac{n}{2^k}) = O(\sqrt n)$.
[ "data structures", "greedy" ]
3,000
// Banner comments were Chinese song lyrics ("热爱105°C的你"); translated:
// "Super Idol's smile / isn't as sweet as yours / The August noon sun /
//  isn't as dazzling as you / Loving you at 105°C / drops of pure distilled water"
#include <bits/stdc++.h>
#include <ext/pb_ds/assoc_container.hpp>
#include <ext/pb_ds/tree_policy.hpp>
#include <ext/rope>
using namespace std;
using namespace __gnu_pbds;
using namespace __gnu_cxx;
#define ii pair<int,int>
#define fi first
#define se second
#define debug(x) cout << #x << ": " << x << endl
#define pub push_back
#define pob pop_back
#define puf push_front
#define pof pop_front
#define lb lower_bound
#define ub upper_bound
// rep(x,a,b): iterates x over [a,b) when a<b, and downwards over (b,a] when a>b.
#define rep(x,start,end) for(auto x=(start)-((start)>(end));x!=(end)-((start)>(end));((start)<(end)?x++:x--))
#define all(x) (x).begin(),(x).end()
#define sz(x) (int)(x).size()
#define indexed_set tree<ll,null_type,less<ll>,rb_tree_tag,tree_order_statistics_node_update>
//change less to less_equal for non distinct pbds, but erase will bug
mt19937 rng(chrono::system_clock::now().time_since_epoch().count());
// Fenwick tree over 1..500004 used for prefix sums of alive values / indices.
struct FEN{
	int fen[500005];
	FEN(){
		memset(fen,0,sizeof(fen));
	}
	// Point update: add j at position i.
	void upd(int i,int j){
		while (i<500005){
			fen[i]+=j;
			i+=i&-i;
		}
	}
	// Prefix sum over [1, i].
	int query(int i){
		int res=0;
		while (i){
			res+=fen[i];
			i-=i&-i;
		}
		return res;
	}
} fval,fidx;
// One side of the problem ("left good" or "right good" chain of points).
struct dat{
	// Lazy min segment tree keyed by the pair's second coordinate; val holds
	// (cost, position) so the global minimum-cost removable point is at the root.
	struct node{
		int s,e,m;
		ii val;
		int lazy=0;
		node *l,*r;
		node (int _s,int _e){
			s=_s,e=_e,m=s+e>>1;
			val={1e9,s};                  // 1e9 acts as "not present"
			if (s!=e){
				l=new node(s,m);
				r=new node(m+1,e);
			}
		}
		void propo(){
			if (lazy){
				val.fi+=lazy;
				if (s!=e){
					l->lazy+=lazy;
					r->lazy+=lazy;
				}
				lazy=0;
			}
		}
		// Range add of k over [i, j].
		void update(int i,int j,int k){
			if (s==i && e==j) lazy+=k;
			else{
				if (j<=m) l->update(i,j,k);
				else if (m<i) r->update(i,j,k);
				else l->update(i,m,k),r->update(m+1,j,k);
				l->propo(),r->propo();
				val=min(l->val,r->val);
			}
		}
		// Point assign of cost k at position i.
		void set(int i,int k){
			propo();
			if (s==e) val.fi=k;
			else{
				if (i<=m) l->set(i,k);
				else r->set(i,k);
				l->propo(),r->propo();
				val=min(l->val,r->val);
			}
		}
	} *root=new node(0,500005);
	// Max segment tree used to descend for the next chain candidate:
	// position -> first coordinate of a not-yet-good pair (or -1e9 if none).
	struct node2{
		int s,e,m;
		int val=-1e9;
		node2 *l,*r;
		node2 (int _s,int _e){
			s=_s,e=_e,m=s+e>>1;
			if (s!=e){
				l=new node2(s,m);
				r=new node2(m+1,e);
			}
		}
		void update(int i,int k){
			if (s==e) val=k;
			else{
				if (i<=m) l->update(i,k);
				else r->update(i,k);
				val=max(l->val,r->val);
			}
		}
		ii query(int i,int key){ //find key<=val where i<=s
			if (e<i || val<key) return {-1,-1};
			if (s==e) return {s,val};
			else{
				auto temp=l->query(i,key);
				if (temp!=ii(-1,-1)) return temp;
				else return r->query(i,key);
			}
		}
	} *root2=new node2(0,500005);
	set<ii> s={
		{500005,500005}                   // sentinel so next()/prev() are always valid
	};
	//root stores the values of each pair
	//root2 stores the left endpoint of each pair to add non-overlapping ranges
	//s stores the pairs are still alive so its easy to do searches
	dat *d; //we also store the other guy
	bool orien; //false for i->arr[i]
	int pp[500005];                           // pp[j] = partner (first coordinate) of j
	// Register pair (i, j) as a candidate (not yet on the good chain).
	void push(int i,int j){
		pp[j]=i;
		root2->update(j,i);
	}
	// Promote pair (i, j) onto the good chain and compute its current cost
	// from the Fenwick trees (cost = alive elements strictly between i and j).
	void add(int i,int j){
		root2->update(j,-1e9);
		s.insert({i,j});
		int val;
		if (!orien) val=fval.query(j)-fidx.query(i);
		else val=fidx.query(j)-fval.query(i);
		root->set(j,val);
	}
	// Remove the chain element ending at j (or j == -1 to only (re)build the
	// chain), then pull newly-good pairs onto the chain via root2 descent.
	void del(int j){
		ii curr={-1,-1};
		int lim=500005;
		if (j!=-1){
			int i=pp[j];
			// The removal shifts costs on the opposite side's chain segment.
			auto it=d->s.ub({j,1e9});
			d->root->update(i,(*it).se-1,-1);
			if (!orien) fidx.upd(i,-1),fval.upd(j,-1);
			else fval.upd(i,-1),fidx.upd(j,-1);
			it=s.find({i,j});
			if (it!=s.begin()) curr=*prev(it);
			lim=(*next(it)).se;
			s.erase(it);
			root->set(j,1e9);
			root2->update(j,-1e9);
		}
		// Walk forward from the predecessor, adding every pair that became good
		// in the gap (bounded by the successor's endpoint lim).
		while (true){
			auto temp=root2->query(curr.se,curr.fi);
			swap(temp.fi,temp.se);
			if (temp==ii(-1,-1) || lim<=temp.se) break;
			add(temp.fi,temp.se);
			curr=temp;
		}
	}
} *l=new dat(),*r=new dat();
int n;
int main(){
	ios::sync_with_stdio(0); cin.tie(0); cout.tie(0);
	cin.exceptions(ios::badbit | ios::failbit);
	//cyclic mapping to each other
	l->d=r;
	r->d=l;
	r->orien=true;
	cin>>n;
	rep(x,1,n+1){
		int y;
		cin>>y;
		// Split points into x <= p_x (left) and p_x < x (right) sides.
		if (x<=y) l->push(x,y);
		else r->push(y,x);
	}
	rep(x,1,n+1) fidx.upd(x,1),fval.upd(x,1);
	// Build the initial good chains on both sides.
	l->del(-1);
	r->del(-1);
	int ans=0;
	// Greedy: always remove the alive point with the minimal cost; the answer
	// is the maximum cost ever paid.
	rep(x,0,n){
		if (l->root->val.fi<=r->root->val.fi){
			ans=max(ans,l->root->val.fi);
			l->del(l->root->val.se);
		}
		else{
			ans=max(ans,r->root->val.fi);
			r->del(r->root->val.se);
		}
	}
	cout<<ans<<endl;
}
1673
A
Subtle Substring Subtraction
Alice and Bob are playing a game with strings. There will be $t$ rounds in the game. In each round, there will be a string $s$ consisting of lowercase English letters. Alice moves first and both the players take alternate turns. \textbf{Alice is allowed to remove any substring of even length (possibly empty) and Bob is allowed to remove any substring of odd length from $s$}. More formally, if there was a string $s = s_1s_2 \ldots s_k$ the player can choose a substring $s_ls_{l+1} \ldots s_{r-1}s_r$ with length of corresponding parity and remove it. After that the string will become $s = s_1 \ldots s_{l-1}s_{r+1} \ldots s_k$. After the string becomes empty, the round ends and each player calculates his/her score for this round. The score of a player is the sum of values of all characters removed by him/her. The value of $a$ is $1$, the value of $b$ is $2$, the value of $c$ is $3$, $\ldots$, and the value of $z$ is $26$. The player with higher score wins the round. For each round, determine the winner and the difference between winner's and loser's scores. Assume that both players play optimally to maximize their score. It can be proved that a draw is impossible.
Greedy The answer depends on whether the length of $s$ is even or odd and on the first and last characters of $s$ if the length is odd. The problem can be solved greedily. Let $n$ be the length of the given string. If the $n$ is even, it is always optimal for Alice to remove the whole string. If the $n$ is odd, it is always optimal for Alice to remove either $s_1s_2\ldots s_{n-1}$ or $s_2s_3\ldots s_n$ based on which gives the higher score and then Bob can remove the remaining character ($s_n$ or $s_1$ respectively). This is optimal because if Alice chooses to remove a substring of even length $2k$ such that $2k < n-1$ then Bob can remove the remaining $n-2k\geq 3$ characters, one of which will always be either $s_1$ or $s_n$, thus increasing Bob's score and decreasing Alice's score. Prove that - Bob can win if and only if the length of the string is $1$. A draw is impossible.
[ "games", "greedy", "strings" ]
800
#include <bits/stdc++.h> using namespace std; int main() { int tc; cin >> tc; while(tc--) { string s; cin >> s; int n=s.length(),alice=0; for(int i=0;i<n;i++) alice += s[i]-'a'+1; if(n%2==0) cout << "Alice " << alice << '\n'; else { int bob; if(s[0]<=s[n-1]) bob = s[0]-'a'+1; else bob = s[n-1]-'a'+1; alice -= bob; if(alice > bob) cout << "Alice " << alice-bob << '\n'; else if(alice < bob) cout << "Bob " << bob-alice << '\n'; else cout << "Draw " << 0 << '\n'; } } }
1673
B
A Perfectly Balanced String?
Let's call a string $s$ perfectly balanced if for all possible triplets $(t,u,v)$ such that $t$ is a non-empty substring of $s$ and $u$ and $v$ are characters present in $s$, the difference between the frequencies of $u$ and $v$ in $t$ is not more than $1$. For example, the strings "aba" and "abc" are perfectly balanced but "abb" is not because for the triplet ("bb",'a','b'), the condition is not satisfied. You are given a string $s$ consisting of lowercase English letters only. Your task is to determine whether $s$ is perfectly balanced or not. A string $b$ is called a substring of another string $a$ if $b$ can be obtained by deleting some characters (possibly $0$) from the start and some characters (possibly $0$) from the end of $a$.
The string is perfectly balanced if it is periodic and the repeating pattern contains all distinct alphabets. Let the number of distinct characters in $s$ be $k$ and length of $s$ be $n$. Then, $s$ will be perfectly balanced if and only if $s_{i}, s_{i+1}, \ldots, s_{i+k-1}$ are all pairwise distinct for every $i$ in the range $1\leq i\leq n-k+1$. If there exists some $i$ in the range $1\leq i\leq n-k+1$ for which the characters $s_{i}, s_{i+1}, \ldots, s_{i+k-1}$ are not pairwise distinct, there will be atleast one character $u$ in the substring $t=s_{i} s_{i+1}\ldots s_{i+k-1}$ such that $f_t(u)\geq 2$ and by pigeonhole principle, there will be atleast one character $v$ present in $s$ such that $f_t(v)=0$. So, for the triple $(t,u,v)$, $f_t(u)-f_t(v)\geq 2$, violating the criteria for the $s$ to be perfectly balanced. Suppose the following condition is met. Let's pick up any substring $t=s_is_{i+1}\ldots s_j$. Let's divide $t$ into $\Big\lceil \frac{j-i+1}{k}\Big\rceil$ blocks each of length $k$ except probably the last block. For each of these blocks, the frequency of all characters is equal to $1$ (because there are $k$ distinct characters in a block as well as in $s$) and for the last block, the frequency of some characters is equal to $1$ wheras the frequency of rest of the characters is equal to $0$. So, the frequency of some characters in $t$ will be equal to $\Big\lceil \frac{j-i+1}{k}\Big\rceil$ while that for the other characters will be equal to $\Big\lceil \frac{j-i+1}{k}\Big\rceil-1$. If we pick any two characters $u$ and $v$, $\lvert f_t(u)-f_t(v)\rvert\leq 1$ meaning that $s$ is perfectly balanced. Prove that the condition is equivalent to the following two conditions - The first $k$ characters of $s$ are pairwise distinct. For each $i$ in the range $1\leq i\leq n-k$, $s_i=s_{i+k}$.
[ "brute force", "greedy", "strings" ]
1,100
#include <bits/stdc++.h> using namespace std; int main() { int tc; cin >> tc; while(tc--) { string s; cin >> s; int n = s.length(); set<char> c; bool ok = true; int k; for(k=0;k<n;k++) { if(c.find(s[k])==c.end()) c.insert(s[k]); else break; } for(int i=k;i<n;i++) { if(s[i]!=s[i-k]) ok = false; } if(ok) cout << "YES\n"; else cout << "NO\n"; } }
1673
C
Palindrome Basis
You are given a positive integer $n$. Let's call some positive integer $a$ without leading zeroes palindromic if it remains the same after reversing the order of its digits. Find the number of distinct ways to express $n$ as a sum of positive palindromic integers. Two ways are considered different if the frequency of at least one palindromic integer is different in them. For example, $5=4+1$ and $5=3+1+1$ are considered different but $5=3+1+1$ and $5=1+3+1$ are considered the same. Formally, you need to find the number of distinct multisets of positive palindromic integers the sum of which is equal to $n$. Since the answer can be quite large, print it modulo $10^9+7$.
The number of palindromes less than $4\cdot 10^4$ is relatively small. The rest of the problem is quite similar to the classical partitions problem. First, we need to observe that the number of palindromes less than $4\cdot 10^4$ is relatively very small. The number of $5$-digit palindromes are $300$ (enumerate all $3$-digit numbers less than $400$ and append the first two digits in the reverse order). Similarly, the number of $4$-digit, $3$-digit, $2$-digit and $1$-digit palindromes are $90$, $90$, $9$ and $9$ respectively, giving a total of $M=498$ palindromes. Now, the problem can be solved just like the classical partitions problem which can be solved using Dynamic Programming. Let $dp_{k,m} =$ Number of ways to partition the number $k$ using only the first $m$ palindromes. It is not hard to see that $dp_{k,m} = dp_{k,m-1} + dp_{k-p_m,m}$ where $p_m$ denotes the $m^{th}$ palindrome. The first term corresponds to the partitions of $k$ using only the first $m-1$ palindromes and the second term corresponds to those partitions of $k$ in which the $m^{th}$ palindrome has been used atleast once. As base cases, $dp_{k,1}=1$ and $dp_{1,m}=1$. The final answer for any $n$ will be $dp_{n,M}$. The time and space complexity is $\mathcal{O}(n\cdot M)$. Try to optimize the space complexity to $\mathcal{O}(n)$.
[ "brute force", "dp", "math", "number theory" ]
1,500
#include <bits/stdc++.h> using namespace std; const int N = 40004, M = 502; const long long MOD = 1000000007; long long dp[N][M]; int reverse(int n) { int r=0; while(n>0) { r=r*10+n%10; n/=10; } return r; } bool palindrome(int n) { return (reverse(n)==n); } int main() { vector<int> palin; palin.push_back(0); for(int i=1;i<2*N;i++) { if(palindrome(i)) palin.push_back(i); } for(int j=1;j<M;j++) dp[0][j]=1; for(int i=1;i<N;i++) { dp[i][0]=0; for(int j=1;j<M;j++) { if(palin[j]<=i) dp[i][j]=(dp[i][j-1]+dp[i-palin[j]][j])%MOD; else dp[i][j]=dp[i][j-1]; } } ios_base::sync_with_stdio(false); cin.tie(NULL); int tc; cin >> tc; while(tc--) { int n; cin >> n; cout << dp[n][M-1] << '\n'; } }
1673
D
Lost Arithmetic Progression
Long ago, you thought of two finite arithmetic progressions $A$ and $B$. Then you found out another sequence $C$ containing all elements common to both $A$ and $B$. It is not hard to see that $C$ is also a finite arithmetic progression. After many years, you forgot what $A$ was but remember $B$ and $C$. You are, for some reason, determined to find this lost arithmetic progression. Before you begin this eternal search, you want to know how many different finite arithmetic progressions exist which can be your lost progression $A$. Two arithmetic progressions are considered different if they differ in their first term, common difference or number of terms. It may be possible that there are infinitely many such progressions, in which case you won't even try to look for them! Print $-1$ in all such cases. Even if there are finite number of them, the answer might be very large. So, you are only interested to find the answer modulo $10^9+7$.
First check if all elements of $C$ are present in $B$ or not. If not, the answer is $0$. Then check if the answer is infinite or not. It depends on only the first and last elements of $B$ and $C$. If $p$ is the common difference of $A$ then $lcm(p,q)=r$. $p$ must necessarily be a factor of $r$ and $\mathcal{O}(\sqrt n)$ works here. If all elements of $C$ are not present in $B$, then the answer is $0$. It is sufficient to check the following $4$ conditions to check if all elements of $C$ are present in $B$ or not - The first term of $B\leq$ The first term of $C$, i.e., $b\leq c$. The last term of $B\geq$ The last term of $C$, i.e., $b+(y-1)q\geq c+(z-1)r$. The common difference of $C$ must be divisible by the common difference of $B$, i.e., $r\bmod q=0$. The first term of $C$ must lie in $B$, i.e., $(c-b)\bmod q=0$. Now suppose the following conditions are satisfied. Let's denote an Arithmetic Progression (AP) with first term $a$, common difference $d$ and $n$ number of terms by $[a,d,n]$. If $b>c-r$ then there are infinite number of progressions which can be $A$ like $[c,r,z]$, $[c-r,r,z+1]$, $[c-2r,r,z+2]$ and so on. Similarly, if $b+(y-1)q<c+zr$, there are infinite number of progressions which can be $A$ like $[c,r,z]$, $[c,r,z+1]$, $[c,r,z+2]$ and so on. Otherwise, there are a finite number of progressions which can be $A$. Let's count them. Let $A$ be the AP $[a,p,x]$ and $l=a+(x-1)p$. It can be seen that $lcm(p,q)=r$, $(c-a)\bmod p=0$, $a>c-r$ and $l<c+rz$ for any valid $A$. The first two conditions are trivial. The third condition is necessary because if $a\leq c-r$ then $c-r$ will always be present in both $A$ and $B$ contradicting the fact that $C$ contains all the terms common to $A$ and $B$. Similarly, the fourth condition is also necessary. The only possible values $p$ can take according to the first condition are factors of $r$ which can be enumerated in $\mathcal{O}(\sqrt{r})$. The $lcm$ condition can be checked in $\mathcal{O}(\log r)$. 
For a particular value of $p$, there are $\frac{r}{p}$ possible values of $a$ satisfying conditions 2 and 3 and $\frac{r}{p}$ possible values of $l$ satisfying conditions 2 and 4. Thus, the answer is $\displaystyle\sum_{lcm(p,q)=r}\Big(\frac{r}{p}\Big)^2$. Time complexity: $\mathcal{O}(t\,\sqrt{r}\,\log r)$
[ "combinatorics", "math", "number theory" ]
1,900
#include <bits/stdc++.h>
using namespace std;

// 1673D. Count arithmetic progressions A = [a, p, x] whose intersection with
// B = [b, q, y] is exactly C = [c, r, z]. Prints 0 when C is not contained in
// B, -1 when infinitely many A exist, otherwise the count modulo 1e9+7.
const long long MOD = 1000000007;

// Iterative Euclidean algorithm.
long long gcd(long long a, long long b) {
    while (b != 0) {
        long long t = a % b;
        a = b;
        b = t;
    }
    return a;
}

// Divide before multiplying to keep the intermediate value small.
long long lcm(long long a, long long b) {
    return (a / gcd(a, b)) * b;
}

int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        long long b, q, y, c, r, z;
        cin >> b >> q >> y;
        cin >> c >> r >> z;
        const long long lastB = b + q * (y - 1);   // last term of B
        const long long lastC = c + r * (z - 1);   // last term of C
        // C lies inside B iff both endpoints fit, q divides r, and the first
        // term of C is reachable from b with step q.
        const bool contained = !(c < b || c > lastB || lastC < b || lastC > lastB
                                 || r % q != 0 || (c - b) % q != 0);
        if (!contained) {
            cout << 0 << '\n';
        } else if (c - r < b || lastC + r > lastB) {
            // Room in B before the first or after the last term of C means
            // infinitely many valid A.
            cout << -1 << '\n';
        } else {
            // For each common difference p with lcm(p, q) == r there are
            // (r/p) choices for the first term and (r/p) for the last one.
            long long total = 0;
            for (long long d = 1; d * d <= r; d++) {
                if (r % d != 0) continue;
                if (lcm(d, q) == r) {
                    long long cnt = ((r / d) * (r / d)) % MOD;
                    total = (total + cnt) % MOD;
                }
                if (d * d != r && lcm(r / d, q) == r) {
                    long long cnt = (d * d) % MOD;
                    total = (total + cnt) % MOD;
                }
            }
            cout << total << '\n';
        }
    }
}
1673
E
Power or XOR?
The symbol $\wedge$ is quite ambiguous, especially when used without context. Sometimes it is used to denote a power ($a\wedge b = a^b$) and sometimes it is used to denote the XOR operation ($a\wedge b=a\oplus b$). You have an ambiguous expression $E=A_1\wedge A_2\wedge A_3\wedge\ldots\wedge A_n$. You can replace each $\wedge$ symbol with either a $Power$ operation or a $XOR$ operation to get an unambiguous expression $E'$. The value of this expression $E'$ is determined according to the following rules: - All $Power$ operations are performed before any $XOR$ operation. In other words, the $Power$ operation takes precedence over $XOR$ operation. For example, $4\;XOR\;6\;Power\;2=4\oplus (6^2)=4\oplus 36=32$. - Consecutive powers are calculated from \textbf{left to right}. For example, $2\;Power\;3 \;Power\;4 = (2^3)^4 = 8^4 = 4096$. You are given an array $B$ of length $n$ and an integer $k$. The array $A$ is given by $A_i=2^{B_i}$ and the expression $E$ is given by $E=A_1\wedge A_2\wedge A_3\wedge\ldots\wedge A_n$. You need to find the XOR of the values of all possible unambiguous expressions $E'$ which can be obtained from $E$ and has at least $k$ $\wedge$ symbols used as $XOR$ operation. Since the answer can be very large, you need to find it modulo $2^{2^{20}}$. Since this number can also be very large, you need to print its binary representation \textbf{without leading zeroes}. If the answer is equal to $0$, print $0$.
All numbers in $A$ are powers of $2$ and the modulo is also a power of $2$ and $B_i\geq 1$. Fix all operators in a particular subsegment as $\texttt{Power}$ and fix the operators around the segment as $\texttt{XOR}$. Find the contribution of this segment independent of other segments. Maximum possible length for such a subsegment which can contribute to the answer is $20$. Use Lucas' Theorem or Submasks DP or precomputation in order to count the parity of the number of valid unambiguous expressions in which the subsegment appears as in Hint 2. Let's consider a subsegment $[A_{l}\wedge A_{l+1}\wedge A_{l+2}\wedge\ldots\wedge A_r]$. Let all the $\wedge$ symbols in this segment be replaced by $\texttt{Power}$ and the $\wedge$ symbols before $A_l$ and after $A_r$ be replaced by $\texttt{XOR}$. Then the value of this segment will not be affected by the rest of the expression. Moreover, out of all the expressions in which this segment appears as above, it will contribute the same value to the final answer. Since the final answer is also a $\texttt{XOR}$, if the segment $[l\ldots r]$ appears in the above mentioned form in odd number of valid unambiguous expressions, it will contribute $(\ldots((A_l^{A_{l+1}})^{A_{l+2}}) \ldots )^ {A_r}$ to the final answer else it will contribute nothing. We can find the contribution of each segment $[l\ldots r]$ independently for all values of $1\leq l < r \leq n$. Now, there are two things we need to find out: How much $(\ldots((A_l^{A_{l+1}})^{A_{l+2}}) \ldots )^ {A_r}$ will contribute to the final answer, modulo $MOD = 2^{1048576}$. What is the parity of the count of valid unambiguous expressions, in which the segment $[l...r]$ appears as $... \oplus (\ldots((A_l^{A_{l+1}})^{A_{l+2}}) \ldots )^ {A_r} \oplus \ldots$. Part 1: Notice that since all the elements of $A$ are powers of $2$, $S(l,r)=(\ldots((A_l^{A_{l+1}})^{A_{l+2}}) \ldots )^ {A_r}$ will also be a power of $2$. 
It means that $\texttt{XOR}$-ing it with answer will flip not more than $1$ bit in the answer. The rest of the calculations is pretty straightforward. $S(l,r) = 2^{B_l\cdot A_{l+1}\cdot A_{l+2}\cdot\ldots\cdot A_{r}}$ by properties of exponents. So, if it contributes to the answer, it will flip the $B_l\cdot A_{l+1}\cdot A_{l+2}\cdot\ldots\cdot A_{r}-$th bit of the answer. Now, note that if $S\geq 2^{1048576}$, it will have no effect on the answer because $S(l,r) \bmod 2^{1048576}$ will then be $0$. So, we care only for those $(l,r)$ for which $S(l,r) < 2^{1048576}$. Since $B_i\geq 1$, $A_i\geq 2$ and so, $r-l \leq 20$ because $2^{20} = 1048576$. Thus, it is sufficient to calculate $S(l,r)$ for only $20$ values of $r$ per value of $l$. Part 2: We have used $r-l$ $\wedge$ operators as $\texttt{Power}$ and $0$, $1$ or $2$ $\wedge$ operators as $\texttt{XOR}$. Let's say that out of the $m$ unused operators, we need to use at least $q$ of them as $\texttt{XOR}$. Then the number of ways to do this is $\binom{m}{q}+\binom{m}{q+1}+\binom{m}{q+2}+\ldots+\binom{m}{m}$. Infact, instead of finding this value, we are only interested in finding whether it is even or odd. So, we need the value of $\big[\binom{m}{q}+\binom{m}{q+1}+\binom{m}{q+2}+\ldots+\binom{m}{m}\big]\bmod 2=$ $\big[\binom{m-1}{q}+\binom{m-1}{q-1} + \binom{m-1}{q+1}+\binom{m-1}{q} + \binom{m-1}{q+2}+\binom{m-1}{q+1} + \ldots + \binom{m-1}{m-1}+\binom{m-1}{m-2} + \binom{m}{m}\big] \bmod 2=$ $\big[\binom{m-1}{q-1} + \binom{m-1}{m-1} + \binom{m}{m}]\bmod 2 =$ $\binom{m-1}{q-1} \bmod 2$ as $(a + a) \bmod 2 = 0$ and $\binom{x}{x} = 1$ by definition. $\binom{m-1}{q-1} \bmod 2$ can be found using Lucas' Theorem. It turns out that $\binom{n}{r}$ is odd if and only if $r$ is a submask of $n$, i.e., $n | r = n$. Note that there are also many other ways to find this value (like Submasks DP or using the fact that $r-l\leq 20$ for precomputation) but this is the easiest one. 
Some final notes - We can maintain the final answer as a binary string of length $1048576$. Find the value $X = B_l\cdot A_{l+1}\cdot A_{l+2}\cdot\ldots\cdot A_{r}$ and if the required parity is odd and $X < 1048576$, flip the $X-$th bit of the string. We need to be careful while calculating $B_l\cdot A_{l+1}\cdot A_{l+2}\cdot\ldots\cdot A_{r}$ since $A_i$ can be as large as $2^{1048575}$. But since we are interested in values that evaluate to something smaller than $1048576$, we will never try to multiply $A_i$ for anything with $B_i > 20$. Calculating the parity of $\binom{n}{r}$ in $\mathcal{O}(\log n)$ may time out. The constraints are strict enough. Total time Complexity - $\mathcal{O}(n \log \log MOD)$ Try to solve the problem if $B_i\geq 0$ and if powers are calculated from right to left.
[ "bitmasks", "combinatorics", "math", "number theory" ]
2,500
#include <bits/stdc++.h> using namespace std; const int N = 1048576; long long b[N]; char ans[N]; int main() { ios_base::sync_with_stdio(false); cin.tie(NULL); int n,k; cin >> n >> k; for(int i=0;i<n;i++) { cin >> b[i]; } for(int i=0;i<1048576;i++) ans[i]='0'; for(int l=0;l<n;l++) { long long p=1; for(int r=l;r<n;r++) { if(r==l) p*=b[r]; else { if(b[r]>=20) break; else p*=(1ll<<b[r]); } if(p>=1048576) break; int m = n-r+l-3; int q = k-2; if(l==0) { m++; q++; } if(r==n-1) { m++; q++; } if(m>=q && (m==0 || (q>0 && ((m-1)|(q-1))==(m-1)))) ans[p]='1'+'0'-ans[p]; } } bool start=false; for(int i=1048575;i>=0;i--) { if(ans[i]=='0' && start) cout << 0; else if(ans[i]=='1') { cout << 1; start=true; } } if(!start) cout << 0; cout << '\n'; }
1673
F
Anti-Theft Road Planning
This is an interactive problem. A city has $n^2$ buildings divided into a grid of $n$ rows and $n$ columns. You need to build a road of some length $D(A,B)$ of your choice between each pair of adjacent by side buildings $A$ and $B$. Due to budget limitations and legal restrictions, the length of each road must be a positive integer and \textbf{the total length of all roads should not exceed $48\,000$}. There is a thief in the city who will start from the topmost, leftmost building (in the first row and the first column) and roam around the city, occasionally stealing artifacts from some of the buildings. He can move from one building to another adjacent building by travelling through the road which connects them. You are unable to track down what buildings he visits and what path he follows to reach them. But there is one tracking mechanism in the city. The tracker is capable of storing a single integer $x$ which is initially $0$. Each time the thief travels from a building $A$ to another adjacent building $B$ through a road of length $D(A,B)$, the tracker changes $x$ to $x\oplus D(A,B)$. Each time the thief steals from a building, the tracker reports the value $x$ stored in it and resets it back to $0$. It is known beforehand that the thief will steal in exactly $k$ buildings but you will know the values returned by the tracker only after the thefts actually happen. Your task is to choose the lengths of roads in such a way that no matter what strategy or routes the thief follows, you will be able to exactly tell the location of all the buildings where the thefts occurred from the values returned by the tracker.
The main goal is to assign numbers $A_{i,j}$ from $0$ to $1023$ to all buildings such that all buildings get distinct numbers and assign the road lengths between buildings $B_{x_1,y_1}$ and $B_{x_2,y_2}$ as $A_{x_1,y_1}\oplus A_{x_2,y_2}$. Among all such assignments, try to find the one which has the least sum of road lengths. $2$-Dimensional Gray Code works For now, lets ignore $n<=32$ and assume $n=32$. Let's try to build the roads in such a way that no matter what path the thief takes to reach building $B_{i,j}$, the tracker will always return a fixed value $A_{i,j}$ such that all $A_{i,j}$ are distinct. Then by knowing the values returned by the tracker, one can easily find which building the theft occurred in. The main problem here is not to choose the lengths of the roads, since by choosing the length of road between buildings $B_{x_1,y_1}$ and $B_{x_2,y_2}$ as $A_{x_1,y_1}\oplus A_{x_2,y_2}$, one can always achieve this property. But there is a constraint which needs to be satisfied: The total length of all roads must not exceed $48000$. This is, in fact, a tight constraint (model solution uses $47616$) due to which one needs to choose the values of $A_{i,j}$ very efficiently. Consider this problem - Find a permutation of numbers from $0$ to $2^m-1$ such that the sum of XOR of consecutive integers is minimized. The answer to this is Gray Code or Reflected Binary Code. In the standard Gray Code, bit $0$ is flipped $2^{m-1}$ times, bit $1$ is flipped $2^{m-2}$ times, bit $2$ is flipped $2^{m-3}$ times, $\ldots$, bit $m-1$ is flipped $1$ time. The idea is to use small bits more number of times compared to the larger ones. Our task is to implement this idea in $2$-dimensions. Let's look at the algorithm used to build Gray Code. If we have the Gray Code for $k$ bits, it can be extended to $k+1$ bits by taking a copy of it, reflecting it and appending $1$ to the beginning of the reflected code and $0$ to the beginning of the original one. 
Here, if we have the Gray Code for $2^k \times 2^k$ matrix, it can be first extended to a Gray Code for $2^k \times 2^{k+1}$ matrix and this can further be extended to a Gray Code for $2^{k+1} \times 2^{k+1}$ matrix. If we build a $2^m \times 2^m$ matrix using this algorithm, the total length of roads used will be $\frac{3}{2}\cdot(4^m)\cdot(2^m-1)$. In this problem, $m = 5$. So, total length of roads used = $\frac{3}{2} \cdot 1024 \cdot 31 = 47616$. Once this construction is complete, finding the buildings where thefts occurred is elementary since there can now be only one building corresponding to each value returned by the tracker. Now, coming back to the original problem, we can simply take the first $n$ rows and the first $n$ columns from the constructed matrix. The cost won't increase and the properties still hold.
[ "bitmasks", "constructive algorithms", "divide and conquer", "greedy", "interactive", "math" ]
2,400
#include <bits/stdc++.h>
using namespace std;

// 1673F (interactive). Assign every building a distinct label via a 2-D
// Gray-code-like construction and set each road length to the XOR of the two
// adjacent labels. Then the tracker value accumulated along ANY route between
// two thefts equals the XOR of the labels of the two theft buildings.
const int N = 32;

// Largest power of two dividing n.
int maxpower2(int n) {
    int p=1;
    while(n%2==0) {
        p*=2;
        n/=2;
    }
    return p;
}

int main() {
    int n,k;
    cin >> n >> k;
    // Horizontal road between columns j and j+1 (same in every row); cheaper
    // (smaller) lengths are reused more often, Gray-code style.
    int h[N][N-1];
    for(int i=0;i<N;i++) {
        for(int j=1;j<=N-1;j++) {
            h[i][j-1]=maxpower2(j)*maxpower2(j);
        }
    }
    for(int i=0;i<n;i++) {
        for(int j=0;j<n-1;j++) {
            cout << h[i][j] << " ";
        }
        cout << endl;   // endl flushes — required for the interactor
    }
    // Vertical road between rows i and i+1 (same in every column).
    int v[N-1][N];
    for(int i=1;i<=N-1;i++) {
        for(int j=0;j<N;j++) {
            v[i-1][j]=2*maxpower2(i)*maxpower2(i);
        }
    }
    for(int i=0;i<n-1;i++) {
        for(int j=0;j<n;j++) {
            cout << v[i][j] << " ";
        }
        cout << endl;
    }
    // b[i][j] = label of building (i, j) = XOR of road lengths on any path
    // from (0, 0); built row 0 first, then row by row.
    int b[n][n];
    b[0][0]=0;
    for(int j=1;j<n;j++) {
        b[0][j]=b[0][j-1]^h[0][j-1];
    }
    for(int i=1;i<n;i++) {
        for(int j=0;j<n;j++) {
            b[i][j]=b[i-1][j]^v[i-1][j];
        }
    }
    // Labels are distinct, so they invert to coordinates.
    map<int,pair<int,int> > m;
    for(int i=0;i<n;i++) {
        for(int j=0;j<n;j++) {
            m[b[i][j]]={i,j};
        }
    }
    // y = XOR of all tracker reports so far = label of the previous theft
    // building; x ^ y is the label of the current one.
    int y=0;
    while(k--) {
        int x;
        cin >> x;
        pair<int,int> ans = m[x^y];
        cout << ans.first+1 << " " << ans.second+1 << endl;
        y^=x;
    }
}
1674
A
Number Transformation
You are given two integers $x$ and $y$. You want to choose two \textbf{strictly positive} (greater than zero) integers $a$ and $b$, and then apply the following operation to $x$ \textbf{exactly} $a$ times: replace $x$ with $b \cdot x$. You want to find two positive integers $a$ and $b$ such that $x$ becomes equal to $y$ after this process. If there are multiple possible pairs, you can choose \textbf{any of them}. If there is no such pair, report it. For example: - if $x = 3$ and $y = 75$, you may choose $a = 2$ and $b = 5$, so that $x$ becomes equal to $3 \cdot 5 \cdot 5 = 75$; - if $x = 100$ and $y = 100$, you may choose $a = 3$ and $b = 1$, so that $x$ becomes equal to $100 \cdot 1 \cdot 1 \cdot 1 = 100$; - if $x = 42$ and $y = 13$, there is no answer since you cannot decrease $x$ with the given operations.
The process in the statement can be rephrased as "multiply $x$ by $b^a$". $x \cdot b^a$ will be divisible by $x$, so if $y$ is not divisible by $x$, there is no answer. Otherwise, $a = 1$ and $b = \frac{y}{x}$ can be used.
[ "constructive algorithms", "math" ]
800
# 1674A. x * b^a == y requires y to be a multiple of x; then a = 1 and
# b = y // x always works (b = 1 when x == y). Otherwise print "0 0".
for _ in range(int(input())):
    x, y = map(int, input().split())
    quotient, remainder = divmod(y, x)
    print(*((1, quotient) if remainder == 0 else (0, 0)))
1674
B
Dictionary
The Berland language consists of words having \textbf{exactly two letters}. Moreover, \textbf{the first letter of a word is different from the second letter}. Any combination of two different Berland letters (which, by the way, are the same as the lowercase letters of Latin alphabet) is a correct word in Berland language. The Berland dictionary contains all words of this language. The words are listed in a way they are usually ordered in dictionaries. Formally, word $a$ comes earlier than word $b$ in the dictionary if one of the following conditions hold: - the first letter of $a$ is less than the first letter of $b$; - the first letters of $a$ and $b$ are the same, and the second letter of $a$ is less than the second letter of $b$. So, the dictionary looks like that: - Word $1$: ab - Word $2$: ac - ... - Word $25$: az - Word $26$: ba - Word $27$: bc - ... - Word $649$: zx - Word $650$: zy You are given a word $s$ from the Berland language. Your task is to find its index in the dictionary.
There are many different ways to solve this problem: generate all Berland words with two for-loops and store them in an array, then for each test case, go through the array of words to find the exact word you need; generate all Berland words with two for-loops and store them in a dictionary-like data structure (map in C++, dict in Python, etc), using words as keys and their numbers as values. This allows to search for the index of the given word quickly; for each test case, run two for-loops to iterate over the words, count the number of words we skipped, and stop at the word from the test case; try to invent some formulas that allow counting the number of words before the given one.
[ "combinatorics", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// 1674B. Index (1-based) of a two-letter Berland word: each first letter
// contributes a group of 25 words, and inside a group the second letter's
// rank skips the first letter itself.
int main() {
    int t;
    cin >> t;
    while (t--) {
        string s;
        cin >> s;
        int first = s[0] - 'a';
        int second = s[1] - 'a';
        // Rank of the second letter among the 25 letters != first letter.
        int within = (second < first) ? second : second - 1;
        cout << 25 * first + within + 1 << endl;
    }
}
1674
C
Infinite Replacement
You are given a string $s$, consisting only of Latin letters 'a', and a string $t$, consisting of lowercase Latin letters. In one move, you can replace any letter 'a' in the string $s$ with a string $t$. Note that after the replacement string $s$ might contain letters other than 'a'. You can perform an arbitrary number of moves (including zero). How many different strings can you obtain? Print the number, or report that it is infinitely large. Two strings are considered different if they have different length, or they differ at some index.
Let's consider some cases. If there are letters 'a' in string $t$, then the moves can be performed endlessly. If $t$ itself is equal to "a", then the string won't change, so the answer is $1$. Otherwise, the length of $t$ is at least $2$, so string $s$ will be increasing in length after each move, and the answer is -1. If there are no letters 'a' in string $t$, then the resulting string is only determined by whichever letters 'a' we chose to replace with $t$. That's because once we replace a letter 'a' with string $t$, we can do nothing with the new letters anymore. We can actually imagine that $t$ is equal to "b", and the answer won't change. Now it's easy to see that the answer is equal to the number of strings of length $n$, consisting only of letters 'a' and 'b'. There are two options for each position, and there are $n$ positions, so the answer is $2^n$. Overall complexity: $O(|s| + |t|)$ per testcase.
[ "combinatorics", "implementation", "strings" ]
1,000
# 1674C. If t == "a" the operation never changes s -> exactly 1 string.
# Otherwise, if t contains 'a', s keeps growing forever -> infinitely many (-1).
# Otherwise each 'a' of s is independently replaced or not -> 2^len(s).
for _ in range(int(input())):
    s, t = input(), input()
    if t == "a":
        answer = 1
    elif "a" in t:
        answer = -1
    else:
        answer = 1 << len(s)
    print(answer)
1674
D
A-B-C Sort
You are given three arrays $a$, $b$ and $c$. Initially, array $a$ consists of $n$ elements, arrays $b$ and $c$ are empty. You are performing the following algorithm that consists of two steps: - Step $1$: while $a$ is not empty, you take the last element from $a$ and move it in the middle of array $b$. If $b$ currently has odd length, you can choose: place the element from $a$ to the left or to the right of the middle element of $b$. As a result, $a$ becomes empty and $b$ consists of $n$ elements. - Step $2$: while $b$ is not empty, you take the middle element from $b$ and move it to the end of array $c$. If $b$ currently has even length, you can choose which of two middle elements to take. As a result, $b$ becomes empty and $c$ now consists of $n$ elements. Refer to the Note section for examples.Can you make array $c$ sorted in non-decreasing order?
Let's look at elements $a_n$ and $a_{n - 1}$. After the first step, they will always move to positions $b_1$ and $b_n$ (it's up to you to choose: $a_n \to b_1$ and $a_{n-1} \to b_n$ or vice versa) because all remaining $a_i$ for $i < n - 1$ will be placed between $a_n$ and $a_{n-1}$. After the second step, elements $b_1$ and $b_n$ will always be placed at positions $c_{n-1}$ and $c_n$ (it's also up to you to decide the exact order) because it's easy to see that you first take all $b_i$ for $1 < i < n$ and only after that - $b_1$ and $b_n$. In other words, elements $a_{n-1}$ and $a_n$ are moved to positions $c_{n-1}$ and $c_n$. We can analogically prove that each pair $(a_{n-2i-1}, a_{n-2i})$ is moved to a pair of positions $(c_{n-2i-1}, c_{n-2i})$: you first take all elements $a_j$ for $j > n - 2i$ and place them at positions $[b_1, \dots, b_i]$ and $[b_{n-i+1}, \dots, b_n]$; then you move $a_{n-2i}$ and $a_{n-2i-1}$; finally you move all remaining elements from $a$ between $b_{i+1}$ and $b_{n-i}$. Step $2$ just does everything in "reverse" order to step $1$. It means that array $c$ is basically array $a$, but you can swap elements in pairs $(a_{n-2i-1}, a_{n-2i})$ for $i \ge 0$. And to make $a$ ($c$) sorted, we can try to sort each pair and check - is it enough to sort the whole array or not.
[ "constructive algorithms", "implementation", "sortings" ]
1,200
// 1674D. The two steps only allow swapping inside pairs counted from the
// right end: (a[n-2i-1], a[n-2i]). Sort each such pair, then check whether
// the whole array became non-decreasing.
fun main() {
    val tests = readLine()!!.toInt()
    repeat(tests) {
        val n = readLine()!!.toInt()
        val a = readLine()!!.split(' ').map(String::toInt).toMutableList()
        // Pairs start at index n % 2 so the last pair ends exactly at n - 1.
        var i = n % 2
        while (i < n) {
            if (a[i] > a[i + 1]) {
                val tmp = a[i]
                a[i] = a[i + 1]
                a[i + 1] = tmp
            }
            i += 2
        }
        val ok = (1 until n).all { a[it - 1] <= a[it] }
        println(if (ok) "YES" else "NO")
    }
}
1674
E
Breaking the Wall
Monocarp plays "Rage of Empires II: Definitive Edition" — a strategic computer game. Right now he's planning to attack his opponent in the game, but Monocarp's forces cannot enter the opponent's territory since the opponent has built a wall. The wall consists of $n$ sections, aligned in a row. The $i$-th section initially has durability $a_i$. If durability of some section becomes $0$ or less, this section is considered broken. To attack the opponent, Monocarp needs to break at least two sections of the wall (any two sections: possibly adjacent, possibly not). To do this, he plans to use an onager — a special siege weapon. The onager can be used to shoot any section of the wall; the shot deals $2$ damage to the target section and $1$ damage to adjacent sections. In other words, if the onager shoots at the section $x$, then the durability of the section $x$ decreases by $2$, and the durability of the sections $x - 1$ and $x + 1$ (if they exist) decreases by $1$ each. Monocarp can shoot at any sections any number of times, \textbf{he can even shoot at broken sections}. Monocarp wants to calculate the minimum number of onager shots needed to break at least two sections. Help him!
Let's analyze three cases based on the distance between two sections we are going to break: break two neighboring sections ($i$ and $i+1$); break two sections with another section between them ($i$ and $i+2$); break two sections with more than one section between them. Why exactly these cases? Because the damage from the shots and the possibility to hit both sections with the same shot depends on the distance between them. If there is more than one section between the two we want to break, then any shot hits only one of these sections, so each shot should be aimed at one of those sections, and we break them independently. Let's pick two sections with minimum durability and calculate the number of shots required to break them; if these sections are $i$ and $j$, then the required number of shots is $\lceil \frac{a_i}{2} \rceil + \lceil \frac{a_j}{2} \rceil$. It actually does not matter if the distance between them is less than $3$; if it is so, these sections will be analyzed in one of the other cases. Okay, now let's deal with two sections having exactly one section between them. We can iterate on all combinations of these sections (iterate on $i$ from $1$ to $n-2$ and pick sections $i$ and $i+2$). Let's analyze how can we damage them. If we shoot at the section between them, we deal $1$ damage to both sections; if we shoot at one of those sections, we deal $2$ damage to it and $0$ damage to the other section. So, each shot distributes $2$ damage between these two sections the way we want to distribute it, and the number of shots required to break these two sections is $\lceil \frac{a_i + a_{i+2}}{2} \rceil$. The case when we try to break two adjacent sections is the trickiest one. Let's say that these sections are $i$ and $i+1$, $x = \max(a_i, a_{i+1})$, and $y = \min(a_i, a_{i+1})$. If we target one of these sections, we deal $2$ damage to it and $1$ damage to the other section. 
Let's try to run the following algorithm: shoot at the section with higher durability, until both of them break. It can be slow, but we can see that after the first $x-y$ shots, the durabilities of the sections become equal, and each pair of shots after that deals $3$ damage to both sections. So, we can model the first $x-y$ shots, subtract $2(x-y)$ from $x$ and $(x-y)$ from $y$, and then we'll need $\lceil \frac{x+y}{3} \rceil$ shots. The only case when this doesn't work is if we break both sections before we equalize their durabilities; it means that $2y \le x$ and we need to do only $\lceil \frac{x}{2} \rceil$ shots.
[ "binary search", "brute force", "constructive algorithms", "greedy", "math" ]
2,000
#include <iostream> #include <sstream> #include <cstdio> #include <vector> #include <cmath> #include <queue> #include <string> #include <cstring> #include <cassert> #include <iomanip> #include <algorithm> #include <set> #include <map> #include <ctime> #include <cmath> #define forn(i, n) for(int i=0;i<n;++i) #define fore(i, l, r) for(int i = int(l); i <= int(r); ++i) #define sz(v) int(v.size()) #define all(v) v.begin(), v.end() #define pb push_back #define mp make_pair #define x first #define y1 ________y1 #define y second #define ft first #define sc second #define pt pair<int, int> template<typename X> inline X abs(const X& a) { return a < 0? -a: a; } template<typename X> inline X sqr(const X& a) { return a * a; } typedef long long li; typedef long double ld; using namespace std; const int INF = 1000 * 1000 * 1000; const ld EPS = 1e-9; const ld PI = acos(-1.0); const int N = 200 * 1000 + 13; int n; int a[N]; inline void read() { cin >> n; for (int i = 0; i < n; i++) { cin >> a[i]; } } inline void solve() { int ans = INF; for (int i = 0; i < n - 1; i++) { int cur = 0; int x = a[i], y = a[i + 1]; if (x < y) { swap(x, y); } int cnt = min(x - y, (x + 1) / 2); cur += cnt; x -= 2 * cnt; y -= cnt; if (x > 0 && y > 0) { cur += (x + y + 2) / 3; } ans = min(ans, cur); } for (int i = 0; i < n - 2; i++) { int cur = 0; int x = a[i], y = a[i + 2]; if (x < y) { swap(x, y); } int cnt = (x - y + 1) / 2; cur += cnt; cur += y; ans = min(ans, cur); } sort(a, a + n); ans = min(ans, (a[0] + 1) / 2 + (a[1] + 1) / 2); cout << ans << endl; } int main () { #ifdef fcspartakm freopen("input.txt", "r", stdin); //freopen("output.txt", "w", stdout); #endif srand(time(NULL)); cerr << setprecision(10) << fixed; read(); solve(); //cerr << "TIME: " << clock() << endl; }
1674
F
Desktop Rearrangement
Your friend Ivan asked you to help him rearrange his desktop. The desktop can be represented as a rectangle matrix of size $n \times m$ consisting of characters '.' (empty cell of the desktop) and '*' (an icon). The desktop is called \textbf{good} if all its icons are occupying some prefix of full columns and, possibly, the prefix of the next column (and there are no icons outside this figure). In other words, some amount of first columns will be filled with icons and, possibly, some amount of first cells of the next (after the last full column) column will be also filled with icons (and all the icons on the desktop belong to this figure). This is pretty much the same as the real life icons arrangement. In one move, you can take one icon and move it to any empty cell in the desktop. Ivan loves to add some icons to his desktop and remove them from it, so he is asking you to answer $q$ queries: what is the \textbf{minimum} number of moves required to make the desktop \textbf{good} after adding/removing one icon? Note that \textbf{queries are permanent} and change the state of the desktop.
I've seen a lot of data structures solutions for this problem, but author's solution doesn't use them and works in $O(nm + q)$. Firstly, let's change our matrix to a string $s$, because it will be easier to work with a string than with a matrix. The order of characters will be from top to bottom, from left to right (i. e. the outer cycle by columns, and the inner by rows). Then, let's calculate $sum$ - the number of icons on the desktop (the number of '*' in $s$). Then the answer will be, obviously, the number of dots on the prefix of $s$ of size $sum$. Now let's deal with queries. It can be shown that one query changes our answer by no more than $1$. Let $p = ny + x$ be the position of the cell that is being changed in $s$ (zero-indexed). Then, if $p < sum$, there are two cases. If $s_p$ is '.', then we have one more icon on our prefix, so the answer decreases by one (because we filled one empty space in the good desktop). Otherwise, it increases by one (because this icon is outside our prefix). Then let's change the corresponding character by the opposite. After that, we should move our right border ($sum$) accordingly to the new number of icons. Note that this border is exclusive. If $s_p$ becomes '*', then we will increase the variable $sum$. But before that, if $s_{sum}$ is '.', then there should be an icon, and it is not here yet, so the answer increases. Otherwise, our border will decrease. Then, if $s_{sum - 1}$ is '.', then the answer decreases (because there was a place for an icon, and now it is not needed anymore). Time complexity: $O(nm + q)$.
[ "data structures", "greedy", "implementation" ]
1,800
#include <bits/stdc++.h> using namespace std; static char buf[1010]; int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); // freopen("output.txt", "w", stdout); #endif int n, m, q; scanf("%d %d %d", &n, &m, &q); vector<string> tmp(n); string s; int sum = 0; for (int i = 0; i < n; ++i) { scanf("%s", buf); tmp[i] = buf; sum += count(tmp[i].begin(), tmp[i].end(), '*'); } for (int j = 0; j < m; ++j) { for (int i = 0; i < n; ++i) { s += tmp[i][j]; } } int res = count(s.begin(), s.begin() + sum, '.'); int pos = sum; for (int i = 0; i < q; ++i) { int x, y; scanf("%d %d", &x, &y); --x, --y; int p = y * n + x; if (p < pos) { if (s[p] == '.') { --res; } else { ++res; } } s[p] = (s[p] == '.' ? '*' : '.'); if (s[p] == '*') { if (s[pos] == '.') { ++res; } ++pos; } else { if (s[pos - 1] == '.') { --res; } --pos; } printf("%d\n", res); } return 0; }
1674
G
Remove Directed Edges
You are given a directed acyclic graph, consisting of $n$ vertices and $m$ edges. The vertices are numbered from $1$ to $n$. There are no multiple edges and self-loops. Let $\mathit{in}_v$ be the number of incoming edges (indegree) and $\mathit{out}_v$ be the number of outgoing edges (outdegree) of vertex $v$. You are asked to remove some edges from the graph. Let the new degrees be $\mathit{in'}_v$ and $\mathit{out'}_v$. You are only allowed to remove the edges if the following conditions hold for every vertex $v$: - $\mathit{in'}_v < \mathit{in}_v$ or $\mathit{in'}_v = \mathit{in}_v = 0$; - $\mathit{out'}_v < \mathit{out}_v$ or $\mathit{out'}_v = \mathit{out}_v = 0$. Let's call a set of vertices $S$ cute if for each pair of vertices $v$ and $u$ ($v \neq u$) such that $v \in S$ and $u \in S$, there exists a path either from $v$ to $u$ or from $u$ to $v$ over the non-removed edges. What is the maximum possible size of a cute set $S$ after you remove some edges from the graph and both indegrees and outdegrees of all vertices either decrease or remain equal to $0$?
Let's solve the problem in reverse. Imagine we have already removed some edges, so that the conditions hold. When is some set of vertices considered cute? Since the graph is acyclic, we can topologically sort the vertices in the set. The vertices are reachable from each other, so there exists a path from the $i$-th vertex in the set to the $(i+1)$-st vertex. Thus, there exists a path that goes through all chosen vertices. However, we can make this conclusion even stronger. In the optimal answer, not just the path goes from the $i$-th vertex to the $(i+1)$-st one, but a single edge. That can be shown by contradiction. Let there be some vertices $v$ and $u$ that are adjacent in the chosen cute set. There exists a path between them, but not a single edge. We want to show that this set is not optimal and can be made larger. The vertices on that path don't belong to the set. If they did, they would be between $v$ and $u$ in the set (because of the topological order). We can add them to the set. Every vertex that can reach $v$, can reach them too, and every vertex that can be reached from $u$, can be reached from them. Thus, it will still be a cute set. Now every vertex from $v$ to $u$ has an edge between them and the size of the set is larger. Thus, we showed that the maximum set in the answer is always some path in the graph. So the task is to choose some path, then remove some edges so that this path still exists and the conditions hold. Note that if the conditions hold for some set of remaining edges, then we can remove any edge from it, and the conditions will still be met. Thus, we can only leave this path. Let's look closer into the conditions. What they actually tell is the following. If a vertex has incoming edges, then remove at least one of them. The same for the outgoing edges. Since we are looking for a path, it's enough to leave one outgoing edge for all vertices except the last one and leave one incoming edge for all vertices except the first one. 
In order to achieve that, every vertex except the last one should have at least two outgoing edges and every vertex except the first one should have at least two incoming edges. We can see that this condition is not only necessary, but sufficient as well. Just remove the outgoing edges which don't go to the next vertex and the incoming edges which don't go from the previous vertex. Now we can wrap this up into the dynamic programming. Initialize the answer with $1$, since you can always remove all edges, and get a set with one vertex. Then let $\mathit{dp}_v$ be the longest path such that it starts in vertex $v$, all vertices in it have at least two incoming edges and all vertices except maybe the final one have at least two outgoing edges. Initialize the $\mathit{dp}$ for the vertices that can be the final in the path (have at least two incoming edges) with $1$. Then update $\mathit{dp}_v$ for all $v$ that can be internal vertices (have at least two outgoing and two incoming edges) with $\mathit{dp}_u + 1$ for all outgoing edges $(v, u)$. Finally, update the answer from the vertices that can be the first one in the path. For each vertex $v$ that has at least two outgoing edges, take the value of $\mathit{dp}_u + 1$ for all outgoing edges $(v, u)$. Overall complexity: $O(n + m)$.
[ "dfs and similar", "dp", "graphs" ]
2,000
#include <bits/stdc++.h>
using namespace std;

const int INF = 1e9;

// In/out degrees, adjacency list and memoised DP values for the whole graph.
vector<int> in_deg, out_deg;
vector<vector<int>> adj;
vector<int> memo;

// Longest path starting at v such that every vertex on it has at least two
// incoming edges and every vertex except possibly the last one also has at
// least two outgoing edges (so one edge of each kind can be removed).
int calc(int v) {
    if (memo[v] != -1)
        return memo[v];
    if (in_deg[v] >= 2 && out_deg[v] >= 2) {
        memo[v] = 1;
        for (int u : adj[v])
            memo[v] = max(memo[v], calc(u) + 1);
    } else if (in_deg[v] >= 2) {
        memo[v] = 1;        // can only be the final vertex of a path
    } else {
        memo[v] = -INF;     // cannot appear on any valid path at all
    }
    return memo[v];
}

int main() {
    int n, m;
    scanf("%d%d", &n, &m);
    adj.resize(n);
    in_deg.resize(n);
    out_deg.resize(n);
    for (int i = 0; i < m; ++i) {
        int v, u;
        scanf("%d%d", &v, &u);
        --v;
        --u;
        adj[v].push_back(u);
        ++in_deg[u];
        ++out_deg[v];
    }
    int ans = 1;  // a single vertex is always achievable (remove all edges)
    memo.assign(n, -1);
    // The first vertex of the path only needs two outgoing edges.
    for (int v = 0; v < n; ++v)
        if (out_deg[v] >= 2)
            for (int u : adj[v])
                ans = max(ans, calc(u) + 1);
    printf("%d\n", ans);
    return 0;
}
1675
A
Food for Animals
In the pet store on sale there are: - $a$ packs of dog food; - $b$ packs of cat food; - $c$ packs of universal food (such food is suitable for both dogs and cats). Polycarp has $x$ dogs and $y$ cats. Is it possible that he will be able to buy food for all his animals in the store? Each of his dogs and each of his cats should receive one pack of suitable food for it.
Obviously, the best way to buy food for every pet is to buy the maximum possible amount of food for dogs and cats separately; then $\max(0, x - a)$ dogs and $\max(0, y - b)$ cats will not get food. We will buy universal food for these dogs and cats. Then the answer is YES if $\max(0, x - a) + \max(0, y - b) \le c$, and NO otherwise.
[ "greedy", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// Greedily feed dogs with dog food and cats with cat food, then check
// whether the universal food covers every animal that is still hungry.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int a, b, c, x, y;
        cin >> a >> b >> c >> x >> y;
        int dogsFed = min(a, x);
        int catsFed = min(b, y);
        x -= dogsFed;  // dogs still without food
        y -= catsFed;  // cats still without food
        if (c >= x + y)
            cout << "YES" << endl;
        else
            cout << "NO" << endl;
    }
}
1675
B
Make It Increasing
Given $n$ integers $a_1, a_2, \dots, a_n$. You can perform the following operation on them: - select any element $a_i$ ($1 \le i \le n$) and divide it by $2$ (round down). In other words, you can replace any selected element $a_i$ with the value $\left \lfloor \frac{a_i}{2}\right\rfloor$ (where $\left \lfloor x \right\rfloor$ is – round down the real number $x$). Output the minimum number of operations that must be done for a sequence of integers to become strictly increasing (that is, for the condition $a_1 \lt a_2 \lt \dots \lt a_n$ to be satisfied). Or determine that it is impossible to obtain such a sequence. Note that elements \textbf{cannot} be swapped. The only possible operation is described above. For example, let $n = 3$ and a sequence of numbers $[3, 6, 5]$ be given. Then it is enough to perform two operations on it: - Write the number $\left \lfloor \frac{6}{2}\right\rfloor = 3$ instead of the number $a_2=6$ and get the sequence $[3, 3, 5]$; - Then replace $a_1=3$ with $\left \lfloor \frac{3}{2}\right\rfloor = 1$ and get the sequence $[1, 3, 5]$. The resulting sequence is strictly increasing because $1 \lt 3 \lt 5$.
We will process the elements of the sequence starting from the end of the sequence. Each element $a_i$ ($1 \le i \le n - 1$) will be divided by $2$ until it is less than $a_{i+1}$. If at some point it turns out that $a_{i + 1} = 0$, it is impossible to obtain the desired sequence.
[ "greedy", "implementation" ]
900
#include <bits/stdc++.h>
using namespace std;

// Process the array from right to left: halve a[i] until it drops below
// a[i + 1]. If the two values end up equal (only possible when both are 0),
// a strictly increasing sequence is unreachable.
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (auto &v : a)
        cin >> v;
    int ops = 0;
    for (int i = n - 2; i >= 0; --i) {
        while (a[i] >= a[i + 1] && a[i] > 0) {
            a[i] /= 2;
            ++ops;
        }
        if (a[i] == a[i + 1]) {  // both are zero -> impossible
            cout << -1 << '\n';
            return;
        }
    }
    cout << ops << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--)
        solve();
}
1675
C
Detective Task
Polycarp bought a new expensive painting and decided to show it to his $n$ friends. He hung it in his room. $n$ of his friends entered and exited there one by one. At one moment there was no more than one person in the room. In other words, the first friend entered and left first, then the second, and so on. It is known that at the beginning (before visiting friends) a picture hung in the room. At the end (after the $n$-th friend) it turned out that it disappeared. At what exact moment it disappeared — there is no information. Polycarp asked his friends one by one. He asked each one if there was a picture when he entered the room. Each friend answered one of three: - no (response encoded with 0); - yes (response encoded as 1); - can't remember (response is encoded with ?). Everyone except the thief either doesn't remember or told the \textbf{truth}. The thief can say anything (any of the three options). Polycarp cannot understand who the thief is. He asks you to find out the number of those who can be considered a thief according to the answers.
First, let's note that we will have a transition from $1$ to $0$ only once, otherwise it turns out that first the picture disappeared, then it appeared and disappeared back, but we can consider that a friend in the middle, who answered $1$ lied to us, but this is not true, because even before him the picture disappeared. So we need to find this transition. Since we can also meet $?$, we find the index of the leftmost $0$ (in case of absence, we take $n - 1$) and mark it as $r_0$, and the index of rightmost $1$ (in case of absence, we take $0$) and mark as $l_1$. Answer - the number of indices between them (inclusive), because only they could lie. $r_0 - l_1 + 1$ There could not be a thief to the left of $l_1$, since either the friend under the index $l_1$ lied, or the picture was not stolen before $l_1$. There could not be a thief to the right of $r_0$, since either the painting had already been stolen in the presence of $r_0$'s friend, or it was he who lied.
[ "implementation" ]
1,100
#include <bits/stdc++.h>
using namespace std;

int main() {
    int t;
    cin >> t;
    while (t--) {
        string s;
        cin >> s;
        int n = s.length();
        // pre[i]: the first i answers are consistent with "picture present"
        // (each of them is '1' or '?').
        vector<bool> pre(n + 1);
        pre[0] = true;
        for (int i = 0; i < n; ++i)
            pre[i + 1] = pre[i] && (s[i] == '1' || s[i] == '?');
        // suf[k]: the last k answers are consistent with "picture gone"
        // (each of them is '0' or '?').
        vector<bool> suf(n + 1);
        suf[0] = true;
        for (int i = n - 1; i >= 0; --i)
            suf[n - i] = suf[n - i - 1] && (s[i] == '0' || s[i] == '?');
        // Friend i can be the thief iff everyone before him saw the picture
        // and everyone after him did not.
        int result = 0;
        for (int i = 0; i < n; ++i)
            if (pre[i] && suf[n - i - 1])
                ++result;
        cout << result << endl;
    }
}
1675
D
Vertical Paths
You are given a rooted tree consisting of $n$ vertices. Vertices are numbered from $1$ to $n$. Any vertex can be the root of a tree. A tree is a connected undirected graph without cycles. A rooted tree is a tree with a selected vertex, which is called the root. The tree is specified by an array of parents $p$ containing $n$ numbers: $p_i$ is a parent of the vertex with the index $i$. The parent of a vertex $u$ is a vertex that is the next vertex on the shortest path from $u$ to the root. For example, on the simple path from $5$ to $3$ (the root), the next vertex would be $1$, so the parent of $5$ is $1$. The root has no parent, so for it, the value of $p_i$ is $i$ (the root is the only vertex for which $p_i=i$). Find such a set of paths that: - each vertex belongs to exactly one path, each path can contain one or more vertices; - in each path each next vertex — is a son of the current vertex (that is, paths always lead down — from parent to son); - number of paths is \textbf{minimal}. For example, if $n=5$ and $p=[3, 1, 3, 3, 1]$, then the tree can be divided into three paths: - $3 \rightarrow 1 \rightarrow 5$ (path of $3$ vertices), - $4$ (path of $1$ vertices). - $2$ (path of $1$ vertices). \begin{center} {\small Example of splitting a root tree into three paths for $n=5$, the root of the tree — node $3$.} \end{center}
Let's find a set of leaves of a given tree. From each leaf we will climb up the tree until we meet a vertex already visited. Having met such a vertex, start a new path from the next leaf. The sequence of vertices in the found paths must be deduced in reverse order, because the paths must go from bottom to top. It also follows from this solution that the number of paths will always be equal to the number of leaves in the tree.
[ "graphs", "implementation", "trees" ]
1,300
#include <bits/stdc++.h>
using namespace std;

// Start a path from every leaf and climb towards the root until an
// already-visited vertex is met; the number of paths equals the number
// of leaves. Paths are collected bottom-up and printed top-down.
void solve() {
    int n;
    cin >> n;
    vector<int> parent(n + 1);
    vector<bool> leaf(n + 1, true);
    for (int i = 1; i <= n; ++i) {
        cin >> parent[i];
        leaf[parent[i]] = false;  // anything that is a parent is not a leaf
    }
    if (n == 1) {
        cout << "1\n1\n1\n\n";
        return;
    }
    vector<vector<int>> paths(n + 1);
    vector<bool> used(n + 1, false);
    int cnt = 0;
    for (int i = 1; i <= n; ++i) {
        if (!leaf[i])
            continue;
        used[i] = true;
        paths[cnt].push_back(i);
        int v = i;
        while (parent[v] != v && !used[parent[v]]) {
            v = parent[v];
            used[v] = true;
            paths[cnt].push_back(v);
        }
        ++cnt;
    }
    cout << cnt << '\n';
    for (auto &path : paths) {
        if (path.empty())
            continue;
        cout << (int)path.size() << '\n';
        reverse(path.begin(), path.end());  // print from top to bottom
        for (int v : path)
            cout << v << ' ';
        cout << '\n';
    }
    cout << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--)
        solve();
}
1675
E
Replace With the Previous, Minimize
You are given a string $s$ of lowercase Latin letters. The following operation can be used: - select one character (from 'a' to 'z') that occurs at least once in the string. And replace all such characters in the string with the previous one in alphabetical order on the loop. For example, replace all 'c' with 'b' or replace all 'a' with 'z'. And you are given the integer $k$ — the maximum number of operations that can be performed. Find the minimum lexicographically possible string that can be obtained by performing no more than $k$ operations. The string $a=a_1a_2 \dots a_n$ is lexicographically smaller than the string $b = b_1b_2 \dots b_n$ if there exists an index $k$ ($1 \le k \le n$) such that $a_1=b_1$, $a_2=b_2$, ..., $a_{k-1}=b_{k-1}$, but $a_k < b_k$.
Greedy idea. To minimize the string, we will go from left to right and maintain a variable $mx$ = maximal character, from which we will reduce everything to 'a'. Initially it is 'a' and we spend $0$ of operations on it. Then, at the next symbol, we can either reduce it to 'a' in no more than $k$ operations, or reduce to 'a' the prefix we have already passed and minimize the next character in the remaining operations.
[ "dsu", "greedy", "strings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// Greedy: maintain a threshold `mx` — every character <= mx can still be
// reduced to 'a' within the budget k. When a character is too expensive to
// reach 'a', spend the remaining operations on lowering it (and all equal
// characters) as far as possible, then stop scanning.
void solve() {
    int n, k;
    cin >> n >> k;
    string s;
    cin >> s;
    char mx = 'a';  // everything <= mx will become 'a' at the end
    for (int i = 0; i < n; ++i) {
        if (s[i] <= mx)
            continue;
        if (s[i] - 'a' > k) {
            // Cannot afford to bring s[i] all the way down to 'a'.
            k -= mx - 'a';          // operations already committed to the prefix
            int target = s[i] - k;  // lowest character reachable for s[i]
            for (char c = s[i]; c > target; --c)
                for (char &e : s)
                    if (e == c)
                        e = char(c - 1);
            break;
        }
        mx = max(mx, s[i]);
    }
    for (char &e : s)
        if (e <= mx)
            e = 'a';
    cout << s << endl;
}

int main() {
    int t;
    cin >> t;
    while (t--)
        solve();
}
1675
F
Vlad and Unfinished Business
Vlad and Nastya live in a city consisting of $n$ houses and $n-1$ road. From each house, you can get to the other by moving only along the roads. That is, the city is a tree. Vlad lives in a house with index $x$, and Nastya lives in a house with index $y$. Vlad decided to visit Nastya. However, he remembered that he had postponed for later $k$ things that he has to do before coming to Nastya. To do the $i$-th thing, he needs to come to the $a_i$-th house, things can be done in any order. In $1$ minute, he can walk from one house to another if they are connected by a road. Vlad does not really like walking, so he is interested what is the minimum number of minutes he has to spend on the road to do all things and then come to Nastya. Houses $a_1, a_2, \dots, a_k$ he can visit in any order. He can visit any house multiple times (if he wants).
To begin with, we will hang the tree by the vertex $x$. In fact, we want to go from the root to the vertex $y$, getting off this path to do things and coming back. At one vertex of the path, it is advantageous to get off it in all the necessary directions and then follow it further. So we will traverse each edge leading towards $y$ exactly once, and each edge leading to some of the tasks (but not towards $y$) exactly twice. Let's match each vertex with an edge to its ancestor. If the edge of a vertex leads to $y$, then $y$ is in the subtree of this vertex; similarly with vertices containing tasks. It is necessary for each vertex to determine whether the vertex $y$ is in its subtree and whether there is a vertex from the array $a$; this can be done using a depth-first search, after which we calculate the answer according to the rules described above.
[ "dfs and similar", "dp", "greedy", "trees" ]
1,800
#include <bits/stdc++.h>
using namespace std;

vector<vector<int>> g;
vector<bool> hasTask, leadsToY;

// Propagate upwards whether a subtree contains a task or Nastya's house.
void dfs(int v, int p = -1) {
    for (int u : g[v]) {
        if (u == p)
            continue;
        dfs(u, v);
        if (hasTask[u])
            hasTask[v] = true;
        if (leadsToY[u])
            leadsToY[v] = true;
    }
}

void solve() {
    int n, k;
    cin >> n >> k;
    g.assign(n, {});
    int x, y;
    cin >> x >> y;
    --x;
    --y;
    hasTask.assign(n, false);
    leadsToY.assign(n, false);
    for (int i = 0; i < k; ++i) {
        int v;
        cin >> v;
        hasTask[--v] = true;
    }
    leadsToY[y] = true;
    for (int i = 0; i < n - 1; ++i) {
        int v, u;
        cin >> v >> u;
        --v;
        --u;
        g[v].push_back(u);
        g[u].push_back(v);
    }
    dfs(x);  // root the tree at Vlad's house
    // An edge towards y is walked once; an edge that only leads to tasks
    // is walked twice (there and back). Every non-root vertex owns the
    // edge to its parent.
    int ans = 0;
    for (int i = 0; i < n; ++i) {
        if (i == x)
            continue;
        if (leadsToY[i])
            ++ans;
        else if (hasTask[i])
            ans += 2;
    }
    cout << ans << '\n';
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    int t = 1;
    cin >> t;
    while (t--)
        solve();
}
1675
G
Sorting Pancakes
Nastya baked $m$ pancakes and spread them on $n$ dishes. The dishes are in a row and numbered from left to right. She put $a_i$ pancakes on the dish with the index $i$. Seeing the dishes, Vlad decided to bring order to the stacks and move some pancakes. In one move, he can shift one pancake from any dish to the closest one, that is, select the dish $i$ ($a_i > 0$) and do one of the following: - if $i > 1$, put the pancake on a dish with the previous index, after this move $a_i = a_i - 1$ and $a_{i - 1} = a_{i - 1} + 1$; - if $i < n$, put the pancake on a dish with the following index, after this move $a_i = a_i - 1$ and $a_{i + 1} = a_{i + 1} + 1$. Vlad wants to make the array $a$\textbf{non-increasing}, after moving as few pancakes as possible. Help him find the minimum number of moves needed for this. The array $a=[a_1, a_2,\dots,a_n]$ is called non-increasing if $a_i \ge a_{i+1}$ for all $i$ from $1$ to $n-1$.
For convenience, we will calculate the prefix sums on the array $a$; we will also introduce the array $b$ containing the indexes of all pancakes and calculate the prefix sums on it. Let's use dynamic programming. Let's define $dp[i][last][sum]$ as the required number of operations to correctly lay out the $i$-th prefix, with the final $a_i = last$, and $\sum_{j = 1}^i a_j = sum$. Then you can go to $dp[i][last][sum]$ from $dp[i - 1][last + j][sum - last]$ (the previous number must be greater or equal, and the sum is fixed). To $dp[i - 1][last + j][sum - last]$, it will be necessary to add a certain number of moves needed to get $a_i = last$; let's call it $add$ (the prefix sums above are needed to compute it). Since $add$ depends only on $last$ and $sum$, we only need to choose the minimum $dp[i - 1][last + j][sum - last]$, and this choice can be optimized with suffix minima. As a result, the solution works in $\mathcal{O}(n \cdot m^2)$ — that is how many states need to be processed.
[ "dp" ]
2,300
#include <bits/stdc++.h>
using namespace std;

const int INF = 1e9 + 7;

// dp[i][j][k] = minimum number of moves to lay out the first i+1 dishes so
// that dish i holds at most j pancakes (suffix-minimised over the last
// value) and the prefix holds exactly k pancakes in total.
void solve() {
    int n, m;
    cin >> n >> m;
    vector<int> a(n), pos(1, 0);
    for (int &e : a)
        cin >> e;
    // pos[j] = sum of original dish indices of the first j pancakes;
    // a[] is turned into prefix sums of the dish counts.
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < a[i]; ++j)
            pos.push_back(pos.back() + i);
        if (i > 0)
            a[i] += a[i - 1];
    }
    vector<vector<vector<int>>> dp(
        n, vector<vector<int>>(m + 1, vector<int>(m + 1, INF)));
    // Base case: the first dish ends with exactly j pancakes.
    for (int j = 0; j <= m; ++j) {
        if (a[0] >= j)
            dp[0][j][j] = a[0] - j;  // surplus is pushed to the right
        else
            dp[0][j][j] = pos[j];    // missing pancakes pulled from the right
    }
    // Suffix minima over the "last value" dimension.
    for (int j = m - 1; j >= 0; --j)
        for (int k = j; k <= m; ++k)
            dp[0][j][k] = min(dp[0][j][k], dp[0][j + 1][k]);
    for (int i = 1; i < n; ++i) {
        for (int j = 0; j <= m; ++j) {
            for (int k = j; k <= m; ++k) {
                int add;
                if (a[i] >= k) {
                    add = a[i] - k;  // surplus moved further right
                } else {
                    // Drag `pulled` pancakes here from dishes with index > i.
                    int pulled = min(j, k - a[i]);
                    add = pos[k] - pos[k - pulled] - i * pulled;
                }
                dp[i][j][k] = dp[i - 1][j][k - j] + add;
            }
        }
        for (int j = m - 1; j >= 0; --j)
            for (int k = (i + 1) * j; k <= m; ++k)
                dp[i][j][k] = min(dp[i][j][k], dp[i][j + 1][k]);
    }
    cout << dp[n - 1][0][m];
}

int main() {
    solve();
    cout << "\n";
    return 0;
}
1676
A
Lucky?
A ticket is a string consisting of six digits. A ticket is considered lucky if the sum of the first three digits is equal to the sum of the last three digits. Given a ticket, output if it is lucky or not. Note that a ticket can have leading zeroes.
We need to check if the sum of the first three digits is equal to the sum of the last three digits. This is doable by scanning the input as a string, then comparing the sum of the first three characters with the sum of the last three characters using the if statement and the addition operation.
[ "implementation" ]
800
#include <bits/stdc++.h>
using namespace std;

// A ticket is lucky iff the digit sums of its two halves agree. Comparing
// sums of the raw characters works because every digit shares the same
// '0' offset, which cancels out (3 on each side).
int main() {
    int t;
    cin >> t;
    while (t--) {
        string s;
        cin >> s;
        bool lucky = (s[0] + s[1] + s[2]) == (s[3] + s[4] + s[5]);
        if (lucky)
            cout << "YES" << endl;
        else
            cout << "NO" << endl;
    }
}
1676
B
Equal Candies
There are $n$ boxes with different quantities of candies in each of them. The $i$-th box has $a_i$ candies inside. You also have $n$ friends that you want to give the candies to, so you decided to give each friend a box of candies. But, you don't want any friends to get upset so you decided to eat some (possibly none) candies from each box so that all boxes have the same quantity of candies in them. Note that you may eat a different number of candies from different boxes and you cannot add candies to any of the boxes. What's the minimum total number of candies you have to eat to satisfy the requirements?
Because we can only eat candies from boxes. The only way to make all boxes have the same quantity of candies in them would be to make all candies contain a number of candies equal to the minimum quantity of candies a box initially has. So, we should find this minimum number, let's denote it as $m$, and then for each box, there should be eaten $a_i - m$ candies. So the answer would be the sum of $a_i - m$ over all $i$-s ($1 \leq i \leq n$).
[ "greedy", "math", "sortings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Candies can only be eaten, never added, so every box must be reduced to
// the minimum quantity; the answer is the total surplus above that minimum.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (int &v : a)
            cin >> v;
        int low = *min_element(a.begin(), a.end());
        int eaten = 0;
        for (int v : a)
            eaten += v - low;
        cout << eaten << "\n";
    }
}
1676
C
Most Similar Words
You are given $n$ words of \textbf{equal} length $m$, consisting of lowercase Latin alphabet letters. The $i$-th word is denoted $s_i$. In one move you can choose \textbf{any position in any single word} and change the letter at that position to the previous or next letter in alphabetical order. For example: - you can change 'e' to 'd' or to 'f'; - 'a' can only be changed to 'b'; - 'z' can only be changed to 'y'. The difference between two words is the \textbf{minimum} number of moves required to make them equal. For example, the difference between "best" and "cost" is $1 + 10 + 0 + 0 = 11$. Find the minimum difference of $s_i$ and $s_j$ such that $(i < j)$. In other words, find the minimum difference over all possible pairs of the $n$ words.
Firstly, given any pair of strings of length $m$, we should be able to tell the difference between them. It's enough to find the sum of absolute differences between each character from the same position. Now, we should go through all possible pairs and pick the minimum value over all of them using the function we use to calculate the difference.
[ "brute force", "greedy", "implementation", "math", "strings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Difference between two equal-length words: the sum of per-position
// alphabet distances (each single-step letter change costs one move).
int cost(string& a, string& b) {
    int total = 0;
    for (size_t i = 0; i < a.size(); ++i)
        total += abs(a[i] - b[i]);
    return total;
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, m;
        cin >> n >> m;
        vector<string> words(n);
        for (auto &w : words)
            cin >> w;
        // Brute force over all pairs, keeping the smallest difference.
        int best = INT_MAX;
        for (int i = 0; i < n; ++i)
            for (int j = i + 1; j < n; ++j)
                best = min(best, cost(words[i], words[j]));
        cout << best << "\n";
    }
}
1676
D
X-Sum
Timur's grandfather gifted him a chessboard to practice his chess skills. This chessboard is a grid $a$ with $n$ rows and $m$ columns with each cell having a \textbf{non-negative} integer written on it. Timur's challenge is to place a bishop on the board such that the sum of all cells attacked by the bishop is \textbf{maximal}. The bishop attacks in all directions diagonally, and there is no limit to the distance which the bishop can attack. Note that the cell on which the bishop is placed is also considered attacked. Help him find the maximal sum he can get.
The solution is to check the sum over all diagonals for each cell. For a cell ($i,j$) we can iterate over all elements in all its diagonals. This will be in total $O(max(n, m))$ elements. The complexity will be $O(n \cdot m \cdot max(n, m))$. $O(n \cdot m)$ solutions involving precomputation are also possible but aren't needed.
[ "brute force", "greedy", "implementation" ]
1,000
#include <bits/stdc++.h>
using namespace std;

// For every cell, walk all four diagonal directions and sum the attacked
// cells. The starting cell is visited by all four walks, so three extra
// copies of it are subtracted afterwards.
void solve() {
    int n, m;
    cin >> n >> m;
    vector<vector<int>> a(n, vector<int>(m));
    for (auto &row : a)
        for (int &v : row)
            cin >> v;
    const int dr[4] = {-1, -1, 1, 1};
    const int dc[4] = {-1, 1, -1, 1};
    int best = 0;
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < m; ++j) {
            int sum = 0;
            for (int d = 0; d < 4; ++d) {
                int r = i, c = j;
                while (r >= 0 && r < n && c >= 0 && c < m) {
                    sum += a[r][c];
                    r += dr[d];
                    c += dc[d];
                }
            }
            sum -= 3 * a[i][j];  // counted four times, keep it once
            best = max(best, sum);
        }
    }
    cout << best << endl;
}

int main() {
    int t;
    cin >> t;
    while (t--)
        solve();
}
1676
E
Eating Queries
Timur has $n$ candies. The $i$-th candy has a quantity of sugar equal to $a_i$. So, by eating the $i$-th candy, Timur consumes a quantity of sugar equal to $a_i$. Timur will ask you $q$ queries regarding his candies. For the $j$-th query you have to answer what is the \textbf{minimum} number of candies he needs to eat in order to reach a quantity of sugar \textbf{greater than or equal to} $x_j$ or print -1 if it's not possible to obtain such a quantity. In other words, you should print the minimum possible $k$ such that after eating $k$ candies, Timur consumes a quantity of sugar of at least $x_j$ or say that no possible $k$ exists. Note that he can't eat the same candy twice and queries are independent of each other (Timur can use the same candy in different queries).
Let's solve the problem with just one query. Greedily, we should pick the candies with the most sugar first, since there is no benefit to picking a candy with less sugar. So the solution is as follows: sort the candies in descending order, and then find the prefix whose sum is $\geq x$. This is $\mathcal{O}(n)$ per query, which is too slow for us. To speed it up, notice that we just need to find a prefix sum at least $x$. So if we compute the prefix sums of the reverse-sorted array, we need to find the first element that is at least $x$. Since all elements of $a$ are positive, the array of prefix sums is increasing. Therefore, you can binary search the first element $\geq x$. This solves the problem in $\log n$ per query. Total time complexity: $\mathcal{O}(q \log n + n)$.
[ "binary search", "greedy", "sortings" ]
1,100
#include <bits/stdc++.h>
using namespace std;

// Eat the sweetest candies first: sort descending, build prefix sums
// (strictly increasing since all a[i] > 0), then answer each query with a
// binary search for the first prefix reaching x.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, q;
        cin >> n >> q;
        vector<long long> a(n);
        for (auto &v : a)
            cin >> v;
        sort(a.rbegin(), a.rend());
        vector<long long> pref(n);
        partial_sum(a.begin(), a.end(), pref.begin());
        while (q--) {
            long long x;
            cin >> x;
            auto it = lower_bound(pref.begin(), pref.end(), x);
            if (it == pref.end())
                cout << -1 << "\n";  // even all candies are not enough
            else
                cout << (it - pref.begin()) + 1 << "\n";
        }
    }
}
1676
F
Longest Strike
Given an array $a$ of length $n$ and an integer $k$, you are tasked to find any two numbers $l$ and $r$ ($l \leq r$) such that: - For each $x$ $(l \leq x \leq r)$, $x$ appears in $a$ at least $k$ times (i.e. $k$ or more array elements are equal to $x$). - The value $r-l$ is maximized. If no numbers satisfy the conditions, output -1. For example, if $a=[11, 11, 12, 13, 13, 14, 14]$ and $k=2$, then: - for $l=12$, $r=14$ the first condition fails because $12$ does not appear at least $k=2$ times. - for $l=13$, $r=14$ the first condition holds, because $13$ occurs at least $k=2$ times in $a$ and $14$ occurs at least $k=2$ times in $a$. - for $l=11$, $r=11$ the first condition holds, because $11$ occurs at least $k=2$ times in $a$. A pair of $l$ and $r$ for which the first condition holds and $r-l$ is maximal is $l = 13$, $r = 14$.
Let's call a value good if it appears at least $k$ times. For example, if $a=[1,1,2,2,3,4,4,4,5,5,6,6]$ and $k=2$, then good values are $[1,2,4,5,6]$. So we need to find the longest subarray of this array in which all values are consecutive. For example, the subarray $[4,5,6]$ is the answer, because all values are good and the length of the array is longest. There are many ways to do this. For example, we can see when the difference between two elements is more than $1$, and then break the array into parts based on that. For instance, $[1,2,4,5,6] \to [1,2], [4,5,6]$. You can also iterate from left to right and keep track of the size of the current array. Time complexity: $\mathcal{O}(n)$.
[ "data structures", "greedy", "implementation", "sortings", "two pointers" ]
1,300
#include <bits/stdc++.h>
using namespace std;

// Collect all "good" values (appearing at least k times) in increasing
// order — a std::map iterates its keys sorted — then find the longest run
// of consecutive integers among them.
void solve() {
    int n, k;
    cin >> n >> k;
    map<int, int> freq;
    for (int i = 0; i < n; ++i) {
        int v;
        cin >> v;
        ++freq[v];
    }
    vector<int> good;
    for (auto &[value, cnt] : freq)
        if (cnt >= k)
            good.push_back(value);
    if (good.empty()) {
        cout << -1 << endl;
        return;
    }
    int best = 0;
    int bestL = good[0], bestR = good[0];
    int runStart = good[0];
    for (size_t i = 1; i < good.size(); ++i) {
        if (good[i] - 1 == good[i - 1]) {
            if (good[i] - runStart > best) {
                best = good[i] - runStart;
                bestL = runStart;
                bestR = good[i];
            }
        } else {
            runStart = good[i];  // run broken, start a new one here
        }
    }
    cout << bestL << " " << bestR << endl;
}

int main(int argc, char* argv[]) {
    int t;
    cin >> t;
    while (t--)
        solve();
}
1676
G
White-Black Balanced Subtrees
You are given a rooted tree consisting of $n$ vertices numbered from $1$ to $n$. The root is vertex $1$. There is also a string $s$ denoting the color of each vertex: if $s_i = B$, then vertex $i$ is black, and if $s_i = W$, then vertex $i$ is white. A subtree of the tree is called balanced if the number of white vertices equals the number of black vertices. Count the number of balanced subtrees. A tree is a connected undirected graph without cycles. A rooted tree is a tree with a selected vertex, which is called the root. In this problem, all trees have root $1$. The tree is specified by an array of parents $a_2, \dots, a_n$ containing $n-1$ numbers: $a_i$ is the parent of the vertex with the number $i$ for all $i = 2, \dots, n$. The parent of a vertex $u$ is a vertex that is the next vertex on a simple path from $u$ to the root. The subtree of a vertex $u$ is the set of all vertices that pass through $u$ on a simple path to the root. For example, in the picture below, $7$ is in the subtree of $3$ because the simple path $7 \to 5 \to 3 \to 1$ passes through $3$. Note that a vertex is included in its subtree, and the subtree of the root is the entire tree. \begin{center} The picture shows the tree for $n=7$, $a=[1,1,2,3,3,5]$, and $s=WBBWWBW$. The subtree at the vertex $3$ is balanced. \end{center}
Let's run a dynamic programming from the leaves to the root. For each vertex store the values of the number of balanced subtrees, as well as the number of white and black vertices in it. Then from a vertex we can count the total number of white vertices in its subtree as well as the black vertices in its subtree, and update our total if they are equal. Remember to include the color of the vertex itself in these counts. The answer is the answer at the root. Therefore the problem is solved in $\mathcal{O}(n)$ time.
[ "dfs and similar", "dp", "graphs", "trees" ]
1,300
#include <bits/stdc++.h>
using namespace std;

void solve() {
    int n;
    cin >> n;
    vector<vector<int>> children(n + 1);
    for (int i = 2; i <= n; ++i) {
        int p;
        cin >> p;
        children[p].push_back(i);
    }
    string s;
    cin >> s;
    int balancedCount = 0;
    // Returns (#white - #black) over the subtree of v and counts every
    // subtree whose balance is exactly zero. A leaf's balance is +-1, so
    // leaves can never be counted.
    function<int(int)> balance = [&](int v) {
        int bal = (s[v - 1] == 'B') ? -1 : 1;
        for (int c : children[v])
            bal += balance(c);
        if (bal == 0)
            ++balancedCount;
        return bal;
    };
    balance(1);  // vertex 1 is always the root
    cout << balancedCount << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    while (tt--)
        solve();
}
1676
H1
Maximum Crossings (Easy Version)
The only difference between the two versions is that in this version $n \leq 1000$ and the sum of $n$ over all test cases does not exceed $1000$. A terminal is a row of $n$ equal segments numbered $1$ to $n$ in order. There are two terminals, one above the other. You are given an array $a$ of length $n$. For all $i = 1, 2, \dots, n$, there should be a straight wire from some point on segment $i$ of the top terminal to some point on segment $a_i$ of the bottom terminal. You can't select the endpoints of a segment. For example, the following pictures show two possible wirings if $n=7$ and $a=[4,1,4,6,7,7,5]$. A crossing occurs when two wires share a point in common. In the picture above, crossings are circled in red. What is the \textbf{maximum} number of crossings there can be if you place the wires optimally?
Let's look at two wires from $i \to a_i$ and $j \to a_j$. If $a_i < a_j$, there can never be any intersection. If $a_i > a_j$, there has to be an intersection. If $a_i = a_j$, it is possible that there is an intersection or not, depending on how we arrange the wires on the bottom terminal. Since we want to maximize the number of intersections, we just need to count the number of pairs $(i,j)$ such that $a_i \geq a_j$. You can brute force all pairs in $\mathcal{O}(n^2)$.
[ "brute force" ]
1,400
#include <bits/stdc++.h>
using namespace std;

// Two wires i < j must cross when a[i] > a[j] and can be forced to cross
// when a[i] == a[j], so the maximum is the count of pairs with a[i] >= a[j].
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (int &v : a)
        cin >> v;
    int crossings = 0;
    for (int i = 0; i < n; ++i)
        for (int j = i + 1; j < n; ++j)
            if (a[i] >= a[j])
                ++crossings;
    cout << crossings << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    while (tt--)
        solve();
}
1676
H2
Maximum Crossings (Hard Version)
The only difference between the two versions is that in this version $n \leq 2 \cdot 10^5$ and the sum of $n$ over all test cases does not exceed $2 \cdot 10^5$. A terminal is a row of $n$ equal segments numbered $1$ to $n$ in order. There are two terminals, one above the other. You are given an array $a$ of length $n$. For all $i = 1, 2, \dots, n$, there should be a straight wire from some point on segment $i$ of the top terminal to some point on segment $a_i$ of the bottom terminal. You can't select the endpoints of a segment. For example, the following pictures show two possible wirings if $n=7$ and $a=[4,1,4,6,7,7,5]$. A crossing occurs when two wires share a point in common. In the picture above, crossings are circled in red. What is the \textbf{maximum} number of crossings there can be if you place the wires optimally?
Read the solution of the easy version. We want to count the number of pairs $(i,j)$ such that $i<j$ and $a_i \geq a_j$. This is a standard problem, and we can do this we can use a segment tree or BIT, for example. Insert the $a_j$ from $j=1$ to $n$, and then for each $a_j$ count the number of $a_i \leq a_j$ using a BIT. It is also related to the problem of counting inversions, so you can solve it using a modification of merge sort. Either way, the solution is $\mathcal{O}(n \log n)$.
[ "data structures", "divide and conquer", "sortings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

const int MAX = 200007;
const int MOD = 1000000007;

// Merge the sorted halves arr[lo..mid-1] and arr[mid..hi] back into arr,
// counting strict inversions: whenever an element of the right half is
// emitted before the rest of the left half, it forms (mid - i) pairs
// with a[i] > a[j], i < j.
long long mergeHalves(vector<int> &arr, vector<int> &buf, int lo, int mid, int hi) {
    long long inv = 0;
    int i = lo, j = mid, k = lo;
    while (i < mid && j <= hi) {
        if (arr[i] <= arr[j]) {
            buf[k++] = arr[i++];
        } else {
            inv += mid - i;
            buf[k++] = arr[j++];
        }
    }
    while (i < mid) buf[k++] = arr[i++];
    while (j <= hi) buf[k++] = arr[j++];
    for (int t = lo; t <= hi; t++) arr[t] = buf[t];
    return inv;
}

// Classic merge-sort inversion count over arr[lo..hi]; sorts arr as a
// side effect.
long long countInversions(vector<int> &arr, vector<int> &buf, int lo, int hi) {
    if (lo >= hi) return 0;
    int mid = (lo + hi) / 2;
    long long inv = countInversions(arr, buf, lo, mid)
                  + countInversions(arr, buf, mid + 1, hi);
    return inv + mergeHalves(arr, buf, lo, mid + 1, hi);
}

// Maximum Crossings (hard): answer = #pairs i < j with a[i] >= a[j]
//   = strict inversions + pairs of equal values.
// Inversions come from merge sort (O(n log n)); equal pairs are counted
// run-by-run over the now-sorted array as C(run, 2).
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (auto &x : a) cin >> x;

    vector<int> buf(n);
    long long res = countInversions(a, buf, 0, n - 1);

    // a is sorted at this point; add C(run, 2) for each run of equal values.
    long long run = 1;
    for (int i = 1; i < n; i++) {
        if (a[i] == a[i - 1]) {
            run++;
        } else {
            res += run * (run - 1) / 2;
            run = 1;
        }
    }
    res += run * (run - 1) / 2;

    cout << res << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1677
A
Tokitsukaze and Strange Inequality
Tokitsukaze has a permutation $p$ of length $n$. Recall that a permutation $p$ of length $n$ is a sequence $p_1, p_2, \ldots, p_n$ consisting of $n$ distinct integers, each of which from $1$ to $n$ ($1 \leq p_i \leq n$). She wants to know how many different indices tuples $[a,b,c,d]$ ($1 \leq a < b < c < d \leq n$) in this permutation satisfy the following two inequalities: \begin{center} $p_a < p_c$ and $p_b > p_d$. \end{center} Note that two tuples $[a_1,b_1,c_1,d_1]$ and $[a_2,b_2,c_2,d_2]$ are considered to be different if $a_1 \ne a_2$ or $b_1 \ne b_2$ or $c_1 \ne c_2$ or $d_1 \ne d_2$.
We can calculate the answer in two steps. In the first step, for each $b$, let $f_b$ represent the number of $p_d$ with $p_b > p_d$ in the interval $[b+1,n]$. We can calculate $f$ in $\mathcal{O}(n^2)$. In the second step, calculate the answer. First we enumerate $c$ from $1$ to $n$, and then enumerate $a$ from $1$ to $c-1$. When $p_a < p_c$, add the $f_b$ in the interval $[a+1,c-1]$ to the answer. Before enumerating $a$, we can calculate the prefix sums of $f$ first, so we can add the $f_b$ of an interval to the answer in $\mathcal{O}(1)$. The time complexity of this step is $\mathcal{O}(n^2)$. However, this would also count the cases where $d$ lies in the interval $[a+1,c-1]$, which is illegal because $c < d$ is required. So we need to maintain $f$ while enumerating $c$: enumerate $b$ from $1$ to $c-1$, and if $p_b > p_c$, decrease $f_b$ by $1$. Here $p_c$ is regarded as $p_d$; that is, we subtract the cases where $d$ equals the current $c$, so as to remove the illegal cases. The time complexity of this step is also $\mathcal{O}(n^2)$. Time complexity: $\mathcal{O}(n^2)$. By the way, a Fenwick Tree or Segment Tree also passes; the time complexity is then $\mathcal{O}(n^2 \log n)$.
[ "brute force", "data structures", "dp" ]
1,600
#include <bits/stdc++.h>
using namespace std;

const int MAXN = 1e5 + 10;

int a[MAXN];
long long f[MAXN], pref[MAXN];

// Tokitsukaze and Strange Inequality: count tuples a < b < c < d with
// p_a < p_c and p_b > p_d.
//
// f[b] = number of positions d > b with p_b > p_d.  While sweeping c we
// first treat the current c as a spent candidate for d and remove it
// from every f[b] with b < c, so f only counts d strictly beyond c.
// With prefix sums over f, each valid (a, c) pair contributes the sum
// of f[b] for a < b < c in O(1), giving O(n^2) per test overall.
void solve() {
    int n;
    scanf("%d", &n);
    for (int i = 1; i <= n; i++) scanf("%d", &a[i]);

    for (int b = 1; b <= n; b++) {
        f[b] = 0;
        for (int d = b + 1; d <= n; d++)
            if (a[b] > a[d]) f[b]++;
    }

    long long ans = 0;
    for (int c = 1; c <= n; c++) {
        // Position c can no longer serve as d for any earlier b.
        for (int b = 1; b < c; b++)
            if (a[b] > a[c]) f[b]--;

        pref[0] = 0;
        for (int i = 1; i <= c; i++) pref[i] = pref[i - 1] + f[i];

        for (int i = 1; i < c; i++)
            if (a[i] < a[c]) ans += pref[c - 1] - pref[i];
    }
    printf("%lld\n", ans);
}

int main() {
    int T;
    scanf("%d", &T);
    while (T--) solve();
    return 0;
}
1677
B
Tokitsukaze and Meeting
Tokitsukaze is arranging a meeting. There are $n$ rows and $m$ columns of seats in the meeting hall. There are exactly $n \cdot m$ students attending the meeting, including several naughty students and several serious students. The students are numerated from $1$ to $n\cdot m$. The students will enter the meeting hall in order. When the $i$-th student enters the meeting hall, he will sit in the $1$-st column of the $1$-st row, and the students who are already seated will move back one seat. Specifically, the student sitting in the $j$-th ($1\leq j \leq m-1$) column of the $i$-th row will move to the $(j+1)$-th column of the $i$-th row, and the student sitting in $m$-th column of the $i$-th row will move to the $1$-st column of the $(i+1)$-th row. For example, there is a meeting hall with $2$ rows and $2$ columns of seats shown as below: There will be $4$ students entering the meeting hall in order, represented as a binary string "1100", of which '0' represents naughty students and '1' represents serious students. The changes of seats in the meeting hall are as follows: Denote a row or a column good if and only if there is at least one serious student in this row or column. Please predict the number of good rows and columns just after the $i$-th student enters the meeting hall, for all $i$.
Obviously, we can calculate the answers for rows and columns separately. For the answer for columns, observe that since there are only $n \cdot m$ students in total and no student ever leaves, every time a new student enters the meeting hall all columns shift one step to the right circularly, so the answer never decreases. If the $i$-th student is a serious student and all previous students with subscript $j$, where $0 < j < i$ and $j \% m = i \% m$, are naughty students, then the column answer increases by $1$. For the answer for rows, we can transfer it from the answer at step $i-m$, which is equivalent to adding a new row to the answer at step $i-m$. Suppose the last serious student is the $j$-th student. If $i-j<m$, the answer increases by $1$; otherwise the answer is the same as when the $(i-m)$-th student entered the meeting hall.
[ "data structures", "implementation", "math" ]
1,700
#include<bits/stdc++.h>
using namespace std;

int rowGood[1000100];   // rowGood[r]: good-row count for states with i % m == r
int colSeen[1000100];   // colSeen[r] = 1 once a serious student landed in residue class r
int n, m;

// Reset only the first max(n, m) + 1 cells; higher indices are never touched.
void reset() {
    int lim = max(n, m);
    for (int i = 0; i <= lim; i++) {
        rowGood[i] = 0;
        colSeen[i] = 0;
    }
}

// Columns shift right circularly with every arrival, so a column becomes
// good exactly when the first serious student of its residue class
// (i mod m) enters, and stays good forever.  The row answer at step i
// extends the answer at step i - m by one new front row, which is good
// iff a serious student arrived within the last m entries.
void solve() {
    scanf("%d%d", &n, &m);
    reset();

    int lastSerious = -n * m;  // far enough back that i - lastSerious >= m initially
    int goodCols = 0;
    char ch;
    for (int i = 0; i < n * m; i++) {
        scanf(" %c", &ch);
        if (ch == '1') {
            lastSerious = i;
            if (!colSeen[i % m]) {
                colSeen[i % m] = 1;
                goodCols++;
            }
        }
        if (i - lastSerious < m) rowGood[i % m]++;

        if (i) printf(" ");
        printf("%d", rowGood[i % m] + goodCols);
    }
    printf("\n");
}

int main() {
    int T;
    scanf("%d", &T);
    while (T--) solve();
    return 0;
}
1677
C
Tokitsukaze and Two Colorful Tapes
Tokitsukaze has two colorful tapes. There are $n$ distinct colors, numbered $1$ through $n$, and each color appears exactly once on each of the two tapes. Denote the color of the $i$-th position of the first tape as $ca_i$, and the color of the $i$-th position of the second tape as $cb_i$. Now Tokitsukaze wants to select each color an integer value from $1$ to $n$, distinct for all the colors. After that she will put down the color values in each colored position on the tapes. Denote the number of the $i$-th position of the first tape as $numa_i$, and the number of the $i$-th position of the second tape as $numb_i$. For example, for the above picture, assuming that the color red has value $x$ ($1 \leq x \leq n$), it appears at the $1$-st position of the first tape and the $3$-rd position of the second tape, so $numa_1=numb_3=x$. Note that each color $i$ from $1$ to $n$ should have a \textbf{distinct} value, and the same color which appears in both tapes has the same value. After labeling each color, the beauty of the two tapes is calculated as $$\sum_{i=1}^{n}|numa_i-numb_i|.$$ Please help Tokitsukaze to find the highest possible beauty.
First, find the cycle directly, take out all the cycles, and then fill each cycle in the order: maximum, minimum, maximum, minimum $\ldots$. Note that when you encounter an odd cycle, the last one should be empty and fill in the middle value. For example, in a ternary cycle, if the first and the second position are filled with $1$ and $9$, it can be observed that the contribution of the ternary cycle to the answer remains unchanged no matter which number between $2 \sim 8$ is filled in the middle. So this situation can be left out first, and finally fill in whatever number is left. The same to the cycles with odd sizes such as self cycles and five membered cycles, because the last number does not provide any contribution. In this way, you can directly construct a solution with the maximum $score$. We notice that if we take out the numbers that has already been filled in each cycle and put them into a new cycled array $h$, in fact, the numbers providing contribution are only at the "peak" and "valley" points of the array. We define "peak" as the point with subscript $i$ where $h_{i}>h_{i-1}$ and $h_{i}>h_{i+1}$, and "valley" is the point with subscript $i$ where $h_{i}<h_{i-1}$ and $h_{i}<h_{i+1}$. Obviously, each "peak" will provide contribution of $2 \cdot h_{i}$, each "valley" will provide contribution of $-2 \cdot h_{i}$. For the points which are neither "peak"s nor "valley"s, they do not provide any contribution. In order to maximize the score, we make the larger number in the permutation "peak" and the smaller number "valley". There are $\left \lfloor \frac{CircleSize}{2} \right \rfloor$ "peak"s and "valley"s for a circle with a length of $Circlesize$. So the expression of the final answer can be derived from the summation formula of the arithmetic sequence: Let $c=\sum \left \lfloor \frac{CircleSize}{2} \right \rfloor$, $N$ represents the size of the permutation, $score=2c \cdot (N-c)$.
[ "constructive algorithms", "dfs and similar", "graphs", "greedy" ]
1,900
#include <bits/stdc++.h>
using namespace std;

const int MAXN = 100005;

int n;
int tapeA[MAXN], tapeB[MAXN];  // colors at each position of the two tapes
int posOnB[MAXN];              // posOnB[color] = position of that color on tape 2
bool visited[MAXN];

// Positions form a functional graph: position p on tape 1 maps to the
// position of the same color on tape 2.  A cycle of length L contributes
// floor(L / 2) peak/valley pairs; with c = sum of those over all cycles,
// the maximum beauty is 2 * c * (n - c).
int main() {
    int T;
    scanf("%d", &T);
    while (T--) {
        scanf("%d", &n);
        for (int i = 1; i <= n; i++) visited[i] = false;
        for (int i = 1; i <= n; i++) scanf("%d", &tapeA[i]);
        for (int i = 1; i <= n; i++) {
            scanf("%d", &tapeB[i]);
            posOnB[tapeB[i]] = i;
        }

        long long c = 0;
        for (int start = 1; start <= n; start++) {
            // Walk the cycle iteratively (the original DFS is tail-recursive,
            // so a loop is an exact equivalent without deep stack usage).
            int len = 0;
            for (int p = start; !visited[p]; p = posOnB[tapeA[p]]) {
                visited[p] = true;
                ++len;
            }
            c += len / 2;
        }
        printf("%lld\n", (c * (n - c)) << 1);
    }
    return 0;
}
1677
D
Tokitsukaze and Permutations
Tokitsukaze has a permutation $p$. She performed the following operation to $p$ \textbf{exactly} $k$ times: in one operation, for each $i$ from $1$ to $n - 1$ in order, if $p_i$ > $p_{i+1}$, swap $p_i$, $p_{i+1}$. After exactly $k$ times of operations, Tokitsukaze got a new sequence $a$, obviously the sequence $a$ is also a permutation. After that, Tokitsukaze wrote down the value sequence $v$ of $a$ on paper. Denote the value sequence $v$ of the permutation $a$ of length $n$ as $v_i=\sum_{j=1}^{i-1}[a_i < a_j]$, where the value of $[a_i < a_j]$ define as if $a_i < a_j$, the value is $1$, otherwise is $0$ (in other words, $v_i$ is equal to the number of elements greater than $a_i$ that are to the left of position $i$). Then Tokitsukaze went out to work. There are three naughty cats in Tokitsukaze's house. When she came home, she found the paper with the value sequence $v$ to be bitten out by the cats, leaving several holes, so that the value of some positions could not be seen clearly. She forgot what the original permutation $p$ was. She wants to know how many different permutations $p$ there are, so that the value sequence $v$ of the new permutation $a$ after \textbf{exactly} $k$ operations is the same as the $v$ written on the paper (not taking into account the unclear positions). Since the answer may be too large, print it modulo $998\,244\,353$.
Consider finding the relationship between the sequence $v$ and the permutation $p$. It can be observed that the permutation $p$ has a bijective relationship with the sequence $c$ (here $c$ denotes the value sequence $v$ of $p$). That is to say, a sequence $c$ corresponds to exactly one permutation $p$, as can be proved: let $S=\{1,2,\ldots,n\}$; the last number can be determined through $c_n$, because there is no number after this position. We observe that $p_n$ is the $(c_n+1)$-th largest number in $S$; then delete $p_n$ from $S$, and we can get $p_{n-1}$. Repeating this process, we recover the whole permutation. Consider how $v$ evolves: each bubble-sort pass first makes $v_i=\max(v_{i}-1,0)$, then the whole $v$ moves left, $v_1$ is directly discarded, and $v_n$ is set to $0$. This is because each position with a positive value is preceded by a number larger than the current one, so it will definitely be exchanged and moved forward, and $v_i$ decreases by $1$. For example, if the current $v=[0,0,2,1,1]$, after one bubble-sort pass, $v=[0,1,0,0,0]$. To avoid confusion, the $V$ array below is the $v$ in the input. It is easy to count after knowing the above conclusion. For a position $i$ ($i\le k$), it can be observed that after $k$ bubble-sort passes it is directly covered, so just multiply the answer by $\prod_{i=1}^k i$. For a position $i$ ($k\lt i \le n$): if $V_{i-k}\ne -1$ and $V_{i-k}>0$, then $v_i$ is uniquely determined; if $V_{i-k}=-1$, then $v_i$ has $i$ possible values, so multiply the answer by $i$; if $V_{i-k}=0$, then $v_i-k\le 0$, so multiply the answer by $k+1$. Note that for positions $i$ ($i\ge n-k+1$), $V_i$ should be $0$ or $-1$; otherwise the answer must be $0$. Complexity: $\mathcal{O}(n)$.
[ "dp", "math" ]
2,500
#include<cstdio>
#include<iostream>
#include<cstring>
#include<algorithm>
using namespace std;

const int MOD = 998244353;
int a[1000005];

// Each bubble-sort pass maps v_i -> max(v_i - 1, 0) and shifts v one
// position left, so after k passes position i is governed by the input
// value V[i-k]:
//   - positions i <= k are freely refilled          -> factor k!
//   - V[i-k] == -1 (eaten by the cats)              -> factor i
//   - V[i-k] == 0  (original v_i was anywhere 0..k) -> factor min(i-1, k) + 1
//   - V[i-k]  > 0  -> v_i uniquely determined, valid only if
//                     V[i-k] + k <= i - 1
// Any readable v_i > i - 1, or a nonzero value among the last k
// positions, forces the answer to 0.
int main() {
    int t;
    scanf("%d", &t);
    while (t--) {
        int n, k;
        scanf("%d%d", &n, &k);

        bool impossible = false;
        for (int i = 1; i <= n; i++) {
            scanf("%d", &a[i]);
            if (a[i] > i - 1) impossible = true;  // v_i can never exceed i - 1
        }
        if (impossible) { printf("0\n"); continue; }

        long long ans = 1;
        for (int i = 1; i <= k; i++) ans = ans * i % MOD;  // k! for covered positions

        // The last k positions must have been zeroed by the passes.
        for (int i = n - k + 1; i <= n; i++) {
            if (a[i] != -1 && a[i]) { printf("0\n"); impossible = true; break; }
        }
        if (impossible) continue;

        for (int i = k + 1; i <= n; i++) {
            int v = a[i - k];
            if (v == -1) {
                ans = ans * i % MOD;
            } else if (v > 0) {
                // Uniquely determined; only check feasibility.
                if (v + k > i - 1) impossible = true;
            } else {
                ans = ans * (min(i - 1, k) + 1) % MOD;
            }
        }

        printf("%lld\n", impossible ? 0LL : ans);
    }
    return 0;
}