contest_id
stringlengths
1
4
index
stringclasses
43 values
title
stringlengths
2
63
statement
stringlengths
51
4.24k
tutorial
stringlengths
19
20.4k
tags
listlengths
0
11
rating
int64
800
3.5k
code
stringlengths
46
29.6k
1950
A
Stair, Peak, or Neither?
You are given three digits $a$, $b$, and $c$. Determine whether they form a stair, a peak, or neither. - A stair satisfies the condition $a<b<c$. - A peak satisfies the condition $a<b>c$.
You just need to write two if-statements and check the two cases. Please note that some languages like C++ won't allow a chain of comparisons like a < b < c, and you should instead write it as a < b && b < c.
[ "implementation" ]
800
#include <iostream>

// Classify each triple (a, b, c):
// "STAIR" when a < b < c, "PEAK" when a < b > c, otherwise "NONE".
void solve() {
    int a, b, c;
    std::cin >> a >> b >> c;
    const bool rises = a < b;   // shared prerequisite for both patterns
    if (rises && b < c) {
        std::cout << "STAIR\n";
    } else if (rises && b > c) {
        std::cout << "PEAK\n";
    } else {
        std::cout << "NONE\n";
    }
}

int main() {
    int tests;
    std::cin >> tests;
    while (tests--) {
        solve();
    }
}
1950
B
Upscaling
You are given an integer $n$. Output a $2n \times 2n$ checkerboard made of $2 \times 2$ squares alternating '$#$' and '$.$', with the top-left cell being '$#$'. \begin{center} {\small The picture above shows the answers for $n=1,2,3,4$.} \end{center}
You just need to implement what is written. One way is to go cell-by-cell in a regular $n \times n$ checkerboard, and construct the larger one one cell at a time by copying cell $(i,j)$ into cells $(2i,2j)$, $(2i+1, 2j)$, $(2i, 2j+1)$, $(2i+1, 2j+1)$. A faster solution is to notice that if we round down coordinates $(x,y)$ in the enlarged checkerboard to $(\lfloor \frac{x}{2} \rfloor, \lfloor \frac{y}{2} \rfloor)$, we get the corresponding cell in the original checkerboard. And to output a regular checkerboard, we output $\texttt{#}$ if the sum of coordinates is even, and $\texttt{.}$ if it is odd. So the faster implementation is: iterate over all cells $(x,y)$ in the $2n \times 2n$ checkerboard. If $\lfloor \frac{x}{2} \rfloor + \lfloor \frac{y}{2} \rfloor$ is even output $\texttt{#}$, else output $\texttt{.}$.
[ "implementation" ]
800
#include <bits/stdc++.h>
using namespace std;

// Print a 2n x 2n checkerboard built from 2x2 blocks: cell (row, col) of the
// enlarged board maps to cell (row/2, col/2) of a regular checkerboard, which
// is '#' when the coordinate sum is even and '.' when it is odd.
void solve() {
    int n;
    cin >> n;
    const int size = 2 * n;
    for (int row = 0; row < size; row++) {
        string line(size, '#');
        for (int col = 0; col < size; col++) {
            if ((row / 2 + col / 2) % 2 == 1) line[col] = '.';
        }
        cout << line << '\n';
    }
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1950
C
Clock Conversion
Given the time in 24-hour format, output the equivalent time in 12-hour format. - 24-hour format divides the day into 24 hours from $00$ to $23$, each of which has 60 minutes from $00$ to $59$. - 12-hour format divides the day into two halves: the first half is $\mathrm{AM}$, and the second half is $\mathrm{PM}$. In each half, the hours are numbered in the order $12, 01, 02, 03, \dots, 11$. Each hour has 60 minutes numbered from $00$ to $59$.
From 24-hour format to 12-hour format, the minutes are the same. For the hours: If $\texttt{hh}$ is $00$, then it should become $12 \; \mathrm{AM}$. If $\texttt{hh}$ is from $01$ to $11$, then it should become $\texttt{hh} \; \mathrm{AM}$. If $\texttt{hh}$ is $12$, then it should become $12 \; \mathrm{PM}$. If $\texttt{hh}$ is from $13$ to $23$, then it should become $(\texttt{hh} - 12) \; \mathrm{PM}$.
[ "implementation", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// Convert one "hh:mm" 24-hour time to 12-hour format with an AM/PM suffix.
void solve() {
    int hour, minute;
    char sep;   // the ':' separator, echoed back in the output
    cin >> hour >> sep >> minute;
    const bool morning = hour < 12;   // 00:00-11:59 is AM, 12:00-23:59 is PM
    hour %= 12;
    if (hour == 0) hour = 12;         // both 00 and 12 are displayed as 12
    cout << setfill('0') << setw(2) << hour << sep
         << setw(2) << minute << (morning ? " AM" : " PM") << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1950
D
Product of Binary Decimals
Let's call a number a binary decimal if it is a positive integer and all digits in its decimal notation are either $0$ or $1$. For example, $1\,010\,111$ is a binary decimal, while $10\,201$ and $787\,788$ are not. Given a number $n$, you are asked whether or not it is possible to represent $n$ as a product of some (not necessarily distinct) binary decimals.
First, let's precompute a list of all binary decimals at most $10^5$. You can do it in many ways, for example iterating through all numbers up to $10^5$ and checking if each is a binary decimal. Let's call a number good if it can be represented as the product of binary decimals. For each test case, we will write a simple recursive function. $n$ is good if: $n=1$, or $\frac{n}{i}$ is good, for some binary decimal $i>1$. Even if your implementation is slightly too slow, there are not many good numbers; you can simply precompute them all locally and hardcode them to get a solution that works in $\mathcal{O}(1)$. $T(n) \leq 2 T\left(\left\lfloor\frac{n}{10}\right\rfloor\right) + 4 T\left(\left\lfloor\frac{n}{100}\right\rfloor\right) + 26 T\left(\left\lfloor\frac{n}{1000}\right\rfloor\right).$ $T(n) \in \mathcal{O}(n^{\log_{10} \alpha}) = \mathcal{O}\left(n^{\log_{10} \left( \frac{2 + \sqrt[3]{395 - 3\sqrt{16881}} + \sqrt[3]{395 + 3\sqrt{16881}}}{3} \right)}\right) \approx \mathcal{O}(n^{0.635})\,,$ A more accurate estimate can be made by picking the solution to $\alpha^5 = 2\alpha^4 + 4\alpha^3 + 8\alpha^2 + 16\alpha+2$, which gives a bound $\mathcal{O}(n^{0.587})$.
[ "brute force", "dp", "implementation", "number theory" ]
1,100
#include <bits/stdc++.h>
using namespace std;

const int MAX = 100'007;

// All "binary decimals" (positive integers whose decimal digits are all
// 0 or 1) below MAX, precomputed once in main().
vector<int> binary_decimals;

// Returns true if n can be written as a product of binary decimals.
// Strips one binary-decimal factor at a time; reaching n == 1 means the
// whole number was factored successfully.
bool ok(int n) {
    if (n == 1) return true;
    for (int d : binary_decimals) {
        // Early exit on the first successful factorization (the original
        // kept scanning all remaining divisors after success).
        if (n % d == 0 && ok(n / d)) return true;
    }
    return false;
}

void solve() {
    int n;
    cin >> n;
    cout << (ok(n) ? "YES\n" : "NO\n");
}

int main() {
    // Precompute every binary decimal in [2, MAX).
    // (The unused MOD constant from the original has been removed.)
    for (int i = 2; i < MAX; i++) {
        bool allBinary = true;
        for (int curr = i; curr; curr /= 10) {
            if (curr % 10 > 1) { allBinary = false; break; }
        }
        if (allBinary) binary_decimals.push_back(i);
    }
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1950
E
Nearly Shortest Repeating Substring
You are given a string $s$ of length $n$ consisting of lowercase Latin characters. Find the length of the shortest string $k$ such that several (possibly one) copies of $k$ can be concatenated together to form a string with the same length as $s$ and, at most, one different character. More formally, find the length of the shortest string $k$ such that $c = \underbrace{k + \cdots + k}_{x\rm\ \text{times}}$ for some positive integer $x$, strings $s$ and $c$ has the same length and $c_i \neq s_i$ for at most one $i$ (i.e. there exist $0$ or $1$ such positions).
Let's call a string a period if it can be multiplied to the same length as $s$. What are the possibilities for the lengths of the period? Clearly, it must be a divisor of $n$. So the solution is to check all divisors of $n$ and see the smallest one that works. To check if length $l$ works, multiply the prefix of length $l$ until it's the same length as $s$ and check how many differences there are. However, the different letter can be in the prefix (for example, $\texttt{hshaha}$ and $l = 2$), so we also check the same for the suffix of length $l$. If either of them is true, output $l$. All numbers at most $10^5$ have at most $128$ divisors, so this will take $\sim 128 \cdot 10^5$ operations, which is fast enough.
[ "brute force", "implementation", "number theory", "strings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// For each divisor len of n (in increasing order), test whether the prefix
// or the suffix of length len, repeated n/len times, differs from s in at
// most one position; print the smallest such len.
void solve() {
    int n;
    cin >> n;
    string s;
    cin >> s;
    for (int len = 1; len <= n; len++) {
        if (n % len != 0) continue;

        // Candidate 1: the prefix s[0..len) is the repeating block.
        int mismatches = 0;
        for (int j = 0; j < len; j++) {
            for (int k = j + len; k < n; k += len) {
                if (s[k] != s[j]) mismatches++;
            }
        }
        if (mismatches <= 1) {
            // '\n' instead of the original std::endl: no flush per line.
            cout << len << '\n';
            return;
        }

        // Candidate 2: the suffix s[n-len..n) is the repeating block (the
        // single allowed mismatch may sit inside the prefix itself).
        mismatches = 0;
        for (int j = n - len; j < n; j++) {
            for (int k = j - len; k >= 0; k -= len) {
                if (s[k] != s[j]) mismatches++;
            }
        }
        if (mismatches <= 1) {
            cout << len << '\n';
            return;
        }
    }
}

int32_t main() {
    int t = 1;
    cin >> t;
    while (t--) {
        solve();
    }
}
1950
F
0, 1, 2, Tree!
Find the minimum height of a rooted tree$^{\dagger}$ with $a+b+c$ vertices that satisfies the following conditions: - $a$ vertices have exactly $2$ children, - $b$ vertices have exactly $1$ child, and - $c$ vertices have exactly $0$ children. If no such tree exists, you should report it.\begin{center} {\small The tree above is rooted at the top vertex, and each vertex is labeled with the number of children it has. Here $a=2$, $b=1$, $c=3$, and the height is $2$.} \end{center} $^{\dagger}$ A rooted tree is a connected graph without cycles, with a special vertex called the root. In a rooted tree, among any two vertices connected by an edge, one vertex is a parent (the one closer to the root), and the other one is a child. The distance between two vertices in a tree is the number of edges in the shortest path between them. The height of a rooted tree is the maximum distance from a vertex to the root.
Note that since the tree has $a+b+c$ vertices, all vertices have $0$, $1$, or $2$ children. Call a vertex a leaf if it has no children. The idea is to "grow" the tree from the root by adding one or two vertices at a time. Formally: Start with a root (which is initially a leaf). Repeatedly add $1$ or $2$ children to a leaf. In total, add $2$ children $a$ times and $1$ child $b$ times. Every time we grow by adding $1$ child, the number of leaves does not change (since we lose one and gain one). Every time we grow by adding $2$ children, the number of leaves increases by one (since we lose one and gain two). Otherwise we need to minimize the height. The idea is greedy: note that we should always grow by $2$ instead of $1$ when we have a choice, because it's clear that it will strictly decrease the height. Similarly, we should always grow the node closest to the root to minimize the height. Thus we can just simulate the process described above growing by $2$ first and $1$ afterwards, which takes $\mathcal{O}(a+b+c)$ time. Bonus: can you solve the problem in $\mathcal{O}(\log(a+b+c))$ time? Or even better? A note on the implementation. You can store the number of "free" nodes on the current level and the next level in two variables, i.e. you don't need to store a whole tree at all. As you iterate through the current level, store the number of nodes in the next level.
[ "bitmasks", "brute force", "greedy", "implementation", "trees" ]
1,700
#include <bits/stdc++.h> using namespace std; void solve() { int a, b, c; cin >> a >> b >> c; if (a + 1 != c) {cout << -1 << '\n'; return;} if (a + b + c == 1) {cout << 0 << '\n'; return;} int curr = 1, next = 0, res = 1; for (int i = 0; i < a + b; i++) { if (!curr) { swap(next, curr); res++; } curr--; next++; if (i < a) {next++;} } cout << res << '\n'; } int main() { int tt; cin >> tt; for (int i = 1; i <= tt; i++) {solve();} // solve(); }
1950
G
Shuffling Songs
Vladislav has a playlist consisting of $n$ songs, numbered from $1$ to $n$. Song $i$ has genre $g_i$ and writer $w_i$. He wants to make a playlist in such a way that every pair of adjacent songs either have the same writer or are from the same genre (or both). He calls such a playlist exciting. Both $g_i$ and $w_i$ are strings of length no more than $10^4$. It might not always be possible to make an exciting playlist using all the songs, so the shuffling process occurs in two steps. First, some amount (possibly zero) of the songs are removed, and then the remaining songs in the playlist are rearranged to make it exciting. Since Vladislav doesn't like when songs get removed from his playlist, he wants the making playlist to perform as few removals as possible. Help him find the minimum number of removals that need to be performed in order to be able to rearrange the rest of the songs to make the playlist exciting.
First of all, always comparing strings takes quite a long time, so, let's map the strings to integers. We can do that by keeping all strings in some array, sorting the array, and mapping each string to its position in the array. This process is called "Normalization" or "Coordinate Compression". Now, we can do a dynamic programming solution over subsets. We denote mask as our current bit-mask and we say it has the value of all elements we include. For example, if our mask is equal to 7, in binary it looks like ...000111, so we can say that we included elements 0, 1 and 2. Each power of two set in our mask, implies we include that element. So now, if we iterate over masks and the last included element, we can mark $dp_{mask, i}$ as a boolean which tells whether it is possible to get to this state. We transition from a state to another by using the current mask and trying to include all non-included elements one-by-one, and checking out if it is possible to include them. If it is, we update our new mask. After calculating for each state whether we can get to it, using previously calculated states, we update our answer as the maximum number of included elements (bits set) in a mask which is obtainable.
[ "bitmasks", "dfs and similar", "dp", "graphs", "hashing", "implementation", "strings" ]
1,900
#include "bits/stdc++.h"
using namespace std;
#define all(x) x.begin(),x.end()

// Bitmask DP over subsets: dp[mask][last] == 1 when the songs in `mask`
// can be ordered into an exciting playlist ending at song `last`.
// Writer/genre strings are coordinate-compressed to integer ids first so
// adjacency checks are O(1) integer comparisons.
void solve() {
    int n;
    cin >> n;
    vector<int> s(n), g(n);          // compressed writer / genre ids
    vector<string> aa(n), bb(n);
    vector<string> vals;
    for (int i = 0; i < n; ++i) {
        string a, b;
        cin >> a >> b;
        vals.push_back(a);
        vals.push_back(b);
        aa[i] = a, bb[i] = b;
    }
    // Coordinate compression: map each string to its rank in the sorted,
    // deduplicated list of all writers and genres.
    sort(all(vals));
    vals.erase(unique(all(vals)), vals.end());
    for (int i = 0; i < n; ++i) {
        s[i] = lower_bound(all(vals), aa[i]) - vals.begin();
        g[i] = lower_bound(all(vals), bb[i]) - vals.begin();
    }
    vector<vector<int>> dp(1 << n, vector<int>(n, 0));
    for (int i = 0; i < n; ++i) dp[1 << i][i] = 1;   // single-song playlists
    for (int mask = 0; mask < (1 << n); ++mask) {
        for (int lst = 0; lst < n; ++lst) {
            if (!dp[mask][lst]) continue;
            for (int i = 0; i < n; ++i) {
                if (mask >> i & 1) continue;   // song i already used
                // Adjacent songs must share a writer or a genre.
                if (s[lst] == s[i] || g[lst] == g[i]) {
                    dp[mask | (1 << i)][i] |= dp[mask][lst];
                }
            }
        }
    }
    // Minimum removals = n minus the largest reachable subset.
    int ans = 0;
    for (int mask = 0; mask < (1 << n); ++mask) {
        for (int i = 0; i < n; ++i) {
            if (dp[mask][i]) {
                ans = max(ans, __builtin_popcount(mask));
            }
        }
    }
    cout << n - ans << "\n";
}

// Fixed: the original declared `main()` with implicit int, which is not
// valid standard C++; main must be declared with return type int.
int main() {
    int t = 1;
    cin >> t;
    while (t--) {
        solve();
    }
}
1951
A
Dual Trigger
\begin{quote} Ngọt - LẦN CUỐI (đi bên em xót xa người ơi) \hfill ඞ \end{quote} There are $n$ lamps numbered $1$ to $n$ lined up in a row, initially turned off. You can perform the following operation any number of times (possibly zero): - Choose two \textbf{non-adjacent}${}^\dagger$ lamps that are currently turned off, then turn them on. Determine whether you can reach configuration $s$, where $s_i = 1$ means the $i$-th lamp is turned on, and $s_i = 0$ otherwise. ${}^\dagger$ Only lamp $i$ and $i + 1$ are adjacent for all $1 \le i < n$. Note that lamp $n$ and $1$ are \textbf{not} adjacent when $n \ne 2$.
The answer is obviously "NO" if there is an odd amount of $\mathtt{1}$ in $s$. When there are zero $\mathtt{1}$'s, the answer is trivially "YES". When there are exactly two $\mathtt{1}$'s in $s$, then we also need to check whether the two $\mathtt{1}$'s are adjacent or not. Otherwise, when there are $k \ge 4$ $\mathtt{1}$'s in $s$, let their positions be $x_1, x_2, \dots, x_k$. Note that we can always turn on the $x_i$-th and the $x_{i + k/2}$-th lamp as they are not adjacent. Therefore the answer is "YES" in this case.
[ "constructive algorithms", "greedy", "math" ]
900
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        string s;
        cin >> s;
        // Count lamps that must end up on and remember the outermost pair.
        int ones = 0;
        int first = n, last = -1;
        for (int i = 0; i < n; i++) {
            if (s[i] != '1') continue;
            ones++;
            first = min(first, i);
            last = max(last, i);
        }
        // Impossible iff the count is odd, or exactly two lamps are on and
        // they are adjacent (with 4+ on-lamps a non-adjacent pairing always
        // exists).
        const bool bad = (ones % 2 == 1) || (ones == 2 && last - first == 1);
        cout << (bad ? "NO\n" : "YES\n");
    }
}
1951
B
Battle Cows
\begin{quote} The HU - Shireg Shireg \hfill ඞ \end{quote} There are $n$ cows participating in a coding tournament. Cow $i$ has a Cowdeforces rating of $a_i$ (all distinct), and is initially in position $i$. The tournament consists of $n-1$ matches as follows: - The first match is between the cow in position $1$ and the cow in position $2$. - Subsequently, each match $i$ is between the cow in position $i+1$ and the winner of match $i-1$. - In each match, the cow with the higher Cowdeforces rating wins and proceeds to the next match. You are the owner of cow $k$. For you, winning the tournament is not important; rather, you want your cow to win in as many matches as possible. As an acquaintance of the tournament organizers, you can ask them to swap the position of your cow with another cow \textbf{only once}, or you can choose to do nothing. Find the maximum number of wins your cow can achieve.
It is easy to note that the $i$-th match is between cow $i+1$ and the strongest cow among cows $1, 2, \cdots, i$. Therefore, in order to win any match at all, your cow has to be stronger than every cow before her (and cow $2$ if your cow is cow $1$). Consider the following cases: There exists no stronger cow before your cow. Then you should maximize the number of wins by swapping your cow with cow $1$. There exist some stronger cows before your cow. Let $i$ be the first such cow. Obviously you want cow $i$ to end up to the right of your cow. There are two ways to achieve this: Swap your cow before cow $i$. Since your cow will lose to cow $i$ anyway, you should maximize the number of wins by swapping with cow $1$. Swap your cow with cow $i$. The answer for the problem is the maximum among all above candidates. Since there are only $2$ of them, the overall time complexity is $O(n)$.
[ "binary search", "data structures", "greedy" ]
1,200
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        k--;   // zero-based index of our cow
        vector<int> a(n);
        for (int i = 0; i < n; i++) {
            cin >> a[i];
        }
        // x = index of the first cow rated higher than ours (n if none).
        int x = find_if(a.begin(), a.end(), [&](int v) { return v > a[k]; }) - a.begin();
        // pos[j] = index of the cow standing at position j.
        vector<int> pos(n);
        iota(pos.begin(), pos.end(), 0);
        int ans = 0;
        // Only two candidate swaps matter (per the editorial): move our cow
        // to the front (position 0), or swap it with the first stronger cow.
        for (int i : {0, x}) {
            if (i == n) { continue; }   // no stronger cow exists
            swap(pos[i], pos[k]);
            // Simulate the tournament; stt[u] = number of matches cow u wins.
            vector<int> stt(n);
            for (int j = 1, u = pos[0]; j < n; j++) {
                int v = pos[j];
                u = (a[u] > a[v] ? u : v);   // higher rating wins the match
                stt[u]++;
            }
            swap(pos[i], pos[k]);   // undo the swap for the next candidate
            ans = max(ans, stt[k]);
        }
        cout << ans << '\n';
    }
}
1951
C
Ticket Hoarding
\begin{quote} Maître Gims - Est-ce que tu m'aimes ? \hfill ඞ \end{quote} As the CEO of a startup company, you want to reward each of your $k$ employees with a ticket to the upcoming concert. The tickets will be on sale for $n$ days, and by some time travelling, you have predicted that the price per ticket at day $i$ will be $a_i$. However, to prevent ticket hoarding, the concert organizers have implemented the following measures: - A person may purchase no more than $m$ tickets per day. - If a person purchases $x$ tickets on day $i$, all subsequent days (i.e. from day $i+1$ onwards) will have their prices per ticket increased by $x$. For example, if $a = [1, 3, 8, 4, 5]$ and you purchase $2$ tickets on day $1$, they will cost $2$ in total, and the prices from day $2$ onwards will become $[5, 10, 6, 7]$. If you then purchase $3$ more tickets on day $2$, they will cost in total an additional $15$, and the prices from day $3$ onwards will become $[13, 9, 10]$. Find the minimum spending to purchase $k$ tickets.
Note: we will present the intuitive idea first; the formal proof follows later. Let's try to find a way to interpret the raised price at day $i$ from buying on earlier days. We know that every ticket bought on an earlier day than $i$ raised the price per ticket $i$ by $1$; in turn, this "additional tax" will be added to our total cost every time a ticket is bought on day $i$. Parsing in a different way, every pair of ticket where one is bought before day $i$ and one is bought on day $i$ adds $1$ additional tax to the total cost, so the final additional tax is exactly "the number of pair of tickets that were bought on different days". We now try to find a way to optimize both the base cost and the total additional tax. Observe that the same greedy strategy of "take as many cheap tickets as possible" works for both of them (obviously works for the first quantity, and for the second quantity it works because we're limiting the number of pair of tickets bought on different days). Implementing this gives an $O(n \log n)$ solution. - - - Formally, suppose we buy $b_1, b_2, \ldots, b_n$ tickets on day $1, 2, \ldots, n$, respectively, then $0 \leq b_i \leq m$, $\sum_{i=1}^{n} b_i = k$ and the price per ticket on day $i$ will be $a_i + b_1 + b_2 + \ldots + b_{i-1} = a_i + \sum_{j=1}^{i-1} b_j$. Thus, the total cost will be: $\sum_{i=1}^{n} (a_i + \sum_{j=1}^{i-1} b_j)b_i = \sum_{i=1}^{n} a_ib_i + \sum_{1 \leq j < i \leq n} b_ib_j$ Note that if we choose two days $i$, $j$ and swap $(a_i, b_i)$ with $(a_j, b_j)$, the above sum does not change its value. Therefore, we can freely rearrange the sequence $a$ without changing the answer. Let's sort the sequence $a$ in non-decreasing order. It is obvious that the first sum is minimized when we buy the $k$ cheapest tickets, i.e. $b = (m, m, \ldots, k \mod m, 0, 0, \ldots, 0)$. 
We will prove that the greedy strategy also minimizes the second sum, by noting that the second sum is equal to: $\frac{1}{2} ((\sum_{i = 1}^{n}b_i)^{2} - \sum_{i = 1}^{n}{b_i^2}) = \frac{1}{2}k^2 - \frac{1}{2} \sum_{i = 1}^{n}{b_i^2}$ If there exists two days $i$, $j$ such that $0 < b_i \leq b_j < m$, then replacing $(b_i, b_j)$ with $(b_i - 1, b_j + 1)$ gives a smaller cost. Hence, the sequence $b = (m, m, \ldots, k \mod m, 0, 0, \ldots, 0)$ minimizes the overall cost.
[ "greedy", "math", "sortings" ]
1,400
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int n, m, k;
        cin >> n >> m >> k;
        vector<int> a(n);
        for (int i = 0; i < n; i++) {
            cin >> a[i];
        }
        // Buying order does not change the total cost (see the editorial),
        // so greedily buy as many tickets as allowed at the cheapest days.
        sort(a.begin(), a.end());
        int64_t ans = 0;
        // tax = tickets already bought = per-ticket price increase so far.
        // Fixed: kept as int64_t because a[i] + tax can exceed INT_MAX
        // (both may be up to ~1e9); the original computed it in int.
        int64_t tax = 0;
        for (int i = 0; i < n && k > 0; ++i) {
            int buy = min(m, k);
            ans += buy * (a[i] + tax);   // 64-bit: tax widens the product
            tax += buy;
            k -= buy;
        }
        cout << ans << '\n';
    }
}
1951
D
Buying Jewels
\begin{quote} Nightwish feat. Jonsu - Erämaan Viimeinen \hfill ඞ \end{quote} Alice has $n$ coins and wants to shop at Bob's jewelry store. Today, although Bob has not set up the store yet, Bob wants to make sure Alice will buy \textbf{exactly} $k$ jewels. To set up the store, Bob can erect at most $60$ stalls (each containing an unlimited amount of jewels) and set the price per jewel for each stall to be an integer number of coins between $1$ and $10^{18}$. Fortunately, Bob knows that Alice buys greedily: and she will go to stall $1$, buy as many jewels as possible, then go to stall $2$, buy as many jewels as possible, and so on until the last stall. Knowing this, Bob can choose the number of stalls to set up, as well as set the price for each stall so that Alice buys exactly $k$ jewels. Help Bob fulfill the task, or determine if it is impossible to do so. Note that Alice does not need to spend all her coins.
We first assume that $p_i \leq n$ for $1 \leq i \leq s$, since adding stalls with price greater than $n$ doesn't change the result. If $n < k$ then the answer is obviously "NO". If $n = k$ then the answer is obviously "YES" - we can set up a single stall of price $1$. Otherwise, the first stall should have price $p_1 \geq 2$. Let $n = p_1q + r$ such that $q$, $r$ are integers and $q \geq 1$, $0 \leq r < p_1$. Obviously Alice can buy at most $q + r$ jewels ($q$ from stall $1$, at most $r$ from other stalls). On the other hand: $n - 2(q+r) = (p_1 - 2)q - r$ $\geq (p_1 - 2)q - p_1 + 1$ (since $r \leq p_1 - 1$) $\geq (p_1 - 2) - p_1 + 1$ (since $q \geq 1, p_1 \geq 2$) $= -1$ $\Rightarrow 2(q+r) \leq n + 1$ Thus, if $k < n$ and $2k > n + 1$, the answer is "NO". Otherwise, choosing $p_1 = n - k + 1$ and $p_2 = 1$ solves the problem (you also have the option to print $58$ random integers afterwards to test our checker!)
[ "constructive algorithms", "greedy", "math" ]
2,000
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int64_t n, k;
        cin >> n >> k;
        // Alice can never buy more jewels than she has coins.
        if (k > n) {
            cout << "NO\n";
            continue;
        }
        // With n == k a single stall priced 1 sells exactly n = k jewels.
        if (k == n) {
            cout << "YES\n1\n1\n";
            continue;
        }
        // Otherwise two stalls suffice exactly when n >= 2k - 1: the first
        // stall, priced n - k + 1, sells one jewel; the second, priced 1,
        // sells the remaining k - 1 with Alice's k - 1 leftover coins.
        if (n >= 2 * k - 1) {
            cout << "YES\n2\n" << n - k + 1 << " 1\n";
        } else {
            cout << "NO\n";
        }
    }
}
1951
E
No Palindromes
\begin{quote} Christopher Tin ft. Soweto Gospel Choir - Baba Yetu \hfill ඞ \end{quote} You are given a string $s$ consisting of lowercase Latin characters. You need to partition$^\dagger$ this string into some substrings, such that each substring is not a palindrome$^\ddagger$. $^\dagger$ A partition of a string $s$ is an ordered sequence of some $k$ strings $t_1, t_2, \ldots, t_k$, such that $t_1 + t_2 + \ldots + t_k = s$, where $+$ here represents the concatenation operation. $^\ddagger$ A string $s$ is considered a palindrome if it reads the same backwards as forwards. For example, $\mathtt{racecar}$, $\mathtt{abccba}$, and $\mathtt{a}$ are palindromes, but $\mathtt{ab}$, $\mathtt{dokibird}$, and $\mathtt{kurosanji}$ are not.
If $s$ is not a palindrome then we are done. If $s$ consists of only one type of character then we're also done. Consider the case when $s$ is a palindrome and there are at least $2$ distinct characters. Let $t$ be the first position where $s_t \neq s_1$, and note that this means $s_{[1, t]}$ is not a palindrome. If $s_{[t + 1, n]}$ isn't a palindrome then we're done. Otherwise, we note that the string has the form $A\mathtt{b}A\mathtt{b} \dots A\mathtt{b}A$, where $A$ consists of $t-1$ $\mathtt{a}$ characters (from the assumptions that $s$ and $s_{[t + 1, n]}$ are palindromes). We do a case analysis on $t$, observing that $t$ is between $2$ and $\frac{n + 1}{2}$: If $t = \frac{n + 1}{2}$, the string has the form $A\mathtt{b}A$, and we can verify that this is a "NO" case (proof: for every partition, either the substring containing $s_1$ or the substring containing $s_n$ has length at most $\frac{n - 1}{2}$, which means it consists of only character $\mathtt{a}$). If $t = 2$, the string has the form $\mathtt{abab\dots aba}$, and we can verify that this is another "NO" case (proof: every partition must have an odd-length substring, which is always a palindrome). Otherwise, $s_{[1, t + 1]} = A\mathtt{ba}$ starts with $t - 1$ $\mathtt{a}$ characters and ends with $1$ $\mathtt{a}$ character, and $s_{[t + 2, n]}$ starts with $t - 2$ $\mathtt{a}$ characters and ends with $t - 1$ $\mathtt{a}$ character. Therefore, both substrings are not palindromes, which means this is a valid partition. Implementing this idea directly gives a time complexity of $O(n)$.
[ "brute force", "constructive algorithms", "divide and conquer", "greedy", "hashing", "implementation", "math", "strings" ]
2,000
#include<bits/stdc++.h>
using namespace std;

int main(void) {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int ntest;
    cin >> ntest;
    while (ntest--) {
        string s;
        cin >> s;
        int n = s.size();
        // Checks whether the inclusive substring s[l..r] is a palindrome.
        auto is_palindrome = [&](int l, int r) -> bool {
            for (int i = l; i < r + l - i; ++i)
                if (s[i] != s[r + l - i]) return false;
            return true;
        };
        // If the whole string is not a palindrome it is its own partition.
        if (!is_palindrome(0, n - 1)) {
            cout << "YES\n1\n" << s << "\n";
            continue;
        }
        // first_dif = first position whose character differs from s[0].
        int first_dif = -1;
        for (int i = 1; i < n; ++i)
            if (s[i] != s[0]) {
                first_dif = i;
                break;
            }
        // A single repeated character: every substring is a palindrome.
        if (first_dif == -1) {
            cout << "NO\n";
            continue;
        }
        // Try cutting right after the first differing character; if both
        // halves are non-palindromes that split works.
        if (!is_palindrome(0, first_dif) && !is_palindrome(first_dif + 1, n - 1)) {
            cout << "YES\n2\n" << s.substr(0, first_dif + 1) << ' ' << s.substr(first_dif + 1) << "\n";
            continue;
        } else if (first_dif == 1 || first_dif == n / 2) {
            // String looks like abab...aba (first_dif == 1) or A b A
            // (first_dif == n/2): the editorial proves both are "NO" cases.
            cout << "NO\n";
        } else {
            // Otherwise shifting the cut one position right yields two
            // non-palindromic parts (see the editorial's case analysis).
            cout << "YES\n2\n" << s.substr(0, first_dif + 2) << ' ' << s.substr(first_dif + 2) << "\n";
        }
    }
    return 0;
}
1951
F
Inversion Composition
\begin{quote} My Chemical Romance - Disenchanted \hfill ඞ \end{quote} You are given a permutation $p$ of size $n$, as well as a non-negative integer $k$. You need to construct a permutation $q$ of size $n$ such that $\operatorname{inv}(q) + \operatorname{inv}(q \cdot p) = k {}^\dagger {}^\ddagger$, or determine if it is impossible to do so. ${}^\dagger$ For two permutations $p$ and $q$ of the same size $n$, the permutation $w = q \cdot p$ is such that $w_i = q_{p_i}$ for all $1 \le i \le n$. ${}^\ddagger$ For a permutation $p$ of size $n$, the function $\operatorname{inv}(p)$ returns the number of inversions of $p$, i.e. the number of pairs of indices $1 \le i < j \le n$ such that $p_i > p_j$.
We say that $(i, j)$ is an inverse on permutation $p$ if $i < j$ and $p_i > p_j$, or $i > j$ and $p_i < p_j$. For any two integers $1 \le i < j \le n$, let's observe the "inverseness" of $(p_i, p_j)$ on $q$ and $(i, j)$ on $q \cdot p$: If $(i, j)$ is not an inverse on $p$, then the inverseness of $(p_i, p_j)$ on $q$ and $(i, j)$ on $q \cdot p$ is the same. Therefore, this pair contributes either $0$ or $2$ to $\operatorname{inv}(q) + \operatorname{inv}(q \cdot p)$. Otherwise, the inverseness of $(p_i, p_j)$ on $q$ and $(i, j)$ on $q \cdot p$ is different. Therefore, this pair always contribute $1$ to $\operatorname{inv}(q) + \operatorname{inv}(q \cdot p)$. Therefore, the answer is "NO" if $k \notin [\operatorname{inv}(p), n(n - 1) - \operatorname{inv}(p)]$, or if $k$ has different parity from $\operatorname{inv}(p)$. Otherwise, we claim that we can always construct $q$; the following implementation actually constructs $q \cdot p$, but we can easily find one permutation from the other. Let $k' = \frac{k - \operatorname{inv}(p)}{2}$, then the problem becomes "find $q \cdot p$ with $k'$ special inversions", where an inverse $(i, j)$ on $q \cdot p$ is special if $(i, j)$ is not an inverse on $p$. Let $x_i$ be the number of indices $j < i$ where $p_j < p_i$, which can be easily found via a standard Fenwick tree (note that $\sum_{i=1}^n x_i = \frac{n(n-1)}{2} - \operatorname{inv}(p)$). Then there exists an index $t$ such that $\sum_{i < t} x_i \le k'$, and $\sum_{i \le t} x_i \ge k'$. It's straightforward to see that the permutation $q \cdot p = [t, t - 1, \dots, v + 1, v - 1, v - 2, \dots, 1, v, t + 1, t + 2, \dots, n]$ works for some value $v \in [1, t]$, chosen such that there are exactly $k' - \sum_{i < t} x_i$ indices before $t$ that forms a special inversion with $t$. Time complexity: $O(n \log n)$.
[ "constructive algorithms", "data structures", "greedy" ]
2,500
#include <bits/stdc++.h>
using namespace std;

// Fenwick tree (binary indexed tree) over [0, n): point add, prefix and
// half-open range sums. Used below to count, for each i, how many earlier
// elements are smaller, and the total inversion count of p.
template<typename T> struct fenwick_tree {
    vector<T> bit;
    int n;
    fenwick_tree(int n) : n(n), bit(n + 1) {}
    // Sum of elements with index < x.
    T sum(int x) {
        T ans = 0;
        for (; x > 0; x -= x & -x) {
            ans += bit[x];
        }
        return ans;
    }
    // Sum over the half-open range [l, r).
    T sum(int l, int r) { return sum(r) - sum(l); }
    // Add v at index x (internally 1-based).
    void add(int x, T v) {
        for (++x; x <= n; x += x & -x) {
            bit[x] += v;
        }
    }
};

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        int64_t k;
        cin >> k;
        fenwick_tree<int> fen(n);
        // p  = the permutation (0-based), ip = its inverse,
        // cnt[i] = #{ j < i : p[j] < p[i] }, inv = inv(p).
        vector<int> p(n), ip(n), cnt(n);
        int64_t inv = 0;
        for (int i = 0; i < n; i++) {
            cin >> p[i];
            p[i]--;
            ip[p[i]] = i;
            inv += i - (cnt[i] = fen.sum(0, p[i]));
            fen.add(p[i], 1);
        }
        // Feasibility (editorial): k must lie in [inv(p), n(n-1) - inv(p)]
        // and have the same parity as inv(p), since every non-inverted pair
        // of p contributes either 0 or 2 to inv(q) + inv(q*p).
        if (k < inv || k > 1LL * n * (n - 1) - inv || (k - inv) % 2 == 1) {
            cout << "NO\n";
        } else {
            cout << "YES\n";
            // k' = number of "special" inversions q*p must contain.
            k = (k - inv) / 2;
            vector<int> qp(n);
            for (int i = 0; i < n; i++) {
                if (cnt[i] < k) {
                    // Position i alone cannot absorb the remaining budget.
                    k -= cnt[i];
                } else {
                    // Build q*p = [t, t-1, ..., v+1, v-1, ..., 1, v, t+1, ...]
                    // choosing v so exactly k special inversions are created
                    // with position i (see the editorial's construction).
                    for (int j = 0, v = i; j < i; j++) {
                        qp[j] = v--;
                        if (p[j] < p[i] && --k == 0) { qp[i] = v--; }
                    }
                    // Everything after i stays in place (identity suffix).
                    for (int j = i + 1; j < n; j++) { qp[j] = j; }
                    break;
                }
            }
            // Recover and print q: since w = q*p means w_i = q_{p_i},
            // q_i = w at position p^{-1}(i), i.e. qp[ip[i]] (1-based output).
            for (int i = 0; i < n; i++) {
                cout << qp[ip[i]] + 1 << " \n"[i + 1 == n];
            }
        }
    }
}
1951
G
Clacking Balls
\begin{quote} Rammstein - Ausländer \hfill ඞ \end{quote} There are $m$ baskets placed along a circle, numbered from $1$ to $m$ in clockwise order (basket $m$ is next to basket $1$). Furthermore, there are $n$ balls, where ball $i$ is initially placed in basket $a_i$, and no basket contains more than one ball. Alice is allowed to perform the following operation, which always takes exactly one second whether you move/throw a ball or not: - Alice chooses an integer $i$ between $1$ and $n$ \textbf{uniformly at random}. - If ball $i$ was thrown away before, do nothing. - Otherwise, ball $i$ is moved from the basket currently containing it to the next basket (in clockwise order). If the target basket currently contains another ball $j$, throw ball $j$ away. She repeats this operation until there is exactly one ball left. Calculate the expected time needed (in seconds) for Alice to end the process. It can be proven that the answer can be represented as a rational number $\frac{p}{q}$ with coprime $p$ and $q$. You need to output $p \cdot q^{-1} \bmod 10^9 + 7$. It can be proven that $10^9 + 7 \nmid q$.
Note: there are other (more elegant in our opinion) solutions from some of our testers, which you may or may not find under the comment section. We will present here the original solution here. Consider a state with $k \leq n$ balls remaining. Let's represent the state as a sequence $S = (d_1, d_2, \cdots, d_k)$ where $d_i$ is the distance between ball $i$ and ball $(i \bmod{k}) + 1$. Note that $\sum_{i=1}^{k} d_i = m$. Then, after a second, one of the following happens: $d_i$ is increased by $1$ and $d_{(i \bmod{k}) + 1}$ is decreased by $1$ with probability $\frac{1}{n}$ for each $1 \leq i \leq k$ (if $d_{(i \bmod{k}) + 1} = 0$, we erase it from the sequence). We denote the new sequence as $S + \{i\}$. Nothing changes with probability $1 - \frac{k}{n}$. The terminal state is $(m)$. Therefore, let $f(S)$ be the expected number of seconds to reach the terminal state from state $S = (d_1, d_2, \cdots, d_k)$, then: $f((m)) = 0$ (*) $f(S) = 1 + \frac{1}{n} \sum_{i=1}^{k} f(S + \{i\}) + (1 - \frac{k}{n}) f(S)$ $\Rightarrow$ $k f(S) = n + \sum_{i=1}^{k} f(S + \{i\})$ (**) Let $h(S, i) = f(S) - f(S + \{i\})$, condition (**) becomes $\sum_{i=1}^{k} h(S, i) = n$. To proceed, we will apply the idea from this comment. If we choose $f(S) = c + \sum_{i=1}^{k} \sum_{x=0}^{d_i} g(x)$ for some constant $c$ and function $g: \mathbb{Z_{\geq 0}} \rightarrow \mathbb{R}$, then: $n = \sum_{i=1}^{k} h(S, i)$ $= \sum_{i=1}^{k} g(d_{(i \bmod{k}) + 1}) - g(d_i + 1)$ $= \sum_{i=1}^{k} g(d_i) - g(d_i + 1)$ Additionally, since we want $h(S, i) = g(d_{(i \bmod{k}) + 1}) - g(d_i + 1)$ to hold even when $d_{(i \bmod{k}) + 1} = 0$, we need to choose $g$ such that $g(0) = 0$. Let's choose $g$ such that $g(0) = 0$ and $g(x + 1) - g(x) = -\frac{n}{m}x$ for all $x \geq 0$. Then $\sum_{i=1}^{k} g(d_i) - g(d_i + 1) = \frac{n}{m} \sum_{i=1}^{k} d_i = n$, thus satisfies (**). Solving for $g$ gives $g(x) = -\frac{n}{m}\frac{x(x-1)}{2} = -\frac{n}{m} \binom{x}{2}$. 
Finally, $f(S) = c - \frac{n}{m} \sum_{i=1}^{k} \binom{d_i + 1}{3}$. In order to satisfy (*), we need to choose $c = \frac{n}{m} \binom{m + 1}{3}$.
[ "combinatorics", "math", "probabilities" ]
3,100
/* Bundled AtCoder Library (ACL) modint header followed by the actual solution
 * in main().  Everything up to the final `using namespace std;` is verbatim
 * ACL code: internal math helpers (barrett reduction, constexpr Miller-Rabin
 * with witnesses 2/7/61, extended gcd for modular inverses), integer type
 * traits, and the static_modint / dynamic_modint wrappers. */
#include <bits/stdc++.h>
#include <cassert>
#include <numeric>
#include <type_traits>
#ifdef _MSC_VER
#include <intrin.h>
#endif
#include <utility>
#ifdef _MSC_VER
#include <intrin.h>
#endif
namespace atcoder { namespace internal { constexpr long long safe_mod(long long x, long long m) { x %= m; if (x < 0) x += m; return x; } struct barrett { unsigned int _m; unsigned long long im; explicit barrett(unsigned int m) : _m(m), im((unsigned long long)(-1) / m + 1) {} unsigned int umod() const { return _m; } unsigned int mul(unsigned int a, unsigned int b) const { unsigned long long z = a; z *= b;
#ifdef _MSC_VER
unsigned long long x; _umul128(z, im, &x);
#else
unsigned long long x = (unsigned long long)(((unsigned __int128)(z)*im) >> 64);
#endif
unsigned int v = (unsigned int)(z - x * _m); if (_m <= v) v += _m; return v; } }; constexpr long long pow_mod_constexpr(long long x, long long n, int m) { if (m == 1) return 0; unsigned int _m = (unsigned int)(m); unsigned long long r = 1; unsigned long long y = safe_mod(x, m); while (n) { if (n & 1) r = (r * y) % _m; y = (y * y) % _m; n >>= 1; } return r; } constexpr bool is_prime_constexpr(int n) { if (n <= 1) return false; if (n == 2 || n == 7 || n == 61) return true; if (n % 2 == 0) return false; long long d = n - 1; while (d % 2 == 0) d /= 2; constexpr long long bases[3] = {2, 7, 61}; for (long long a : bases) { long long t = d; long long y = pow_mod_constexpr(a, t, n); while (t != n - 1 && y != 1 && y != n - 1) { y = y * y % n; t <<= 1; } if (y != n - 1 && t % 2 == 0) { return false; } } return true; } template <int n> constexpr bool is_prime = is_prime_constexpr(n); constexpr std::pair<long long, long long> inv_gcd(long long a, long long b) { a = safe_mod(a, b); if (a == 0) return {b, 0}; long long s = b, t = a; long long m0 = 0, m1 = 1; while (t) { long long u = s / t; s -= t * u; m0 -= m1 * u; // |m1 * u| <= |m1| * s <= b
auto tmp = s; s = t; t = tmp; tmp = m0; m0 = m1; m1 = tmp; } if (m0 < 0) m0 += b / s; return {s, m0}; } constexpr int primitive_root_constexpr(int m) { if (m == 2) return 1; if (m == 167772161) return 3; if (m == 469762049) return 3; if (m == 754974721) return 11; if (m == 998244353) return 3; int divs[20] = {}; divs[0] = 2; int cnt = 1; int x = (m - 1) / 2; while (x % 2 == 0) x /= 2; for (int i = 3; (long long)(i)*i <= x; i += 2) { if (x % i == 0) { divs[cnt++] = i; while (x % i == 0) { x /= i; } } } if (x > 1) { divs[cnt++] = x; } for (int g = 2;; g++) { bool ok = true; for (int i = 0; i < cnt; i++) { if (pow_mod_constexpr(g, (m - 1) / divs[i], m) == 1) { ok = false; break; } } if (ok) return g; } } template <int m> constexpr int primitive_root = primitive_root_constexpr(m); unsigned long long floor_sum_unsigned(unsigned long long n, unsigned long long m, unsigned long long a, unsigned long long b) { unsigned long long ans = 0; while (true) { if (a >= m) { ans += n * (n - 1) / 2 * (a / m); a %= m; } if (b >= m) { ans += n * (b / m); b %= m; } unsigned long long y_max = a * n + b; if (y_max < m) break; n = (unsigned long long)(y_max / m); b = (unsigned long long)(y_max % m); std::swap(m, a); } return ans; } } // namespace internal
} // namespace atcoder
/* ACL integer type traits (extends <type_traits> with __int128 support). */
#include <cassert>
#include <numeric>
#include <type_traits>
namespace atcoder { namespace internal {
#ifndef _MSC_VER
template <class T> using is_signed_int128 = typename std::conditional<std::is_same<T, __int128_t>::value || std::is_same<T, __int128>::value, std::true_type, std::false_type>::type; template <class T> using is_unsigned_int128 = typename std::conditional<std::is_same<T, __uint128_t>::value || std::is_same<T, unsigned __int128>::value, std::true_type, std::false_type>::type; template <class T> using make_unsigned_int128 = typename std::conditional<std::is_same<T, __int128_t>::value, __uint128_t, unsigned __int128>; template <class T> using is_integral = typename std::conditional<std::is_integral<T>::value || is_signed_int128<T>::value || is_unsigned_int128<T>::value, std::true_type, std::false_type>::type; template <class T> using is_signed_int = typename std::conditional<(is_integral<T>::value && std::is_signed<T>::value) || is_signed_int128<T>::value, std::true_type, std::false_type>::type; template <class T> using is_unsigned_int = typename std::conditional<(is_integral<T>::value && std::is_unsigned<T>::value) || is_unsigned_int128<T>::value, std::true_type, std::false_type>::type; template <class T> using to_unsigned = typename std::conditional< is_signed_int128<T>::value, make_unsigned_int128<T>, typename std::conditional<std::is_signed<T>::value, std::make_unsigned<T>, std::common_type<T>>::type>::type;
#else
template <class T> using is_integral = typename std::is_integral<T>; template <class T> using is_signed_int = typename std::conditional<is_integral<T>::value && std::is_signed<T>::value, std::true_type, std::false_type>::type; template <class T> using is_unsigned_int = typename std::conditional<is_integral<T>::value && std::is_unsigned<T>::value, std::true_type, std::false_type>::type; template <class T> using to_unsigned = typename std::conditional<is_signed_int<T>::value, std::make_unsigned<T>, std::common_type<T>>::type;
#endif
template <class T> using is_signed_int_t = std::enable_if_t<is_signed_int<T>::value>; template <class T> using is_unsigned_int_t = std::enable_if_t<is_unsigned_int<T>::value>; template <class T> using to_unsigned_t = typename to_unsigned<T>::type; } // namespace internal
} // namespace atcoder
/* ACL modint: static_modint fixes the modulus at compile time (Fermat inverse
 * when the modulus is prime), dynamic_modint uses runtime barrett reduction. */
namespace atcoder { namespace internal { struct modint_base {}; struct static_modint_base : modint_base {}; template <class T> using is_modint = std::is_base_of<modint_base, T>; template <class T> using is_modint_t = std::enable_if_t<is_modint<T>::value>; } // namespace internal
template <int m, std::enable_if_t<(1 <= m)>* = nullptr> struct static_modint : internal::static_modint_base { using mint = static_modint; public: static constexpr int mod() { return m; } static mint raw(int v) { mint x; x._v = v; return x; } static_modint() : _v(0) {} template <class T, internal::is_signed_int_t<T>* = nullptr> static_modint(T v) { long long x = (long long)(v % (long long)(umod())); if (x < 0) x += umod(); _v = (unsigned int)(x); } template <class T, internal::is_unsigned_int_t<T>* = nullptr> static_modint(T v) { _v = (unsigned int)(v % umod()); } unsigned int val() const { return _v; } mint& operator++() { _v++; if (_v == umod()) _v = 0; return *this; } mint& operator--() { if (_v == 0) _v = umod(); _v--; return *this; } mint operator++(int) { mint result = *this; ++*this; return result; } mint operator--(int) { mint result = *this; --*this; return result; } mint& operator+=(const mint& rhs) { _v += rhs._v; if (_v >= umod()) _v -= umod(); return *this; } mint& operator-=(const mint& rhs) { _v -= rhs._v; if (_v >= umod()) _v += umod(); return *this; } mint& operator*=(const mint& rhs) { unsigned long long z = _v; z *= rhs._v; _v = (unsigned int)(z % umod()); return *this; } mint& operator/=(const mint& rhs) { return *this = *this * rhs.inv(); } mint operator+() const { return *this; } mint operator-() const { return mint() - *this; } mint pow(long long n) const { assert(0 <= n); mint x = *this, r = 1; while (n) { if (n & 1) r *= x; x *= x; n >>= 1; } return r; } mint inv() const { if (prime) { assert(_v); return pow(umod() - 2); } else { auto eg = internal::inv_gcd(_v, m); assert(eg.first == 1); return eg.second; } } friend mint operator+(const mint& lhs, const mint& rhs) { return mint(lhs) += rhs; } friend mint operator-(const mint& lhs, const mint& rhs) { return mint(lhs) -= rhs; } friend mint operator*(const mint& lhs, const mint& rhs) { return mint(lhs) *= rhs; } friend mint operator/(const mint& lhs, const mint& rhs) { return mint(lhs) /= rhs; } friend bool operator==(const mint& lhs, const mint& rhs) { return lhs._v == rhs._v; } friend bool operator!=(const mint& lhs, const mint& rhs) { return lhs._v != rhs._v; } private: unsigned int _v; static constexpr unsigned int umod() { return m; } static constexpr bool prime = internal::is_prime<m>; }; template <int id> struct dynamic_modint : internal::modint_base { using mint = dynamic_modint; public: static int mod() { return (int)(bt.umod()); } static void set_mod(int m) { assert(1 <= m); bt = internal::barrett(m); } static mint raw(int v) { mint x; x._v = v; return x; } dynamic_modint() : _v(0) {} template <class T, internal::is_signed_int_t<T>* = nullptr> dynamic_modint(T v) { long long x = (long long)(v % (long long)(mod())); if (x < 0) x += mod(); _v = (unsigned int)(x); } template <class T, internal::is_unsigned_int_t<T>* = nullptr> dynamic_modint(T v) { _v = (unsigned int)(v % mod()); } unsigned int val() const { return _v; } mint& operator++() { _v++; if (_v == umod()) _v = 0; return *this; } mint& operator--() { if (_v == 0) _v = umod(); _v--; return *this; } mint operator++(int) { mint result = *this; ++*this; return result; } mint operator--(int) { mint result = *this; --*this; return result; } mint& operator+=(const mint& rhs) { _v += rhs._v; if (_v >= umod()) _v -= umod(); return *this; } mint& operator-=(const mint& rhs) { _v += mod() - rhs._v; if (_v >= umod()) _v -= umod(); return *this; } mint& operator*=(const mint& rhs) { _v = bt.mul(_v, rhs._v); return *this; } mint& operator/=(const mint& rhs) { return *this = *this * rhs.inv(); } mint operator+() const { return *this; } mint operator-() const { return mint() - *this; } mint pow(long long n) const { assert(0 <= n); mint x = *this, r = 1; while (n) { if (n & 1) r *= x; x *= x; n >>= 1; } return r; } mint inv() const { auto eg = internal::inv_gcd(_v, mod()); assert(eg.first == 1); return eg.second; } friend mint operator+(const mint& lhs, const mint& rhs) { return mint(lhs) += rhs; } friend mint operator-(const mint& lhs, const mint& rhs) { return mint(lhs) -= rhs; } friend mint operator*(const mint& lhs, const mint& rhs) { return mint(lhs) *= rhs; } friend mint operator/(const mint& lhs, const mint& rhs) { return mint(lhs) /= rhs; } friend bool operator==(const mint& lhs, const mint& rhs) { return lhs._v == rhs._v; } friend bool operator!=(const mint& lhs, const mint& rhs) { return lhs._v != rhs._v; } private: unsigned int _v; static internal::barrett bt; static unsigned int umod() { return bt.umod(); } }; template <int id> internal::barrett dynamic_modint<id>::bt(998244353); using modint998244353 = static_modint<998244353>; using modint1000000007 = static_modint<1000000007>; using modint = dynamic_modint<-1>; namespace internal { template <class T> using is_static_modint = std::is_base_of<internal::static_modint_base, T>; template <class T> using is_static_modint_t = std::enable_if_t<is_static_modint<T>::value>; template <class> struct is_dynamic_modint : public std::false_type {}; template <int id> struct is_dynamic_modint<dynamic_modint<id>> : public std::true_type {}; template <class T> using is_dynamic_modint_t = std::enable_if_t<is_dynamic_modint<T>::value>; } // namespace internal
} // namespace atcoder
using namespace std; using namespace atcoder; using mint = modint1000000007;
/* Solution (see the editorial above): with d_i the circular gaps between the
 * sorted ball positions, the closed form for the expected time is
 *   f = (n / m) * ( C(m + 1, 3) - sum_i C(d_i + 1, 3) ),
 * computed below modulo 1e9+7; C3(x) is the binomial coefficient C(x, 3). */
int main() { ios_base::sync_with_stdio(false); cin.tie(nullptr); int t; cin >> t; auto C3 = [&](int n) { return mint(n) * (n - 1) * (n - 2) / 6; }; while (t--) { int n, m; cin >> n >> m; vector<int> a(n); for (int i = 0; i < n; i++) { cin >> a[i]; } sort(a.begin(), a.end()); mint ans = C3(m + 1); for (int i = 0; i < n; i++) { int dis = (a[(i + 1) % n] - a[i] + m) % m; if (dis == 0) { dis = m; } ans -= C3(dis + 1); } ans = ans * n / m; cout << ans.val() << '\n'; } }
1951
H
Thanos Snap
\begin{quote} Piotr Rubik - Psalm dla Ciebie \hfill ඞ \end{quote} There is an array $a$ of size $2^k$ for some positive integer $k$, which is initially a permutation of values from $1$ to $2^k$. Alice and Bob play the following game on the array $a$. First, a value $t$ between $1$ and $k$ is shown to both Alice and Bob. Then, for exactly $t$ turns, the following happens: - Alice either does nothing, or chooses two distinct elements of the array $a$ and swaps them. - Bob chooses either the left half or the right half of the array $a$ and erases it. The score of the game is defined as the maximum value in $a$ after all $t$ turns have been played. Alice wants to maximize this score, while Bob wants to minimize it. You need to output $k$ numbers: the score of the game if both Alice and Bob play optimally for $t$ from $1$ to $k$.
Let $n = 2^k$. Let's see how we can solve for each $t$ first. We binary search on the answer, which results in every element being either $0$ or $1$ depending on if it's larger than the threshold, and we now need to check if the answer is $0$ or $1$. Let's instead try to directly construct Alice's strategy to achieve $1$; if we cannot construct such a strategy, the answer is $0$. The main idea here is that Alice's strategy can be represented as a perfect binary tree with $t + 1$ layers, where: Every leaf node represents a subarray of $a$ of size $2^{k - t}$. Every non-leaf node represents Alice's next action when the game is restricted to the subarray $[l, r]$ covered by this node; such an action is in the form "swap $a_i$ and $a_j$", where $l \le i \le j \le r$. Some trivial observations: For Alice to win, we would want every leaf node to have at least one value $1$ when the game reaches that leaf node. We say a leaf is deficient if it originally did not satisfy this condition. Every leaf node that has more than one value $1$ can "donate" the extra $1$'s via the swapping action. If an action can be performed by Alice in a node, it can also be performed in any of its ancestors. The third observation motivates us to look for a greedy bottom-up construction of Alice's strategy. Combining with the first two observations, we get the following greedy construction: For every node, maintain the number of deficient leaves and the number of extra $1$'s that can be donated from other leaves. At any non-leaf node, we try to match an extra $1$ with a deficient leaf (if both quantities are more than $0$), and we propagate the remaining deficient leaves and extra $1$'s to the node's parent. Alice wins if at the root node, the number of deficient leaves remaining is $0$. 
This strategy can be implemented in time $O((n + 2^t) \log n)$ or $O(n + 2^t \log n)$ depending on implementation (to get the second complexity, note that while binary searching, the total number of element status changes between $0$ and $1$ is $O(n)$). This gives us a complexity of $O(n \log^2 n)$ or $O(n \log n)$ after looping through all $t$.
[ "binary search", "dp", "games", "greedy", "trees" ]
3,200
#include <bits/stdc++.h>
using namespace std;

// Greedy bottom-up feasibility check from the editorial.  v[j] holds the
// number of "1" elements (values above the current binary-search threshold)
// inside the j-th leaf subarray.  Sibling nodes are merged upwards; each
// internal node may pair one deficient subtree (no "1" available) with one
// spare "1".  Alice wins iff no deficiency survives at the root.
bool solve(vector<int>& v) {
    int width = v.size();
    vector<int> need(width), extra(width);
    for (int i = 0; i < width; ++i) {
        need[i] = (v[i] < 1) ? 1 - v[i] : 0;   // leaves lacking a "1"
        extra[i] = (v[i] > 1) ? v[i] - 1 : 0;  // donatable surplus "1"s
    }
    while (width > 1) {
        for (int i = 0; i + 1 < width; i += 2) {
            int h = i / 2;
            need[h] = need[i] + need[i + 1];
            extra[h] = extra[i] + extra[i + 1];
            // One swap action is available per internal node: match a single
            // spare "1" against a single deficient leaf.
            if (need[h] > 0 && extra[h] > 0) {
                --need[h];
                --extra[h];
            }
        }
        width /= 2;
        need.resize(width);
        extra.resize(width);
    }
    return need[0] == 0;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int cases;
    cin >> cases;
    while (cases--) {
        int k;
        cin >> k;
        int n = 1 << k;
        // pos[value] = 0-based index of that value in the permutation.
        vector<int> pos(n);
        for (int i = 0; i < n; ++i) {
            int u;
            cin >> u;
            pos[u - 1] = i;
        }
        for (int t = 1; t <= k; ++t) {
            // v[j] counts, per depth-t subtree, elements >= the current upper
            // bound of the binary search; maintained incrementally.
            vector<int> v(1 << t);
            int lo = 0, hi = n;
            while (lo + 1 < hi) {
                int mid = (lo + hi) / 2;
                for (int i = mid; i < hi; ++i) ++v[pos[i] >> (k - t)];
                if (solve(v)) lo = mid;
                else hi = mid;
                // Undo only down to the (possibly lowered) hi: elements in
                // [hi, n) stay counted for the tighter threshold that follows.
                for (int i = mid; i < hi; ++i) --v[pos[i] >> (k - t)];
            }
            cout << hi << " \n"[t == k];
        }
    }
}
1951
I
Growing Trees
\begin{quote} wowaka ft. Hatsune Miku - Ura-Omote Lovers \hfill ඞ \end{quote} You are given an undirected connected simple graph with $n$ nodes and $m$ edges, where edge $i$ connects node $u_i$ and $v_i$, with two positive parameters $a_i$ and $b_i$ attached to it. Additionally, you are also given an integer $k$. A non-negative array $x$ with size $m$ is called a $k$-spanning-tree generator if it satisfies the following: - Consider the undirected multigraph with $n$ nodes where edge $i$ is cloned $x_i$ times (i.e. there are $x_i$ edges connecting $u_i$ and $v_i$). It is possible to partition the edges of this graph into $k$ spanning trees, where each edge belongs to exactly one spanning tree$^\dagger$. The cost of such array $x$ is defined as $\sum_{i = 1}^m a_i x_i^2 + b_i x_i$. Find the minimum cost of a $k$-spanning-tree generator. $^\dagger$ A spanning tree of a (multi)graph is a subset of the graph's edges that form a tree connecting all vertices of the graph.
From now on, we say an array $x$ is good if the graph created by cloning the $i$-th edge $x_i$ times can be partitioned into $k$ forests (this is slightly different from $k$-spanning-tree generators). Let's first discuss how to check whether an array $x$ is good. Let $G'$ be the graph created by cloning the $i$-th edge $x_i$ times. By Nash-Williams theorem, for every subset of vertices $U$, we need the induced subgraph $G'[U]$ to have at most $k(|U| - 1)$ edges, so it suffices to check this condition quickly. Furthermore, this condition can easily be modified to $k$-spanning-tree generators: the only additional condition necessary is to have $\sum_{i=1}^m x_i = k(n - 1)$. Consider the unweighted bipartite graph $B_{G'}$ where every left node represents a node $u$ of $G'$, every right node represents an edge $(u, v)$ of $G'$, and we connect edges $u - (u, v)$ and $v - (u, v)$ for every right node $(u, v)$, then Hall's marriage theorem states that $B_{G'}$ has a right-saturating matching iff the induced subgraph $G'[U]$ has at most $|U|$ edges for all $U$. We can further extend this idea by making $k$ clones of every left node $u$, which now means that $B_{G'}$ has a right-saturating matching iff $|G'[U]| \le k |U|$ for all $U$. The big last idea is to try "turning off" a left node $u$: formally, let $B_{G', u}$ be the same as $B_{G'}$, but all clones of some left node $u$ are deleted, and note that this graph enforces the condition $|G'[U']| \le k(|U'| - 1)$ for every subset $U'$ containing $u$. Therefore, by checking if $B_{G', u}$ has a right-saturating matching for all $u$, we can now ensure the Nash-Williams condition. 
Finally, note that we can quickly check if $B_{G', u}$ has a right-saturating matching by constructing the following directed graph: direct an edge from the source node to every left node $v \neq u$ with capacity $k$, direct an edge from every right node $(u_i, v_i)$ to the sink node with capacity $x_i$, and direct an edge from left node $u_i$ (or $v_i$) to right node $(u_i, v_i)$ with capacity $\infty$; $B_{G', u}$ has a perfect matching iff this new graph has a flow from source to sink that saturates all incoming edges to sink. As the constructed flow graph has $O(n + m)$ nodes and edges, we now know how to check if an array $x$ is good in time complexity $O(n \cdot F(n + m, n + m))$, where $F(N, M)$ is the complexity to run max flow on a graph with $N$ nodes and $M$ edges. We now return to the original problem. Observe that the cost function is convex on every coordinate $x_i$; hence, one could imagine the following greedy strategy: starts with $x = 0$; until the sum of $x_i$ reaches $k(n - 1)$, try to find a coordinate $i$ and increases $x_i$ by $1$ such that increasing $x_i$ by $1$ doesn't make $x$ bad, and increasing $x_i$ by $1$ increases the cost by as little as possible. It turns out that this strategy works, as the set of all good arrays forms a "matroid", and $k$-spanning-tree generators are the "bases" of this "matroid" (note that the greedy algorithm guarantees to find the min-weight basis of a matroid). To be more formal, consider a set of elements $E$, where each element denoted by a pair $(i, t)$ represents the clone of the edge $i$ with cost $(a_i t^2 + b_i t) - (a_i (t-1)^2 + b_i(t - 1))$, and a subset $E' \subseteq E$ is independent if the multiset edges represented by elements in $E'$ can be partitioned into $k$ forests (here, we let $i \in [1, n]$ and $t \in [1, k]$). 
Observe that this set $E$ forms a matroid (we can think of $E$ as a "permuted" direct sum of $k$ disjoint versions of the graphic matroids on $G$, and note that direct sums of matroids are themselves matroid). Furthermore, the weight of element $(i, t)$ represents exactly the increment to the cost when we increase $x_i$ from $t - 1$ to $t$. By convexity of the cost on each edge, the weight of $(i, t)$ is less than the weight of $(i, t + 1)$. Hence the min-cost $k$-spanning-tree generator represents exactly the min-weight basis of $E$, as the basis would greedily use $(i, j)$ with $j$ being as small as possible. Finally, while this greedy algorithm is correct, we still need to optimize it as the number of steps is $k(n - 1)$. Observe that if at some step, increasing a coordinate $i$ makes $x$ bad, we would never consider coordinate $i$ again at later steps of the algorithm. Therefore, we can effectively simulate the greedy algorithm by 1) finding the first step where a coordinate $i$ turns bad, 2) advancing until such step, 3) discarding coordinate $i$ from consideration, and 4) repeating until every coordinate turns bad. Fortunately, step 1) can be quickly implemented by binary searching on the cost of the increment, and the number of iterations is $m$ as every coordinate can only be discarded once. Therefore, the problem has been solved in time complexity of either $O(m \cdot (\log W + m) \cdot n \cdot F(n + m, n + m))$, $O(m \cdot \log mW \cdot n \cdot F(n + m, n + m))$, or $O(m \cdot \log W \cdot n \cdot F(n + m, n + m) + m^2 n^2)$ depending on how you break ties on the same cost increment (note that we can ensure no ties by adding $\epsilon i$ to $x_i$), where $W \le 2 k \cdot \max a + \max b$ is the maximum cost increment.
[ "binary search", "constructive algorithms", "flows", "graphs", "greedy" ]
3,200
/* Bundled AtCoder Library (ACL) max-flow header (Dinic's algorithm) followed
 * by the actual solution in main().  The library part is verbatim ACL code. */
#include <bits/stdc++.h>
#include <algorithm>
#include <cassert>
#include <limits>
#include <queue>
#include <vector>
#include <vector>
namespace atcoder { namespace internal { template <class T> struct simple_queue { std::vector<T> payload; int pos = 0; void reserve(int n) { payload.reserve(n); } int size() const { return int(payload.size()) - pos; } bool empty() const { return pos == int(payload.size()); } void push(const T& t) { payload.push_back(t); } T& front() { return payload[pos]; } void clear() { payload.clear(); pos = 0; } void pop() { pos++; } }; } // namespace internal
} // namespace atcoder
/* ACL mf_graph<Cap>: max-flow via Dinic (BFS level graph + blocking-flow DFS). */
namespace atcoder { template <class Cap> struct mf_graph { public: mf_graph() : _n(0) {} explicit mf_graph(int n) : _n(n), g(n) {} int add_edge(int from, int to, Cap cap) { assert(0 <= from && from < _n); assert(0 <= to && to < _n); assert(0 <= cap); int m = int(pos.size()); pos.push_back({from, int(g[from].size())}); int from_id = int(g[from].size()); int to_id = int(g[to].size()); if (from == to) to_id++; g[from].push_back(_edge{to, to_id, cap}); g[to].push_back(_edge{from, from_id, 0}); return m; } struct edge { int from, to; Cap cap, flow; }; edge get_edge(int i) { int m = int(pos.size()); assert(0 <= i && i < m); auto _e = g[pos[i].first][pos[i].second]; auto _re = g[_e.to][_e.rev]; return edge{pos[i].first, _e.to, _e.cap + _re.cap, _re.cap}; } std::vector<edge> edges() { int m = int(pos.size()); std::vector<edge> result; for (int i = 0; i < m; i++) { result.push_back(get_edge(i)); } return result; } void change_edge(int i, Cap new_cap, Cap new_flow) { int m = int(pos.size()); assert(0 <= i && i < m); assert(0 <= new_flow && new_flow <= new_cap); auto& _e = g[pos[i].first][pos[i].second]; auto& _re = g[_e.to][_e.rev]; _e.cap = new_cap - new_flow; _re.cap = new_flow; } Cap flow(int s, int t) { return flow(s, t, std::numeric_limits<Cap>::max()); } Cap flow(int s, int t, Cap flow_limit) { assert(0 <= s && s < _n); assert(0 <= t && t < _n); assert(s != t); std::vector<int> level(_n), iter(_n); internal::simple_queue<int> que; auto bfs = [&]() { std::fill(level.begin(), level.end(), -1); level[s] = 0; que.clear(); que.push(s); while (!que.empty()) { int v = que.front(); que.pop(); for (auto e : g[v]) { if (e.cap == 0 || level[e.to] >= 0) continue; level[e.to] = level[v] + 1; if (e.to == t) return; que.push(e.to); } } }; auto dfs = [&](auto self, int v, Cap up) { if (v == s) return up; Cap res = 0; int level_v = level[v]; for (int& i = iter[v]; i < int(g[v].size()); i++) { _edge& e = g[v][i]; if (level_v <= level[e.to] || g[e.to][e.rev].cap == 0) continue; Cap d = self(self, e.to, std::min(up - res, g[e.to][e.rev].cap)); if (d <= 0) continue; g[v][i].cap += d; g[e.to][e.rev].cap -= d; res += d; if (res == up) return res; } level[v] = _n; return res; }; Cap flow = 0; while (flow < flow_limit) { bfs(); if (level[t] == -1) break; std::fill(iter.begin(), iter.end(), 0); Cap f = dfs(dfs, t, flow_limit - flow); if (!f) break; flow += f; } return flow; } std::vector<bool> min_cut(int s) { std::vector<bool> visited(_n); internal::simple_queue<int> que; que.push(s); while (!que.empty()) { int p = que.front(); que.pop(); visited[p] = true; for (auto e : g[p]) { if (e.cap && !visited[e.to]) { visited[e.to] = true; que.push(e.to); } } } return visited; } private: int _n; struct _edge { int to, rev; Cap cap; }; std::vector<std::pair<int, int>> pos; std::vector<std::vector<_edge>> g; }; } // namespace atcoder
using namespace std; using namespace atcoder; const int64_t INF = 1E13 + 100;
/* Solution (see the editorial above): simulate the matroid greedy by binary
 * searching on the cost increment threshold `thr`.  initialize_counts(thr)
 * derives how many clones of edge i are affordable at that threshold (the
 * `- i` term breaks ties between edges deterministically), check(thr) verifies
 * the Nash-Williams condition through n max-flow runs (one per "turned off"
 * vertex, per Hall's theorem), and each outer iteration freezes one edge's
 * final clone count lim[i].  The answer sums a_i*lim_i^2 + b_i*lim_i. */
int main() { ios_base::sync_with_stdio(false); cin.tie(nullptr); int t; cin >> t; while (t--) { int n, m, k; cin >> n >> m >> k; vector<array<int, 4>> edg(m); vector<int64_t> lim(m, INF); for (auto& [u, v, a, b] : edg) { cin >> u >> v >> a >> b; u--; v--; } auto initialize_counts = [&](int64_t thr) { vector<int64_t> cnt(m); for (int i = 0; i < m; i++) { int a = edg[i][2], b = edg[i][3]; int64_t val = (thr - i) / m; // a * (2c - 1) + b <= thr
cnt[i] = max(0LL, min(lim[i], ((val - b) / a + 1) / 2)); } return cnt; }; auto check = [&](int64_t thr) { auto cnt = initialize_counts(thr); mf_graph<int64_t> b(n + m + 3); int src = 0, snk = 1; auto vertex = [&](int u) { return u + 2; }; auto edge = [&](int i) { return i + n + 2; }; for (int i = 0; i < n; i++) { b.add_edge(src, vertex(i), k); } int64_t sum = 0; for (int i = 0; i < m; i++) { int u = edg[i][0], v = edg[i][1]; b.add_edge(vertex(u), edge(i), INF); b.add_edge(vertex(v), edge(i), INF); b.add_edge(edge(i), snk, cnt[i]); sum += cnt[i]; } if (b.flow(src, snk) < sum) { return false; } for (int i = 0; i < n; i++) { auto bi = b; bi.add_edge(vertex(i), snk, k); if (bi.flow(src, snk) < k) { return false; } } return true; }; int64_t low = 0; for (int lft = m; lft > 0; lft--) { int64_t hig = INF; while (low + 1 < hig) { int64_t mid = (low + hig) / 2; (check(mid) ? low : hig) = mid; } int bad = (low + 1) % m; auto [u_, v_, a, b] = edg[bad]; int64_t val = (low - bad) / m; lim[bad] = ((val - b) / a + 1) / 2; low++; } int64_t ans = 0; for (int i = 0; i < m; i++) { int a = edg[i][2], b = edg[i][3]; ans += lim[i] * lim[i] * a + lim[i] * b; } cout << ans << '\n'; } }
1954
A
Painting the Ribbon
Alice and Bob have bought a ribbon consisting of $n$ parts. Now they want to paint it. First, Alice will paint every part of the ribbon into one of $m$ colors. For each part, she can choose its color arbitrarily. Then, Bob will choose \textbf{at most $k$} parts of the ribbon and repaint them \textbf{into the same color} (he chooses the affected parts and the color arbitrarily). Bob would like all parts to have the same color. However, Alice thinks that this is too dull, so she wants to paint the ribbon in such a way that Bob cannot make all parts have the same color. Is it possible to paint the ribbon in such a way?
When is Bob able to get a ribbon where each part has color $i$? There should be at least $n-k$ parts of color $i$. So if the number of parts with color $i$ is less than $n-k$, Bob cannot repaint the whole ribbon into color $i$. So, Alice has to paint the ribbon in such a way that for every color, there are at most $n-k-1$ parts of that color. There are at least two ways to check if it is possible: for example, you can calculate the maximum possible length of the ribbon such that it contains no more than $n-k-1$ parts of every color; or you can calculate the maximum number of parts among all colors if you try to color the ribbon as evenly as possible.
[ "constructive algorithms", "greedy", "math" ]
900
# Codeforces 1954A — Painting the Ribbon.
# Bob wins iff some color already covers at least n - k parts, so Alice must
# keep every color's count at most n - k - 1.  Painting as evenly as possible
# puts ceil(n / m) parts in the most frequent color, hence Alice succeeds iff
# ceil(n / m) + k < n.
t = int(input())
for i in range(t):
    n, m, k = map(int, input().split())
    # ceil(n / m) with integer (floor) division.  The original used true
    # division "/", which produces a float and is precision-fragile for
    # large n; "//" keeps the count exact.
    max_color = (n + m - 1) // m
    if max_color + k >= n:
        print('NO')
    else:
        print('YES')
1954
B
Make It Ugly
Let's call an array $a$ beautiful if you can make all its elements the same by using the following operation an arbitrary number of times (possibly, zero): - choose an index $i$ ($2 \le i \le |a| - 1$) such that $a_{i - 1} = a_{i + 1}$, and replace $a_i$ with $a_{i - 1}$. You are given a beautiful array $a_1, a_2, \dots, a_n$. What is the minimum number of elements you have to remove from it in order for it to stop being beautiful? Swapping elements is prohibited. If it is impossible to do so, then output -1.
As given in the problem statement, the definition of a beautiful array is not very interesting to us, since checking the beauty of an array is quite complex. Let's try to simplify it. First of all, the first and last elements will never be changed, as it is impossible to choose such $i$ for operations. If they are different, then the array is definitely not beautiful. Moreover, if the array is beautiful, then all its elements at the end will be equal to the first and the last elements. The second idea is a bit more complicated. Notice that each element can be changed at most once. Consider an arbitrary operation. We choose some $i$ for which $a_{i - 1} = a_{i + 1}$, and change $a_i$ to $a_{i - 1}$. Now both $a_{i - 1}$ and $a_{i + 1}$ will always remain equal to their current values, because in any operation involving them, $a_i$ will also be involved. This means that $a_i$ will also remain equal to the new value. The next idea is as follows. We know what all the elements should be equal to in the end. This means that we need to apply operations to all elements that are not equal to this value. According to the previous idea, this is possible if and only if there are no two consecutive numbers in the array that are not equal to this value. The sufficiency of this condition is obvious, and the necessity is left as an exercise to the reader. In other words, the check looks like this: $a_1 = a_n$, and $a_i = a_1$ or $a_{i + 1} = a_1$ (or both are equal) for all $i$. What elements should be removed so that the check does not pass? There are two options: break the first or second condition. So, you can do the following: remove the entire prefix of numbers equal to $a_1$; remove the entire suffix of numbers equal to $a_1$ (or $a_n$ - they are equal to each other, since the given array is beautiful); choose two numbers that are not equal to $a_1$, and remove all the numbers between them, so that these two numbers become adjacent. The third condition can be simplified. 
If other numbers not equal to $a_1$ are encountered between the selected numbers, then another pair can be chosen, for which fewer numbers have to be removed. Therefore, it is only optimal to choose a pair for which all the numbers between them are equal to $a_1$. Then the solution is as follows. Find the shortest block of numbers equal to $a_1$. Remove it from the array. It can be at the prefix or at the suffix - then the first condition will be broken. Or it can be somewhere in the middle - then the second condition will be broken. To find such a block, you can go through the array from left to right, maintaining the position of the previous element not equal to $a_1$. If the current element is not equal to $a_1$, update the answer with the difference between the saved and current positions, and update the saved position. The only case when the answer is -1 is when all the elements of the array are the same. Otherwise, it is always possible to make the array not beautiful. Overall complexity: $O(n)$ for each testcase.
[ "implementation", "math" ]
1,200
# Codeforces 1954B — Make It Ugly.
# A beautiful array has a[0] == a[-1] and no two adjacent elements both
# different from a[0].  To break beauty, remove the shortest run of elements
# equal to a[0] lying before / between / after elements that differ from it.
# If every element equals a[0], beauty cannot be broken: print -1.
for _ in range(int(input())):
    n = int(input())
    arr = list(map(int, input().split()))
    anchor = arr[0]
    best = n          # shortest removable run found so far
    prev_other = -1   # index of the previous element != anchor
    for idx, val in enumerate(arr):
        if val != anchor:
            best = min(best, idx - prev_other - 1)
            prev_other = idx
    best = min(best, n - prev_other - 1)  # trailing (suffix) run
    print(best if best < n else -1)
1954
C
Long Multiplication
You are given two integers $x$ and $y$ of the same length, consisting of digits from $1$ to $9$. You can perform the following operation any number of times (possibly zero): swap the $i$-th digit in $x$ and the $i$-th digit in $y$. For example, if $x=73$ and $y=31$, you can swap the $2$-nd digits and get $x=71$ and $y=33$. Your task is to maximize the product of $x$ and $y$ using the aforementioned operation any number of times. If there are multiple answers, print any of them.
There are two observations to solve the problem: applying the operation does not change the sum of the numbers; the smaller the difference of the numbers, the greater their product (the proof is given below). Proof: let's denote the sum of the numbers as $s$, the smallest number as $\frac{s}{2} - a$ and the largest number as $\frac{s}{2} + a$. Then the product is equal to $\left(\frac{s}{2} - a\right)\left(\frac{s}{2} + a\right) = \left(\frac{s}{2}\right)^2 - a^2$. We can see that, the smaller $a$ (the half of difference), the larger the product. In order to get the minimum difference, we can use the following algorithm: let $i$ be the smallest index (the most significant digit) such that $x_i \ne y_i$ and set the maximum digit among $x_i$ and $y_i$ to the number $x$ and smallest to the number $y$; thus $x$ is definitely greater than $y$, then the less significant digits should be the maximum possible for the number $y$ (i. e. the inequality $x_j \le y_j$ must hold for all $j > i$).
[ "greedy", "math", "number theory" ]
1,200
#include <iostream>
#include <string>
#include <utility>

using namespace std;

// Maximize x*y by per-position swaps: the digit sum is invariant, so the
// product is maximized by minimizing |x - y|.  Give x the larger digit at
// the first differing position; after that, give x the smaller digit.
int main() {
    int t;
    cin >> t;
    while (t--) {
        string x, y;
        cin >> x >> y;
        bool diverged = false;  // true once some earlier position differed
        for (size_t i = 0; i < x.size(); ++i) {
            const bool xBigger = x[i] > y[i];
            if (xBigger == diverged)
                swap(x[i], y[i]);
            diverged = diverged || x[i] != y[i];
        }
        cout << x << '\n' << y << '\n';
    }
}
1954
D
Colored Balls
There are balls of $n$ different colors; the number of balls of the $i$-th color is $a_i$. The balls can be combined into groups. Each group should contain at most $2$ balls, and no more than $1$ ball of each color. Consider all $2^n$ sets of colors. For a set of colors, let's denote its value as the minimum number of groups the balls of those colors can be distributed into. For example, if there are three colors with $3$, $1$ and $7$ balls respectively, they can be combined into $7$ groups (and not less than $7$), so the value of that set of colors is $7$. Your task is to calculate the sum of values over all $2^n$ possible sets of colors. Since the answer may be too large, print it modulo $998\,244\,353$.
For a fixed set of colors, this is a standard problem with the following solution: let's denote the total number of balls as $s$, then the value of the set is $\left\lceil\frac{s}{2}\right\rceil$; but there is an exception in the case when there is a color with more than $\frac{s}{2}$ balls, then the value is the number of balls of this color. So the answer depends only on whether there is a color that has more balls than all the others combined. Using the aforementioned fact, we can come up with the following solution: let's iterate over the total number of balls in the set (denote it as $j$) and increase the answer by $\left\lceil\frac{j}{2}\right\rceil$ for each subset of colors with exactly $j$ balls in it. The number of subsets with the fixed number of balls can be calculated using simple knapsack dynamic programming. It remains to consider the subsets of colors with a "dominant" color, because we added the wrong value to the answer for them. Let the "dominant" color be $i$ and the total number of balls in all other colors be $j$ ($j < a_i$). The answer should be increased by $a_i - \left\lceil\frac{a_i + j}{2}\right\rceil$ (the correct value of this set is $a_i$, but we have already added the wrong value in the answer - $\left\lceil\frac{a_i + j}{2}\right\rceil$, so we should compensate it) for each subset of colors with exactly $j$ balls in it (we can use the same dp as in previous case). Note that if you consider only $j < a_i$, you don't have to deal with the possibility that the "dominant" color could already be included in the subset. This solution works in $O(nS)$ time, where $S$ is the total number of balls.
[ "combinatorics", "dp", "math", "sortings" ]
1,800
#include <iostream>
#include <numeric>
#include <vector>

using namespace std;

constexpr int MOD = 998244353;

// Modular addition (both operands already reduced).
static int add(int x, int y) {
    x += y;
    if (x >= MOD) x -= MOD;
    return x;
}

// Modular multiplication via 64-bit intermediate.
static int mul(int x, int y) {
    return int(1LL * x * y % MOD);
}

// Sum over all color subsets of the minimal group count: ceil(s/2) for a
// subset with s balls, corrected upward for subsets with a dominant color.
int main() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (auto &v : a) cin >> v;

    const int total = accumulate(a.begin(), a.end(), 0);

    // knap[j] = number of color subsets whose ball count is exactly j.
    vector<int> knap(total + 1, 0);
    knap[0] = 1;
    for (int x : a)
        for (int j = total - x; j >= 0; --j)
            knap[j + x] = add(knap[j + x], knap[j]);

    // Base contribution: ceil(j / 2) per subset of j balls.
    int answer = 0;
    for (int j = 0; j <= total; ++j)
        answer = add(answer, mul((j + 1) / 2, knap[j]));

    // Correction for a dominant color x: for j < x, subsets counted by
    // knap[j] cannot contain x itself, and the true value is x instead of
    // ceil((x + j) / 2).
    for (int x : a)
        for (int j = 0; j < x; ++j)
            answer = add(answer, mul(x - (x + j + 1) / 2, knap[j]));

    cout << answer << '\n';
}
1954
E
Chain Reaction
There are $n$ monsters standing in a row. The $i$-th monster has $a_i$ health points. Every second, you can choose one \textbf{alive} monster and launch a chain lightning at it. The lightning deals $k$ damage to it, and also spreads to the left (towards decreasing $i$) and to the right (towards increasing $i$) to \textbf{alive} monsters, dealing $k$ damage to each. When the lightning reaches a dead monster or the beginning/end of the row, it stops. A monster is considered alive if its health points are strictly greater than $0$. For example, consider the following scenario: there are three monsters with health equal to $[5, 2, 7]$, and $k = 3$. You can kill them all in $4$ seconds: - launch a chain lightning at the $3$-rd monster, then their health values are $[2, -1, 4]$; - launch a chain lightning at the $1$-st monster, then their health values are $[-1, -1, 4]$; - launch a chain lightning at the $3$-rd monster, then their health values are $[-1, -1, 1]$; - launch a chain lightning at the $3$-rd monster, then their health values are $[-1, -1, -2]$. For each $k$ from $1$ to $\max(a_1, a_2, \dots, a_n)$, calculate the minimum number of seconds it takes to kill all the monsters.
Let's solve the problem for a single $k$. We'll start with $k = 1$ for simplicity. The first lightning can be launched at any monster, as it will always spread to all of them. We will continue launching lightnings until a monster dies. When one or more monsters die, the problem breaks down into several independent subproblems, because no lightning will pass through dead monsters. This means that there is no concept of "minimum number of seconds" - the answer does not depend on the choice of monsters to launch the lightnings. Great, so how do we calculate this answer? The idea is as follows. We will attack the first monster until it dies. This will take $a_1$ seconds. We then move on to the second monster. If it has more health than the first one, we need to launch an additional $a_2 - a_1$ lightnings to kill it. Otherwise, it will already be dead. How much damage will the third monster receive in both cases? Let's say it has a lot of health. In the first case, it will receive $a_2$ damage, because all the lightnings will reach it. But in the second case, it will also receive $a_2$ damage, because the lightnings launched at the first monster after the death of the second one will not reach the third one. This means that we now need to compare the health of the second monster with the third one in the same way. And so on. This means that the $i$-th monster needs to be hit with $\max(0, a_i - a_{i - 1})$ lightnings. Then the answer for $k = 1$ is equal to $a_1 + \sum\limits_{i = 2}^{n} \max(0, a_i - a_{i - 1})$. How to calculate the answer for any $k$? In fact, the difference is not very significant. It is sufficient to change the health of each monster from $a_i$ to $\lceil \frac{a_i}{k} \rceil$, and the entire process described earlier will remain the same. Therefore, the answer for any $k$ is equal to $\lceil \frac{a_1}{k} \rceil + \sum\limits_{i = 2}^{n} \max(0, \lceil \frac{a_i}{k} \rceil - \lceil \frac{a_{i - 1}}{k} \rceil)$. 
To further optimize this solution, another transformation is needed. Ideally, we would like each $a_i$ to contribute to the answer independently of other values. And this can almost be achieved. Notice that the maximum returns $0$ only if $a_i < a_{i - 1}$ for any $k$, not just for $k = 1$. This may require proof, but it is quite obvious. This means that the coefficient for $\lceil \frac{a_i}{k} \rceil$ in the answer depends on two conditions: it is increased by $1$ if $i = 1$ or $a_i \ge a_{i - 1}$; it is decreased by $1$ if $i < n$ and $a_i < a_{i + 1}$. Let's call this coefficient for the $i$-th monster $c_i$. Therefore, we need to calculate $\sum\limits_{i = 1}^n c_i \cdot \lceil \frac{a_i}{k} \rceil$. There are two ways to optimize the solution further. The first option is to notice that $\lceil \frac{a_i}{k} \rceil$ doesn't take a lot of different values for different $k$. More precisely, it is $O(\sqrt{a_i})$. This can be shown as follows. Consider $\lceil \frac{a_i}{k} \rceil = x$. Either $k \le \sqrt{a_i}$, or $x \le \sqrt{a_i}$. Therefore, $x$ takes no more than $2 \sqrt{a_i}$ different values. Then the solution can be implemented as follows. For each $a_i$, we will identify all possible values that the rounding result takes. For each of them, we will find the range of $k$ for which the result is equal to that. And we will add the contribution of the $i$-th monster within this range of values to the result. This can be done using a difference array to achieve a complexity of $O(n \cdot \sqrt{A})$. The second option is a bit smarter. Let's take another look at the formula for calculating the answer for a fixed $k$: $\sum\limits_{i = 1}^n c_i \cdot \lceil \frac{a_i}{k} \rceil$. Let's group the terms by equal values of $\lceil \frac{a_i}{k} \rceil$. What do they look like? Numbers from $1$ to $k$ give the value $1$. Numbers from $k + 1$ to $2k$ give the value $2$, and so on.
This means that for a certain $k$, there are $\frac{A}{k}$ segments, on each of which we need to calculate the sum of $c_i$ for those $i$ for which $a_i$ fall into this segment. The total number of segments for all $k$ is $O(A \log A)$. The complexity of the solution will then be $O(n + A \log A)$.
[ "binary search", "data structures", "dsu", "greedy", "implementation", "math", "number theory" ]
2,200
#include <algorithm>
#include <iostream>
#include <vector>

using namespace std;

// Answer for each k is sum over i of c_i * ceil(a_i / k), where the
// coefficient c_i depends only on neighbours.  Grouping monsters by
// health value lets each k be answered from prefix sums in O(maxHp / k).
int main() {
    cin.tie(nullptr);
    ios::sync_with_stdio(false);

    int n;
    cin >> n;
    vector<int> a(n);
    for (auto &x : a) cin >> x;

    const int maxHp = *max_element(a.begin(), a.end());

    // weight[v] accumulates the coefficients of all monsters with a_i = v,
    // then becomes a prefix sum over health values.
    vector<long long> weight(maxHp + 1, 0);
    for (int i = 0; i < n; ++i) {
        int coef = 0;
        if (i == 0 || a[i] > a[i - 1]) coef += 1;
        if (i + 1 < n && a[i] < a[i + 1]) coef -= 1;
        weight[a[i]] += coef;
    }
    for (int v = 1; v <= maxHp; ++v) weight[v] += weight[v - 1];

    // For a given k, every a_i in [l, l + k - 1] has ceil(a_i / k) = val.
    for (int k = 1; k <= maxHp; ++k) {
        long long answer = 0;
        long long val = 1;
        for (int l = 1; l <= maxHp; l += k, ++val) {
            const int r = min(maxHp, l + k - 1);
            answer += val * (weight[r] - weight[l - 1]);
        }
        cout << answer << ' ';
    }
    cout << '\n';
    return 0;
}
1954
F
Unique Strings
Let's say that two strings $a$ and $b$ are \textbf{equal} if you can get the string $b$ by cyclically shifting string $a$. For example, the strings 0100110 and 1100100 are equal, while 1010 and 1100 are not. You are given a binary string $s$ of length $n$. Its first $c$ characters are 1-s, and its last $n - c$ characters are 0-s. In one operation, you can replace one 0 with 1. Calculate the number of unique strings you can get using no more than $k$ operations. Since the answer may be too large, print it modulo $10^9 + 7$.
What's common in all strings we can get: each string has no more than $c + k$ ones and at least $c$ consecutive ones. So let's loosen up our constraints a little and just calculate the number of strings with no more than $c + k$ ones and at least $c$ consecutive ones, i. e. this block of ones can be anywhere, can even start at the end and continue at the beginning. Let's name such strings as good strings. Note that the number of unique good strings is exactly equal to the answer of the initial task, since we can shift each good string and make it start from the block of ones. How to calculate the number of good strings? Using Burnside's lemma! Since the group of transformations is just a group of cyclic shifts, we can calculate the answer as following $\mathrm{ans} = \frac{1}{n} \sum_{i=1}^{n}{\mathrm{FixedPoints}(i)}$ Note that if the string doesn't change while shifting by $i$ characters, then it means that $s[p] = s[(p + i) \mod |s|]$ for all $p$. Further investigation reveals that all characters will be split into exactly $g = \gcd(i, n)$ groups and each group will contain exactly $\frac{n}{g}$ equal characters. It means that if $\gcd(i, n) = \gcd(j, n)$ then $\mathrm{FixedPoints}(i) = \mathrm{FixedPoints}(j)$ since in both cases we'll get exactly the same group division. So, we can rewrite the answer as following: $\mathrm{ans} = \frac{1}{n} \sum_{g | n}{cnt[g] \cdot \mathrm{FixedPoints}(g)}$ So, it's time to calculate $\mathrm{FixedPoints}(g)$ for some divisor $g$ of $n$. It's not hard to see that if $s[p] = s[(p + g) \mod |s|]$ then the first $g$ characters of $s$ will uniquely define the whole string $s$. So it's enough to work with only a prefix of length $g$, remembering that each character will be copied $\frac{n}{g}$ times. Remember that a good string is a string with at most $c + k$ ones, and since each character will be copied $\frac{n}{g}$ times, we can place at most $on = \frac{(c + k) g}{n}$ ones in our prefix (or at least $zr = g - on$ zeroes). 
Also, since a good string has $c$ consecutive ones, our prefix should also have $c$ consecutive ones, including the case where the ones go "cyclically" starting at the end of the prefix. In case if $c \ge g$ then the whole prefix should consist of ones, and it's either possible (if $c + k = n$) or impossible (if $c + k < n$). In case $c < g$ we need to calculate the number of good prefixes that can be described as "the cyclic strings of length $g$ that has no more than $on$ ones and contains $c$ consecutive ones". Instead of good prefixes, let's calculate bad prefixes and subtract them from all possible prefixes. All prefixes are just strings with at most $on$ ones and there are $all = \sum\limits_{i=0}^{on}{\binom{g}{i}}$ such strings. Bad prefixes are cyclic strings with at most $on$ ones, where all blocks of ones have length less than $c$. In order to calculate bad prefixes, let's precalc the following dp: $d[z][l]$ is the number of strings of length $l$ with $z$ zeroes where all blocks of ones have length less than $c$ and the last character of the string is $0$. Why did we use the number of zeroes in the state, and why did we add the last zero in $d[z][l]$? Because it will help us to calculate dp fast. $d[0][0] = 1$. Now, if we have value $d[z][l]$, we can add a block of $i$ ones ($0 \le i < c$) and zero to the end of the string and update the value in $d[z + 1][l + i + 1]$. Note that we are updating a segment of row $z + 1$ from $l + 1$ to $l + c$ with value $d[z][l]$ - we can do it in $O(1)$. So, we can precalc the whole dp in $O(n^2)$ time. 
Now, it's time to calculate the number of bad strings: if we iterate over the length of the prefix of ones, length of the suffix of ones and the number of zeroes in between, we'll get $bad = \sum_{p=0}^{c - 1}\sum_{s = 0}^{c - p - 1}\sum_{z = zr - 1}^{g - p - s - 1}{d[z][g - p - s - 1]}$ If we play a little with the sum, we can simplify it in the following way: $bad = \sum_{p = 0}^{c - 1}\sum_{z = \max(0, zr - 1)}^{g - p - 1}{\mathrm{sumD}(z, g - c, g - p - 1)}$ Good prefixes are just $all - bad$ and since $bad$ can be calculated in $O(g^2)$, the total complexity of $\mathrm{FixedPoints}(g)$ is $O(g^2)$. The resulting complexity is $O(n^2) + \sum\limits_{g|n}{O(g^2)}$ that looks like just $O(n^2)$.
[ "combinatorics", "dp", "math" ]
3,100
#include<bits/stdc++.h>

using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())

typedef long long li;
typedef pair<int, int> pt;

// Debug-printing helpers.  NOTE(review): these reference p.x / p.y although
// std::pair has members first/second — they compile only because the
// templates are never instantiated in this program.
template<class A, class B> ostream& operator <<(ostream& out, const pair<A, B> &p) { return out << "(" << p.x << ", " << p.y << ")"; }
template<class A> ostream& operator <<(ostream& out, const vector<A> &v) { fore(i, 0, sz(v)) { if(i) out << " "; out << v[i]; } return out; }

const int MOD = int(1e9) + 7;

// Modular arithmetic helpers (add keeps the value in [0, MOD)).
int add(int a, int b) { a += b; while (a >= MOD) a -= MOD; while (a < 0) a += MOD; return a; }
void upd(int &ans, int val) { ans = add(ans, val); }
int mul(int a, int b) { return int(a * 1ll * b % MOD); }
// Binary exponentiation; used for modular inverses via Fermat's little theorem.
int binPow(int a, int k) { int ans = 1; while (k > 0) { if (k & 1) ans = mul(ans, a); a = mul(a, a); k >>= 1; } return ans; }

const int N = 3055;
int f[N], invf[N];

// Factorials and inverse factorials for binomial coefficients C(n, k).
void precalcFact() { f[0] = invf[0] = 1; fore (i, 1, N) { f[i] = mul(f[i - 1], i); invf[i] = binPow(f[i], MOD - 2); } }
int C(int n, int k) { if (k < 0 || k > n) return 0; return mul(f[n], mul(invf[k], invf[n - k])); }

int n, c, k;

inline bool read() { if(!(cin >> n >> c >> k)) return false; return true; }

int d[N][N], sum[N][N];

// d[z][l] - number of strings of length l, with z zeroes and blocks of 1-s shorter than mx
// (the string is required to end in a zero); computed with range updates,
// then sum[z][*] holds prefix sums of d[z][*] over the length dimension.
void precalcShort(int mx) {
    memset(d, 0, sizeof d);
    d[0][0] = 1;
    fore (z, 0, n) {
        fore (l, 0, n) {
            if (d[z][l] == 0) continue;
            // Append a block of 0..mx-1 ones plus a terminating zero:
            // adds d[z][l] to d[z + 1][l + 1 .. l + mx] via a difference array.
            upd(d[z + 1][l + 1], +d[z][l]);
            upd(d[z + 1][min(n, l + mx) + 1], -d[z][l]);
        }
        fore (l, 0, n + 1) upd(d[z + 1][l + 1], d[z + 1][l]);
    }
    memset(sum, 0, sizeof sum);
    fore (z, 0, n + 1) fore (l, 0, n + 1) sum[z][l + 1] = add(sum[z][l], d[z][l]);
}

//[lf, rg)
int getSum(int z, int lf, int rg) { if (z < 0 || z > n || lf >= rg) return 0; return add(sum[z][rg], -sum[z][lf]); }

// cnt[g] is a number of x <= n with gcd(x, n) = g
vector<int> precalcCnt(int n) { vector<int> cnt(n + 1, 0); fore (x, 1, n + 1) cnt[gcd(x, n)]++; return cnt; }

// Burnside term: number of "good" strings fixed by a shift with gcd g,
// i.e. good prefixes of length g (at most cntOnes ones, containing c
// cyclically-consecutive ones).  Counted as (all prefixes) - (bad ones).
int calcFixedPoints(int g) {
    // If the run of c ones covers the whole period, the prefix must be all
    // ones, which is achievable only when the full string can be all ones.
    if (c >= g) return c + k >= n;
    int cntOnes = (c + k) / (n / g);
    int cntZeros = g - cntOnes;
    int all = 0;
    fore (ones, 0, cntOnes + 1) upd(all, C(g, ones));
    int bad = 0;
    // Bad = cyclic prefixes whose every block of ones is shorter than c;
    // iterate over the length of the leading run of ones.
    fore (pref, 0, c) {
        int minZeros = max(0, cntZeros - 1);
        int minMid = g - c;
        int sufLen = g - pref - 1;
        fore (z, minZeros, sufLen + 1) upd(bad, getSum(z, minMid, sufLen + 1));
    }
    return add(all, -bad);
}

inline void solve() {
    precalcFact();
    precalcShort(c);
    auto cnt = precalcCnt(n);
    int ans = 0;
    // Burnside's lemma over the cyclic group: group shifts by gcd with n.
    fore (g, 1, n + 1) {
        if (n % g != 0) continue;
        int cntFP = calcFixedPoints(g);
        // cerr << "g = " << g << " += " << cnt[g] << " * " << cntFP << endl;
        upd(ans, mul(cnt[g], cntFP));
    }
    // cerr << "ans/n = " << ans << " " << n << endl;
    cout << mul(ans, binPow(n, MOD - 2)) << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int tt = clock();
#endif
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);
    if(read()) {
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - tt << endl;
        tt = clock();
#endif
    }
    return 0;
}
1955
A
Yogurt Sale
The price of one yogurt at the "Vosmiorochka" store is $a$ burles, but there is a promotion where you can buy two yogurts for $b$ burles. Maxim needs to buy \textbf{exactly} $n$ yogurts. When buying two yogurts, he can choose to buy them at the regular price or at the promotion price. What is the minimum amount of burles Maxim should spend to buy $n$ yogurts?
You can always buy yogurts one by one without using the promotion, then the answer is $n \cdot a$. Suppose it is more advantageous to buy yogurts on promotion than one by one, that is, $b < 2 \cdot a$. If $n$ is even, the answer is ${n \over 2} \cdot b$, otherwise ${(n - 1) \over 2} \cdot b + a$. Now you just need to choose whether to buy yogurts one by one or on promotion, taking the minimum of the two options.
[ "math" ]
800
#include <iostream>

using ll = long long;

// Cheapest way to buy exactly n yogurts: all singles at price a, or — when
// the promotion is actually cheaper (b < 2a) — n/2 pairs at price b plus
// one single if n is odd.
static void solve() {
    ll n, a, b;
    std::cin >> n >> a >> b;
    ll cost = n * a;
    if (b < 2 * a)
        cost = (n / 2) * b + (n % 2) * a;
    std::cout << cost << '\n';
}

int main() {
    int t;
    std::cin >> t;
    while (t--) solve();
}
1955
B
Progressive Square
A progressive square of size $n$ is an $n \times n$ matrix. Maxim chooses three integers $a_{1,1}$, $c$, and $d$ and constructs a progressive square according to the following rules: $$a_{i+1,j} = a_{i,j} + c$$ $$a_{i,j+1} = a_{i,j} + d$$ For example, if $n = 3$, $a_{1,1} = 1$, $c=2$, and $d=3$, then the progressive square looks as follows: $$ \begin{pmatrix} 1 & 4 & 7 \\ 3 & 6 & 9 \\ 5 & 8 & 11 \end{pmatrix} $$ Last month Maxim constructed a progressive square and remembered the values of $n$, $c$, and $d$. Recently, he found an array $b$ of $n^2$ integers in random order and wants to make sure that these elements are the elements of \textbf{that specific} square. It can be shown that for any values of $n$, $a_{1,1}$, $c$, and $d$, there exists exactly one progressive square that satisfies all the rules.
Since $c > 0$ and $d > 0$, the elements of the square increase starting from the top left corner. Thus, $a_{1,1}$ is the minimum element in the square, and consequently in the found elements. Given $n$, $c$, $d$, and the found $a_{1,1}$, we will reconstruct the square. It remains to check that the given numbers in the input form the same square. The easiest way is to sort both arrays of numbers and check for equality. The complexity of the solution is $O(n ^ 2 \cdot \log{n})$.
[ "constructive algorithms", "data structures", "implementation", "sortings" ]
1,000
#include <algorithm>
#include <iostream>
#include <vector>

using namespace std;
using ll = long long;

// Rebuild the unique progressive square from its minimum element and compare
// it, as a multiset, with the numbers found.
//
// Fixes over the previous version:
//  * output is the canonical "YES"/"NO" (was "YEs"/"nO", which fails any
//    case-sensitive checker);
//  * reconstruction follows the statement: moving right adds d, moving down
//    adds c (the old code had the roles swapped — harmless only because both
//    sides are sorted before comparison).
static void solve() {
    int n;
    ll c, d;
    cin >> n >> c >> d;

    vector<ll> given(static_cast<size_t>(n) * n);
    for (auto &x : given) cin >> x;
    sort(given.begin(), given.end());

    // c > 0 and d > 0, so a[1][1] is the global minimum of the square.
    vector<ll> square(static_cast<size_t>(n) * n);
    square[0] = given[0];
    // First row: a[i][j+1] = a[i][j] + d.
    for (int j = 1; j < n; ++j) square[j] = square[j - 1] + d;
    // Subsequent rows: a[i+1][j] = a[i][j] + c.
    for (int i = 1; i < n; ++i)
        for (int j = 0; j < n; ++j)
            square[i * n + j] = square[(i - 1) * n + j] + c;

    sort(square.begin(), square.end());
    cout << (given == square ? "YES" : "NO") << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1955
C
Inhabitant of the Deep Sea
$n$ ships set out to explore the depths of the ocean. The ships are numbered from $1$ to $n$ and follow each other in ascending order; the $i$-th ship has a durability of $a_i$. The Kraken attacked the ships $k$ times in a specific order. First, it attacks the first of the ships, then the last, then the first again, and so on. Each attack by the Kraken reduces the durability of the ship by $1$. When the durability of the ship drops to $0$, it sinks and is no longer subjected to attacks (thus the ship ceases to be the first or last, and the Kraken only attacks the ships that have not yet sunk). If all the ships have sunk, the Kraken has nothing to attack and it swims away. For example, if $n=4$, $k=5$, and $a=[1, 2, 4, 3]$, the following will happen: - The Kraken attacks the first ship, its durability becomes zero and now $a = [2, 4, 3]$; - The Kraken attacks the last ship, now $a = [2, 4, 2]$; - The Kraken attacks the first ship, now $a = [1, 4, 2]$; - The Kraken attacks the last ship, now $a = [1, 4, 1]$; - The Kraken attacks the first ship, its durability becomes zero and now $a = [4, 1]$. How many ships were sunk after the Kraken's attack?
To solve the problem, let's model the behavior of the Kraken. Suppose initially there are two or more ships in the sea, we will consider the first and last ship, denote their durabilities as $a_l$ and $a_r$, and also let $m = \min(a_l, a_r)$, initially setting $l = 1$ and $r = n$. After two attacks, the durability of both ships will decrease by $1$. If $k \ge 2 \cdot m$, then we need to subtract $m$ from the durabilities of both ships, and also reduce the remaining attacks of the Kraken by $2 \cdot m$. If $k < 2 \cdot m$, then the Kraken will inflict $\lfloor {k \over 2} \rfloor$ damage to the $r$-th ship. In this case, if $k$ is odd, the $l$-th ship will receive $\lfloor {k \over 2} \rfloor + 1$ damage, otherwise $\lfloor {k \over 2} \rfloor$ damage. After these attacks, some ships may have sunk. If $a_l = 0$, we increase $l$ by $1$, if $a_r = 0$, we decrease $r$ by $1$, and move on to considering the next pair of ships that will be under attack by the Kraken. If at some point $l = r$, it means that there is only one ship left in the sea, and the Kraken can sink it if $k \ge a_l$. A deque is perfect for this problem, allowing us to not think about $l$ and $r$ and simply look at the first and last elements in the queue. At each step of considering a pair of ships, either the Kraken's attacks end, or at least one ship sinks. The final complexity of the solution is $O(n)$.
[ "greedy", "implementation", "math" ]
1,300
#include <deque>
#include <iostream>

using namespace std;
using ll = long long;

// The Kraken alternates attacks on the first and last surviving ship, so
// the two edge ships can be processed as a pair in O(1) amortized steps.
static void solve() {
    int n;
    ll k;
    cin >> n >> k;
    deque<ll> ships(n);
    for (auto &hp : ships) cin >> hp;

    while (ships.size() > 1 && k > 0) {
        const ll weakest = min(ships.front(), ships.back());
        if (k < 2 * weakest) {
            // Not enough attacks left to sink either edge ship; the first
            // ship takes the extra hit when k is odd.
            ships.front() -= (k + 1) / 2;
            ships.back() -= k / 2;
            k = 0;
        } else {
            ships.front() -= weakest;
            ships.back() -= weakest;
            k -= 2 * weakest;
        }
        if (ships.front() == 0) ships.pop_front();
        if (ships.back() == 0) ships.pop_back();
    }

    int sunk = n - static_cast<int>(ships.size());
    // A single remaining ship sinks iff the leftover attacks cover its hp.
    if (!ships.empty() && ships.front() <= k) ++sunk;
    cout << sunk << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1955
D
Inaccurate Subsequence Search
Maxim has an array $a$ of $n$ integers and an array $b$ of $m$ integers ($m \le n$). Maxim considers an array $c$ of length $m$ to be good if the elements of array $c$ can be rearranged in such a way that at least $k$ of them match the elements of array $b$. For example, if $b = [1, 2, 3, 4]$ and $k = 3$, then the arrays $[4, 1, 2, 3]$ and $[2, 3, 4, 5]$ are good (they can be reordered as follows: $[1, 2, 3, 4]$ and $[5, 2, 3, 4]$), while the arrays $[3, 4, 5, 6]$ and $[3, 4, 3, 4]$ are not good. Maxim wants to choose every subsegment of array $a$ of length $m$ as the elements of array $c$. Help Maxim count how many selected arrays will be good. In other words, find the number of positions $1 \le l \le n - m + 1$ such that the elements $a_l, a_{l+1}, \dots, a_{l + m - 1}$ form a good array.
How to check if two arrays are good? For each element of $b$, try to pair it with an element from array $a$. If we managed to create $k$ or more pairs, then the arrays are good. Since rearranging the elements of the array is allowed, we will maintain three multisets $C$, $D$, and $E$. In $D$, we will store all elements from $b$ for which we found a pair, and in $C$ and $E$, all elements from $b$ and $a$ for which a pair was not found, i.e., $C \cap E = \emptyset$. Then the arrays are good if $|D| \ge k$. It remains to understand how to find all good subsegments using this data organization. Suppose we shift the left boundary $l$ by $1$, simultaneously moving the right boundary, where $r = l + m - 1$. We need to remove the pair with the element $a_l$ from $D$, if it exists, and immediately try to find a replacement for the removed pair in $E$. After that, we try to find a pair for $a_r$ from $C$, and if a pair cannot be found, we place the element in $E$. The complexity of the solution is $O(n \log n)$ or $O(n)$ if a hash table is used.
[ "data structures", "two pointers" ]
1,400
#include <iostream>
#include <set>
#include <vector>

using namespace std;

// Slide a window of length m over a, maintaining three multisets:
//  todo  - values of b not yet matched by a window element,
//  done  - matched pairs (one entry per pair),
//  extra - window elements with no partner in b.
// The window is good when done.size() >= k.
static void solve() {
    int n, m;
    size_t k;
    cin >> n >> m >> k;
    vector<int> a(n);
    for (auto &x : a) cin >> x;

    multiset<int> todo, done, extra;
    for (int j = 0; j < m; ++j) {
        int val;
        cin >> val;
        todo.insert(val);
    }

    // Pair a value entering the window with an unmatched b-value if possible.
    auto enter = [&](int val) {
        auto it = todo.find(val);
        if (it != todo.end()) {
            todo.erase(it);
            done.insert(val);
        } else {
            extra.insert(val);
        }
    };
    // Undo the pairing of a value leaving the window.
    auto leave = [&](int val) {
        auto it = extra.find(val);
        if (it != extra.end()) {
            extra.erase(it);
            return;
        }
        it = done.find(val);
        if (it != done.end()) {
            done.erase(it);
            todo.insert(val);
        }
    };

    for (int j = 0; j < m; ++j) enter(a[j]);
    int good = (done.size() >= k) ? 1 : 0;
    for (int r = m; r < n; ++r) {
        leave(a[r - m]);
        enter(a[r]);
        if (done.size() >= k) ++good;
    }
    cout << good << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1955
E
Long Inversions
A binary string $s$ of length $n$ is given. A binary string is a string consisting only of the characters '1' and '0'. You can choose an integer $k$ ($1 \le k \le n$) and then apply the following operation any number of times: choose $k$ consecutive characters of the string and invert them, i.e., replace all '0' with '1' and vice versa. Using these operations, you need to make all the characters in the string equal to '1'. For example, if $n=5$, $s=00100$, you can choose $k=3$ and proceed as follows: - choose the substring from the $1$-st to the $3$-rd character and obtain $s=\textcolor{blue}{110}00$; - choose the substring from the $3$-rd to the $5$-th character and obtain $s=11\textcolor{blue}{111}$; Find the maximum value of $k$ for which it is possible to make all the characters in the string equal to '1' using the described operations. Note that the number of operations required to achieve this is not important.
No substring of the string needs to be inverted twice, as it does not change the string in any way. Let's fix $k$ and try to check if all the characters of the string can be made equal to 1. Suppose the first $i$ characters are already equal to 1, and $s_{i + 1}=0$. Then we need to invert all the bits starting from the $i + 1$-th to the $i + k$-th inclusive, i.e., not invert the first $i$ characters. If we invert any of the first $i$ characters, then we will have to invert it again. Either we will invert $s_{i + 1}$ again, so it will become equal to 0, or we will invert the characters to the left and come to the same situation, but for a smaller prefix. Naive checking for a fixed $k$ takes $O(n \cdot k)$ time, if we honestly invert $k$ characters every time we encounter 0. We will maintain an inversion counter - how many times we need to invert a character of the string. Getting the actual value of a character is simple - if the counter is odd, invert the character. If $s_{i} = 0$, add $1$ to the counter, and remember that we need to subtract $1$ from the counter after position $i + k$, forming a segment $[i, i + k)$. This way, the complexity of the check will be $O(n)$. It remains to iterate over $k$ and find the maximum for which it was possible to bring the string to all 1s. The complexity of the solution is $O(n ^ 2)$.
[ "brute force", "greedy", "implementation", "sortings" ]
1,700
#include <algorithm>
#include <cassert>
#include <iostream>
#include <string>
#include <vector>

using namespace std;

// For a fixed window length k, sweep left to right and flip the window
// [i, i + k) whenever the current character (after all pending inversions)
// is '0'.  Feasible iff no zero remains at the end.  O(n) via a counter of
// active inversions plus an expiry (difference) array.
static bool feasible(const string &s, int n, int k) {
    vector<char> bit(n), expire(n + 1, 0);
    for (int i = 0; i < n; ++i) bit[i] = s[i] - '0';
    int active = 0;  // inversions currently covering position i
    for (int i = 0; i < n; ++i) {
        active -= expire[i];
        bit[i] ^= (active & 1);
        if (bit[i] == 0) {
            if (i + k > n) break;  // window would run past the end
            ++expire[i + k];
            ++active;
            bit[i] = 1;
        }
    }
    return *min_element(bit.begin(), bit.end()) == 1;
}

static void solve() {
    int n;
    string s;
    cin >> n >> s;
    for (int k = n; k > 0; --k) {
        if (feasible(s, n, k)) {
            cout << k << '\n';
            return;
        }
    }
    assert(false);  // k = 1 always works, so this is unreachable
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1955
F
Unfair Game
Alice and Bob gathered in the evening to play an exciting game on a sequence of $n$ integers, each integer of the sequence \textbf{doesn't exceed $4$}. The rules of the game are too complex to describe, so let's just describe the winning condition — Alice wins if the bitwise XOR of all the numbers in the sequence is non-zero; otherwise, Bob wins. The guys invited Eve to act as a judge. Initially, Alice and Bob play with $n$ numbers. After one game, Eve removes one of the numbers from the sequence, then Alice and Bob play with $n-1$ numbers. Eve removes one number again, after which Alice and Bob play with $n - 2$ numbers. This continues until the sequence of numbers is empty. Eve seems to think that in such a game, Alice almost always wins, so she wants Bob to win as many times as possible. Determine the maximum number of times Bob can win against Alice if Eve removes the numbers optimally.
Let's try to solve the problem if all the numbers in the sequence are equal to $4$. If the number of elements is even, the bitwise XOR is zero, and if it's odd, then it's equal to $4$. By removing one $4$ at a time, Eve will change the parity, so the answer will be $\lfloor {p_4 \over 2} \rfloor$. Suppose there are some other numbers in the sequence besides $4$. For Bob to win, the number of $4$s still needs to be even. Therefore, we can solve the problem for the remaining numbers separately from the fours. One of the solutions uses dynamic programming. Let's denote $dp_{i,j,k}$ as the maximum number of Bob's wins, if there were initially $i$ ones, $j$ twos, and $k$ threes. Since an empty sequence is not considered in the answer, then $dp_{0,0,0} = 0$. We will iterate over which number to remove, and get the following transitions: $dp_{i,j,k} = \max(dp_{i - 1,j,k}, dp_{i,j - 1,k}, dp_{i,j,k - 1}) + x(i,j,k)$. Here, $x(i,j,k) = 1$ if the bitwise XOR of $i$ ones, $j$ twos, and $k$ threes is zero, and $x(i,j,k) = 0$ otherwise. The complexity of calculating these values is $O(N ^ 3)$, where $N = 200$. Another solution is analytical. Bob wins in two cases: if the number of ones, twos, and threes is even; if the number of ones, twos, and threes is odd. In the first case, the bitwise XOR is zero, because each number occurs an even number of times, and in the second case, $1 \oplus 2 \oplus 3 = 0$. To maintain the parities, Eve needs to remove the same number, so with two removals, Bob will win once. If the second condition is not initially met, it is always more advantageous to remove numbers to reach the first case, then the answer is $\sum^{3}_{i = 1} \lfloor{cnt_i \over 2}\rfloor$. If the second condition is already met, then $1$ needs to be added to the answer.
[ "dp", "games", "greedy", "math", "schedules" ]
1,800
#include <bits/stdc++.h>
using namespace std;

// Bob wins a single game iff the XOR of the remaining numbers is zero.
// dp[i][j][k] = maximum number of Bob wins achievable when the sequence
// holds i ones, j twos and k threes. Fours are handled separately in
// solve(): each pair of removals of a 4 yields one extra Bob win.
const int N = 201;
int dp[N][N][N];

// Precompute dp for every (i, j, k) with all counts below N.
void precalc() {
    dp[0][0][0] = 0;
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < N; ++j) {
            for (int k = 0; k < N; ++k) {
                // Best continuation after Eve removes one 1, 2 or 3
                // (only transitions with a positive count are valid).
                int prev = 0;
                if (i) prev = max(prev, dp[i - 1][j][k]);
                if (j) prev = max(prev, dp[i][j - 1][k]);
                if (k) prev = max(prev, dp[i][j][k - 1]);
                dp[i][j][k] = prev;
                // XOR of i ones, j twos and k threes depends only on the
                // parity of each count.
                int xr = ((i & 1) * 1) ^ ((j & 1) * 2) ^ ((k & 1) * 3);
                // Bob wins the current game when the XOR is zero; the
                // empty sequence is not counted as a game.
                if (xr == 0 && (i || j || k)) {
                    ++dp[i][j][k];
                }
            }
        }
    }
}

// One test case: counts of 1s, 2s, 3s and 4s. Fours contribute
// cnt[3] / 2 wins on their own (each removal toggles the XOR parity).
void solve() {
    vector<int> cnt(4);
    for (int i = 0; i < 4; ++i) {
        cin >> cnt[i];
    }
    cout << dp[cnt[0]][cnt[1]][cnt[2]] + cnt[3] / 2 << '\n';
}

int main() {
    precalc();
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1955
G
GCD on a grid
Not long ago, Egor learned about the Euclidean algorithm for finding the greatest common divisor of two numbers. The greatest common divisor of two numbers $a$ and $b$ is the largest number that divides both $a$ and $b$ without leaving a remainder. With this knowledge, Egor can solve a problem that he once couldn't. Vasily has a grid with $n$ rows and $m$ columns, and the integer ${a_i}_j$ is located at the intersection of the $i$-th row and the $j$-th column. Egor wants to go from the top left corner (at the intersection of the first row and the first column) to the bottom right corner (at the intersection of the last row and the last column) and find the greatest common divisor of all the numbers along the path. He is only allowed to move down and to the right. Egor has written down several paths and obtained different GCD values. He became interested in finding the maximum possible GCD. Unfortunately, Egor is tired of calculating GCDs, so he asks for your help in finding the maximum GCD of the integers along the path from the top left corner to the bottom right corner of the grid.
First, let's learn how to check for a fixed $x$ if there exists a path from $(1,1)$ to $(n,m)$ with a GCD of $x$. It is necessary for all numbers along the path to be divisible by $x$. Let's define a grid $b$ of size $n$ by $m$, where $b_{i,j} = 1$ if $a_{i,j}$ is divisible by $x$, and $b_{i,j} = 0$ otherwise. If there exists a path of ones in $b$, then there exists a path in $a$ with a GCD of $x$. To check if there exists a path consisting entirely of ones, dynamic programming can be used. Let $dp_{i, j}$ denote whether it is possible to reach $(i,j)$ from $(1,1)$. Then the transitions are $dp_{i,j} = dp_{i - 1,j} \lor dp_{i,j - 1}$, with the base case of the dynamic programming being $dp_{1,1} = b_{1,1}$. Since the path will definitely pass through the cells $(1,1)$ and $(n,m)$, we iterate through all divisors of the number $g = \gcd(a_{1,1}, a_{n,m})$, check for each one if there exists a path with that GCD, and take the maximum such divisor. The complexity of the solution is $O(n \cdot m \cdot \sqrt[3]{A})$, where $A \le 10^6$.
[ "brute force", "dfs and similar", "dp", "implementation", "math", "number theory" ]
1,900
#include <bits/stdc++.h>
using ll = signed long long int;
#define all(x) (x).begin(), (x).end()
using pii = std::pair<int, int>;
using pll = std::pair<ll, ll>;
using namespace std;

// One test case: maximum GCD over all right/down paths from (0,0) to
// (n-1,m-1). Any achievable GCD divides both endpoints, so we enumerate
// the divisors of g = gcd(a[0][0], a[n-1][m-1]) and, for each candidate,
// check with a boolean reachability DP whether a path of divisible cells
// exists.
void solve() {
    int n, m;
    cin >> n >> m;
    vector<vector<int> > a(n, vector<int>(m));
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < m; ++j) {
            cin >> a[i][j];
        }
    }
    // ans starts at 1: divisor 1 always admits a valid path.
    int ans = 1, g = gcd(a[0][0], a[n - 1][m - 1]);
    vector<vector<char> > dp(n, vector<char>(m));
    for (int x = 1; x * x <= g; ++x) {
        if (g % x > 0) {
            continue;
        }
        // Check both x and its cofactor g / x.
        vector<int> cand = {x, g / x};
        for (int el : cand) {
            for (int i = 0; i < n; ++i) {
                dp[i].assign(m, 0);
            }
            dp[0][0] = 1; // el | g, so a[0][0] is divisible by el
            for (int i = 0; i < n; ++i) {
                for (int j = 0; j < m; ++j) {
                    if (a[i][j] % el > 0) {
                        continue; // cell unusable for this divisor
                    }
                    // Reachable from the cell above or to the left.
                    if (!dp[i][j] && i) {
                        dp[i][j] = (dp[i - 1][j] == 1 ? 1 : 0);
                    }
                    if (!dp[i][j] && j) {
                        dp[i][j] = (dp[i][j - 1] == 1 ? 1 : 0);
                    }
                }
            }
            if (dp[n - 1][m - 1]) {
                ans = max(ans, el);
            }
        }
    }
    cout << ans << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1955
H
The Most Reckless Defense
You are playing a very popular Tower Defense game called "Runnerfield 2". In this game, the player sets up defensive towers that attack enemies moving from a certain starting point to the player's base. You are given a grid of size $n \times m$, on which $k$ towers are already placed and a path is laid out through which enemies will move. The cell at the intersection of the $x$-th row and the $y$-th column is denoted as $(x, y)$. Each second, a tower deals $p_i$ units of damage to all enemies within its range. For example, if an enemy is located at cell $(x, y)$ and a tower is at $(x_i, y_i)$ with a range of $r$, then the enemy will take damage of $p_i$ if $(x - x_i) ^ 2 + (y - y_i) ^ 2 \le r ^ 2$. Enemies move from cell $(1, 1)$ to cell $(n, m)$, visiting each cell of the path exactly once. An enemy instantly moves to an adjacent cell horizontally or vertically, but before doing so, it spends one second in the current cell. If its health becomes zero or less during this second, the enemy can no longer move. The player loses if an enemy reaches cell $(n, m)$ and can make one more move. By default, all towers have a zero range, but the player can set a tower's range to an integer $r$ ($r > 0$), in which case the health of all enemies will increase by $3^r$. However, each $r$ can only be used for \textbf{at most one} tower. Suppose an enemy has a base health of $h$ units. If the tower ranges are $2$, $4$, and $5$, then the enemy's health at the start of the path will be $h + 3 ^ 2 + 3 ^ 4 + 3 ^ 5 = h + 9 + 81 + 243 = h + 333$. The choice of ranges is made once before the appearance of enemies and cannot be changed after the game starts. Find the maximum amount of base health $h$ for which it is possible to set the ranges so that the player does not lose when an enemy with health $h$ passes through (without considering the additions for tower ranges).
Let's solve the problem for a single tower. The tower's area of effect is a circle, so the theoretically possible number of cells in which a tower with radius $r$ will deal damage is $\pi \cdot r ^ 2$. In total, the tower will deal $p_i \cdot \pi \cdot r^2$ damage to the enemy. At the same time, the initial health of the enemy will be increased by $3^r$, so in fact, this health increase needs to be subtracted from the tower's damage. Thus, the maximum radius for the tower can be found from the inequality $500 \cdot \pi \cdot r ^ 2 - 3 ^ r > 0$, assuming that the tower has a damage of $p_i = 500$. This estimate gives $R = 12$, above which the enemy's health increase is too large for the tower to overcome. So, there are not so many radii, and each radius can be applied to no more than one tower. We will use the subset dynamic programming method: $dp_{i,mask}$ - the maximum damage that the first $i$ towers will deal with the optimal distribution of the radii in the $mask$. The transitions are quite simple: $dp_{i, mask} = \max_{r \in mask}(dp_{i - 1, mask}, dp_{i - 1, mask \oplus r} + p_i \cdot cover(i, r))$. The answer will be the maximum value $\max_{mask} (dp_{k, mask} - \sum_{r \in mask} {3 ^ r})$. $cover(i, r)$ - this is the number of cells covered by the $i$-th tower when the radius is set to $r$. It can be calculated for one tower and for one radius in $O(r ^ 2)$ time, just iterate through the square with a side length of $2 \cdot r$ and a center coinciding with the location of the tower. In total, the precalculation of $cover$ for all towers and all radii will take $O(k \cdot R ^ 3)$ time. The complexity of calculating the dynamic programming values is $O(k \cdot R \cdot 2 ^ R)$, so the final complexity of the solution is $O(k \cdot R \cdot (2 ^ R + R ^ 2))$.
[ "bitmasks", "brute force", "constructive algorithms", "dp", "flows", "graph matchings", "shortest paths" ]
2,300
#include <bits/stdc++.h>
using ll = signed long long int;
#define all(x) (x).begin(), (x).end()
using pii = std::pair<int, int>;
using pll = std::pair<ll, ll>;
using namespace std;

// R = 12: per the editorial bound, for r > 12 the health bonus 3^r
// exceeds the maximum extra damage (~500 * pi * r^2) a tower could deal,
// so larger radii are never useful.
const int R = 12, INF = 2e9;

// Bounds check: is x a valid 0-based index into a dimension of size n?
bool check(int x, int n) {
    return (0 <= x && x < n);
}

void solve() {
    int n, m, k;
    cin >> n >> m >> k;
    vector<string> gr(n);
    for (int i = 0; i < n; ++i) {
        cin >> gr[i];
    }
    // Tower coordinates (0-based after decrement) and damage values.
    vector<pii> cord(k);
    vector<int> p(k);
    for (int i = 0; i < k; ++i) {
        cin >> cord[i].first >> cord[i].second >> p[i];
        --cord[i].first;
        --cord[i].second;
    }
    // cover[i][r] = number of path cells ('#') within Euclidean distance r
    // of tower i; computed by scanning the (2r+1) x (2r+1) bounding square.
    vector<vector<int> > cover(k, vector<int>(R + 1));
    for (int i = 0; i < k; ++i) {
        int x = cord[i].first;
        int y = cord[i].second;
        for (int r = 1; r <= R; ++r) {
            for (int dx = -r; dx <= r; ++dx) {
                for (int dy = -r; dy <= r; ++dy) {
                    int nx = x + dx;
                    int ny = y + dy;
                    if (!check(nx, n) || !check(ny, m)) {
                        continue;
                    }
                    if ((x - nx) * (x - nx) + (y - ny) * (y - ny) <= r * r) {
                        cover[i][r] += (gr[nx][ny] == '#');
                    }
                }
            }
        }
    }
    // dp[i][mask] = maximum total damage using the first i towers when the
    // set of radii in mask has been assigned (bit j <-> radius j + 1).
    vector<vector<int> > dp(k + 1, vector<int>(1 << R, -INF));
    dp[0][0] = 0;
    for (int i = 1; i <= k; ++i) {
        for (int mask = 0; mask < (1 << R); ++mask) {
            dp[i][mask] = dp[i - 1][mask]; // tower i gets no radius
            for (int r = 1; r <= R; ++r) {
                int j = r - 1;
                if (!(mask & (1 << j))) {
                    continue;
                }
                // Give radius r to tower i (it deals p * cover damage,
                // once per second the enemy spends in range).
                dp[i][mask] = max(dp[i][mask], dp[i - 1][mask ^ (1 << j)] + p[i - 1] * cover[i - 1][r]);
            }
        }
    }
    // Answer: max over masks of (total damage - sum of 3^r health bonuses).
    int ans = 0;
    for (int mask = 0; mask < (1 << R); ++mask) {
        int hp = 0, mlt = 3;
        for (int j = 0; j < R; ++j) {
            if (mask & (1 << j)) {
                hp += mlt;
            }
            mlt *= 3;
        }
        for (int i = 0; i <= k; ++i) {
            ans = max(ans, dp[i][mask] - hp);
        }
    }
    cout << ans << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1956
A
Nene's Game
Nene invented a new game based on an increasing sequence of integers $a_1, a_2, \ldots, a_k$. In this game, initially $n$ players are lined up in a row. In each of the rounds of this game, the following happens: - Nene finds the $a_1$-th, $a_2$-th, $\ldots$, $a_k$-th players in a row. They are kicked out of the game simultaneously. If the $i$-th player in a row should be kicked out, but there are fewer than $i$ players in a row, they are skipped. Once no one is kicked out of the game in some round, all the players that are still in the game are declared as winners. For example, consider the game with $a=[3, 5]$ and $n=5$ players. Let the players be named player A, player B, $\ldots$, player E in the order they are lined up initially. Then, - Before the first round, players are lined up as ABCDE. Nene finds the $3$-rd and the $5$-th players in a row. These are players C and E. They are kicked out in the first round. - Now players are lined up as ABD. Nene finds the $3$-rd and the $5$-th players in a row. The $3$-rd player is player D and there is no $5$-th player in a row. Thus, only player D is kicked out in the second round. - In the third round, no one is kicked out of the game, so the game ends after this round. - Players A and B are declared as the winners. Nene has not yet decided how many people would join the game initially. Nene gave you $q$ integers $n_1, n_2, \ldots, n_q$ and you should answer the following question for each $1 \le i \le q$ \textbf{independently}: - How many people would be declared as winners if there are $n_i$ players in the game initially?
Obviously, a person at place $p$ will be kicked out if and only if $p \ge a_1$. Therefore, the answer is $\min(n,a_1-1)$.
[ "binary search", "brute force", "data structures", "games", "greedy" ]
800
#include <bits/stdc++.h>
using namespace std;

// Only the smallest kick-out position a[1] matters: a player survives iff
// they eventually stand in front of position a[1], so with n players
// exactly min(n, a[1] - 1) of them win.
void run_case() {
    int k, q;
    cin >> k >> q;
    vector<int> pos(k);
    for (auto &x : pos) cin >> x;
    for (int query = 0; query < q; ++query) {
        int n;
        cin >> n;
        cout << min(pos[0] - 1, n) << ' ';
    }
    cout << endl;
}

int main() {
    ios::sync_with_stdio(false);
    int tests;
    cin >> tests;
    while (tests--) run_case();
}
1956
B
Nene and the Card Game
You and Nene are playing a card game. The deck with $2n$ cards is used to play this game. Each card has an integer from $1$ to $n$ on it, and each of integers $1$ through $n$ appears exactly on $2$ cards. Additionally, there is a table where cards are placed during the game (initially, the table is empty). In the beginning of the game, these $2n$ cards are distributed between you and Nene so that each player receives $n$ cards. After it, you and Nene alternatively take $2n$ turns, i.e. each person takes $n$ turns, \textbf{starting with you}. On each turn: - The player whose turn is it selects one of the cards in his hand. Let $x$ be the number on it. - The player whose turn is it receives $1$ point if there is already a card with the integer $x$ on the table (otherwise, he receives no points). After it, he places the selected card with the integer $x$ on the table. Note that turns are made publicly: each player can see all the cards on the table at each moment. Nene is very smart so she always selects cards optimally in order to maximize her score in the end of the game (after $2n$ rounds). If she has several optimal moves, she selects the move that minimizes your score in the end of the game. More formally, Nene always takes turns optimally in order to maximize her score in the end of the game in the first place and to minimize your score in the end of the game in the second place. Assuming that the cards are already distributed and cards in your hand have integers $a_1, a_2, \ldots, a_n$ written on them, what is the maximum number of points you can get by taking your turns optimally?
For each color (in the following text, "this point" refers to the point someone gets by playing a card with this color): If you have both cards of this color in your hand, you will always be able to get this point. If Nene has both cards, you will never be able to get this point. If each of you has one card of this color, you cannot get this point when Nene uses the following strategy: when you play one of your paired cards, Nene also plays one of her paired cards; otherwise, when you play your single card of some color, Nene still holds the other card with the same color, so she plays it immediately and gets this point herself. Note that you and Nene hold the same number of pairs (for every split color, one card is in each hand), so Nene never runs out of paired cards to mirror with. Therefore, the answer is the number of pairs in your hand.
[ "games", "greedy" ]
800
#include <bits/stdc++.h>
using namespace std;

const int MAXN = 4e5 + 5;
int cnt[MAXN];

// Per the editorial: you score exactly one point for every color of which
// you hold both copies; Nene's mirroring strategy denies every split color.
void solve() {
    int n;
    scanf("%d", &n);
    fill(cnt + 1, cnt + n + 1, 0); // card values are in [1, n]
    int pairs = 0;
    for (int i = 0; i < n; ++i) {
        int card;
        scanf("%d", &card);
        if (++cnt[card] == 2) ++pairs;
    }
    printf("%d\n", pairs);
}

signed main() {
    int T;
    scanf("%d", &T);
    while (T--) solve();
    return 0;
}
1956
C
Nene's Magical Matrix
The magical girl Nene has an $n\times n$ matrix $a$ filled with zeroes. The $j$-th element of the $i$-th row of matrix $a$ is denoted as $a_{i, j}$. She can perform operations of the following two types with this matrix: - Type $1$ operation: choose an integer $i$ between $1$ and $n$ and a permutation $p_1, p_2, \ldots, p_n$ of integers from $1$ to $n$. Assign $a_{i, j}:=p_j$ for all $1 \le j \le n$ simultaneously. - Type $2$ operation: choose an integer $i$ between $1$ and $n$ and a permutation $p_1, p_2, \ldots, p_n$ of integers from $1$ to $n$. Assign $a_{j, i}:=p_j$ for all $1 \le j \le n$ simultaneously. Nene wants to maximize the sum of all the numbers in the matrix $\sum\limits_{i=1}^{n}\sum\limits_{j=1}^{n}a_{i,j}$. She asks you to find the way to perform the operations so that this sum is maximized. As she doesn't want to make too many operations, you should provide a solution with no more than $2n$ operations. A permutation of length $n$ is an array consisting of $n$ distinct integers from $1$ to $n$ in arbitrary order. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array), and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array).
When $n=3$, the optimal matrix is $a_{i,j} = \max(i, j)$: $$\begin{pmatrix} 1 & 2 & 3 \\ 2 & 2 & 3 \\ 3 & 3 & 3 \end{pmatrix}$$ Construction method: for each $i$ from $n$ down to $1$, apply a type $1$ operation and then a type $2$ operation on index $i$, both with the identity permutation $p_j = j$. This takes exactly $2n$ operations. For the final matrix, we define $f(x)$ as the number of elements greater or equal to $x$. The sum of all elements in the matrix is $\sum_{i=1}^n f(i)$ because an element with value $x$ will be counted $x$ times in the formula before. Now, we prove that $f(x) \le n^2-(x-1)^2$: Let's rewrite the problem to make it a little simpler: You have an $n\times n$ matrix. In each operation, you can paint exactly $x-1$ cells white and $n-(x-1)$ cells black in a row or a column. Prove that there will be at most $n^2-(x-1)^2$ black cells. Try to strengthen the conclusion by stating that: For any matrix of size $n\times m$, each operation can paint a row into $x$ white cells and $m-x$ black cells, or a column into $y$ white cells and $n-y$ black cells. No matter how we paint, the final matrix will have at most $nm-xy$ black cells. We will prove this by induction. If $x=m$ or $y=n$, the conclusion holds. Otherwise, if the last operation is to paint a row, then this row has exactly $m-x$ black cells. And, by induction, other rows will contain at most $(n-1)m-x(y-1)$ black cells. Painting a column in the last step is similar. Then, we have proven the conclusion above. Since the construction above maximizes each $f(x)$, it is the optimal answer.
[ "constructive algorithms", "greedy", "math" ]
1,600
#include <bits/stdc++.h>
using namespace std;

// Writing the identity permutation into row i and then column i, for
// i = n down to 1, leaves max(i, j) in cell (i, j): value i covers an
// L-shaped ring of 2i - 1 cells, so the total is sum (2i - 1) * i.
// This uses exactly 2n operations.
void solve() {
    int n;
    cin >> n;
    int total = 0;
    for (int i = 1; i <= n; ++i) total += (2 * i - 1) * i;
    cout << total << ' ' << 2 * n << endl;
    for (int i = n; i >= 1; --i) {
        for (int op : {1, 2}) {
            cout << op << ' ' << i << ' ';
            for (int j = 1; j <= n; ++j) cout << j << ' ';
            cout << endl;
        }
    }
}

int main() {
    ios::sync_with_stdio(false);
    int cases;
    cin >> cases;
    while (cases--) solve();
}
1956
D
Nene and the Mex Operator
Nene gave you an array of integers $a_1, a_2, \ldots, a_n$ of length $n$. You can perform the following operation no more than $5\cdot 10^5$ times (possibly zero): - Choose two integers $l$ and $r$ such that $1 \le l \le r \le n$, compute $x$ as $\operatorname{MEX}(\{a_l, a_{l+1}, \ldots, a_r\})$, and simultaneously set $a_l:=x, a_{l+1}:=x, \ldots, a_r:=x$. Here, $\operatorname{MEX}$ of a set of integers $\{c_1, c_2, \ldots, c_k\}$ is defined as the smallest non-negative integer $m$ which does not occur in the set $c$. Your goal is to maximize the sum of the elements of the array $a$. Find the maximum sum and construct a sequence of operations that achieves this sum. Note that you don't need to minimize the number of operations in this sequence, you only should use no more than $5\cdot 10^5$ operations in your solution.
What is the answer when $a_i = 0$? When $a_i =0$, the sum can hit $n^2$ with making $a_i = n$ at last. Construction: Here, solve(k) will take about $2^k$ operations. Since doing operation $[l,r]$ will make $a_l,\cdots,a_r \le r-l+1$, if for all $l\le i\le r$, $a_i$ is included in at least one of the operations and $a_{l-1},a_{r+1}$ are not, the optimal strategy will be setting $a_i = r-l+1$ for $i\in[l,r]$ using the construction above. Finally, we can use DFS or DP to determine whether each element is included in operations. The number of operations used will not exceed $2^n$.
[ "bitmasks", "brute force", "constructive algorithms", "divide and conquer", "dp", "greedy", "implementation", "math" ]
2,000
#include<bits/stdc++.h>
using namespace std;

// n is small here (arrays sized 20), which makes the 2^n subset search
// and the ~2^n-operation construction below feasible.
int n,a[20],cnt[20];
// I records the performed operations as (l, r) pairs, 0-based inclusive.
vector <array<int,2>> I;

// Apply one MEX operation to a[l..r]: compute the MEX of the segment,
// overwrite every element with it, and record the operation.
void oper(int l,int r) {
    fill(cnt,cnt+n+1,0);
    // Values above n cannot affect a MEX that is at most n.
    for(int i=l;i<=r;++i) if(a[i]<=n) ++cnt[a[i]];
    int mex=0;
    while(cnt[mex]) ++mex;
    for(int i=l;i<=r;++i) a[i]=mex;
    I.push_back({l,r});
}

// Recursively force a[l..r] into the staircase 0,1,...,r-l so that one
// final operation on [l, r] can set the whole segment to r - l + 1.
// Uses on the order of 2^(r-l) operations.
void build(int l,int r) {
    if(l==r) {
        // A single nonzero cell: MEX of {a[l]} with a[l] > 0 is 0.
        if(a[l]) oper(l,r);
        return ;
    }
    build(l,r-1);
    // If a[r] did not land on the value r - l, collapse the segment and
    // rebuild the prefix staircase from scratch.
    if(a[r]!=r-l) oper(l,r),build(l,r-1);
}

void solve() {
    scanf("%d",&n),I.clear(),memset(a,0,sizeof(a));
    for(int i=0;i<n;++i) scanf("%d",&a[i]);
    // Try every subset s of positions to involve in operations: a maximal
    // run of chosen positions of length L contributes L * L (it can be
    // raised to the value L), an unchosen position keeps its value.
    int cur=0,ans=0;
    for(int s=0;s<(1<<n);++s) {
        int tmp=0;
        for(int l=0;l<n;++l) {
            if(s&(1<<l)) {
                int r=l;
                while(r+1<n&&(s&(1<<(r+1)))) ++r;
                tmp+=(r-l+1)*(r-l+1);
                l=r; // skip past this run
            }
            else tmp+=a[l];
        }
        if(tmp>ans) ans=tmp,cur=s;
    }
    // Realize the best subset: for each chosen run, build the staircase
    // and finish with one operation that sets the run to its length.
    for(int l=0;l<n;++l)
        if(cur&(1<<l)) {
            int r=l;
            while(r+1<n&&(cur&(1<<(r+1)))) ++r;
            build(l,r),oper(l,r),l=r;
        }
    printf("%d %d\n",ans,(int)I.size());
    // Operations are reported 1-based.
    for(auto i:I) printf("%d %d\n",i[0]+1,i[1]+1);
}

signed main() {
    solve();
    return 0;
}
1956
E2
Nene vs. Monsters (Hard Version)
This is the hard version of the problem. The only difference between the versions is the constraints on $a_i$. You can make hacks only if both versions of the problem are solved. Nene is fighting with $n$ monsters, located in a circle. These monsters are numbered from $1$ to $n$, and the $i$-th ($1 \le i \le n$) monster's current energy level is $a_i$. Since the monsters are too strong, Nene decided to fight with them using the Attack Your Neighbour spell. When Nene uses this spell, the following actions happen in the following order \textbf{one by one}: - The $1$-st monster attacks the $2$-nd monster; - The $2$-nd monster attacks the $3$-rd monster; - $\ldots$ - The $(n-1)$-th monster attacks the $n$-th monster; - The $n$-th monster attacks the $1$-st monster. When the monster with energy level $x$ attacks the monster with the energy level $y$, the energy level of the defending monster becomes $\max(0, y-x)$ (the energy level of the attacking monster remains equal to $x$). Nene is going to use this spell $10^{100}$ times and deal with the monsters that will still have a non-zero energy level herself. She wants you to determine which monsters will have a non-zero energy level once she will use the described spell $10^{100}$ times.
If three consecutive monsters have energy levels $0,x,y\ (x,y>0)$, the monster with energy level $y$ will "die" (reach energy level $0$) in the end. If four consecutive monsters have energy levels $0,x,y,z\ (x,y,z>0)$, what will happen to the monster $z$? If four consecutive monsters have energy levels $x,y,z,w\ (x,y,z,w>0)$, how many rounds of spells must be used to make at least one of these monsters die? If four consecutive monsters have energy levels $x,y,z,w\ (x,y,z,w>0)$ and none of them died after $t$ rounds of spells, then $y$ will receive at least $t$ points of damage, $z$ will receive at least $(t-1)+(t-2)+\cdots=O(t^2)$ damage, and $w$ will receive at least $O(t^3)$ damage. That is to say, letting $V=\max_{i=1}^n a_i$, after $O(\sqrt[3]{V})$ rounds at least one of $x,y,z,w$ will die. So, we can simulate the process by brute force until there are no four consecutive alive monsters, and then the problem is reduced to the one described in Hint 2. If four consecutive monsters have energy levels $0,x,y,z\ (x,y,z>0)$, $x$ will remain alive, and $y$ will die in the end, dealing $D=(y-x)+(y-2x)+\cdots+(y\bmod x)$ total damage to $z$ before that. Therefore, $z$ will remain alive if and only if $z>D$. The time complexity is $O(n\sqrt[3]{V})$. Bonus: Actually, it can be shown that after $O(\sqrt[k]{V})$ rounds, there will be no $k$ consecutive alive monsters. Making $k$ bigger than $3$ can further reduce the time complexity, but it is harder to implement and improves actual performance only slightly.
[ "brute force", "greedy", "implementation", "math" ]
2,700
#include<bits/stdc++.h>
using namespace std;
#define ll long long
#define MP make_pair
mt19937 rnd(time(0)); // unused here; leftover from the author's template
const int MAXN=2e5+5;
// a[i] = current energy of monster i (1-based, circular);
// b[i] = whether monster i is alive at the end.
int n,a[MAXN];bool b[MAXN];

// True while some window of four consecutive (circular) monsters is all
// alive. Copies a[1..3] past a[n] so the circular windows can be scanned
// linearly.
bool check(){
    a[n+1]=a[1],a[n+2]=a[2],a[n+3]=a[3];
    for(int i=1;i<=n;i++)
        if(a[i]&&a[i+1]&&a[i+2]&&a[i+3])
            return true;
    return false;
}

void solve(){
    cin>>n;
    for(int i=1;i<=n;i++) cin>>a[i];
    if(n==2){
        // Tiny cycle: simulate directly until one side is dead.
        while(a[1]&&a[2]){
            a[2]=max(0,a[2]-a[1]);
            a[1]=max(0,a[1]-a[2]);
        }
        b[1]=(a[1]>0);b[2]=(a[2]>0);
    }else if(n==3){
        // Simulate until someone dies; a survivor must have a dead
        // predecessor, hence the !a[prev] && a[i] conditions.
        while(a[1]&&a[2]&&a[3]){
            a[2]=max(0,a[2]-a[1]);
            a[3]=max(0,a[3]-a[2]);
            a[1]=max(0,a[1]-a[3]);
        }
        b[1]=(!a[3]&&a[1]);
        b[2]=(!a[1]&&a[2]);
        b[3]=(!a[2]&&a[3]);
    }else{
        // Per the editorial, after O(cbrt(max a)) brute-force rounds no
        // four consecutive monsters are all alive.
        while(check()){
            for(int i=1;i<=n;i++) a[i%n+1]=max(0,a[i%n+1]-a[i]);
        }
        for(int i=1;i<=n;i++) b[i]=false;
        // Total damage a monster with energy x deals to its right
        // neighbour y's victim before dying to a left neighbour hitting
        // for y each round: (x - y) + (x - 2y) + ... + (x mod y).
        auto attack=[&](ll x,ll y){ ll k=x/y; return (2*x-(k+1)*y)*k/2; };
        // One last partial round, stopping at the first dead monster so
        // every remaining pattern starts with a zero.
        for(int p=1;p<=n;p++)
            if(a[p]&&a[p%n+1]) a[p%n+1]=max(0,a[p%n+1]-a[p]);
            else break;
        // For each pattern 0, x, y, z: x survives, y dies eventually,
        // and z survives iff it outlasts the damage D computed by attack.
        for(int i=1;i<=n;i++)
            if(!a[i]&&a[i%n+1]){
                b[i%n+1]=true;
                b[(i+2)%n+1]=(a[(i+2)%n+1]>attack(a[(i+1)%n+1],a[i%n+1]));
            }
    }
    int cnt=0;
    for(int i=0;i<=n;i++) if(b[i]) cnt++;
    cout<<cnt<<endl;
    for(int i=1;i<=n;i++) if(b[i]) cout<<i<<' ';
    cout<<endl;
}

int main(){
    ios::sync_with_stdio(false);
    int _;cin>>_;
    while(_--) solve();
}
1956
F
Nene and the Passing Game
Nene is training her team as a basketball coach. Nene's team consists of $n$ players, numbered from $1$ to $n$. The $i$-th player has an arm interval $[l_i,r_i]$. Two players $i$ and $j$ ($i \neq j$) can pass the ball to each other if and only if $|i-j|\in[l_i+l_j,r_i+r_j]$ (here, $|x|$ denotes the absolute value of $x$). Nene wants to test the cooperation ability of these players. In order to do this, she will hold several rounds of assessment. - In each round, Nene will select a sequence of players $p_1,p_2,\ldots,p_m$ such that players $p_i$ and $p_{i+1}$ can pass the ball to each other for all $1 \le i < m$. The length of the sequence $m$ can be chosen by Nene. Each player can appear in the sequence $p_1,p_2,\ldots,p_m$ multiple times or not appear in it at all. - Then, Nene will throw a ball to player $p_1$, player $p_1$ will pass the ball to player $p_2$ and so on... Player $p_m$ will throw a ball away from the basketball court so it can no longer be used. As a coach, Nene wants each of $n$ players to appear in at least one round of assessment. Since Nene has to go on a date after school, Nene wants you to calculate the minimum number of rounds of assessment needed to complete the task.
Two people $i$ and $j\ (i<j)$ can pass the ball to each other directly if and only if $[i+l_i,i+r_i]\cap[j-r_j,j-l_j]\neq \varnothing$. According to the hint above, we can build the following graph: There are $2n$ vertices in the graph. Vertex $i$ links to vertices $([n+i-r_i,n+i-l_i]\cup[n+i+l_i,n+i+r_i])\cap[n+1,2n]\cap \mathbb{Z}$. That is, assuming vertices $1$ to $n$ are players and vertices $n+1$ to $2n$ are temporary spots, player $i$ links to all the spots where his/her arm can reach. Then, the answer will be the number of connected components in this graph which contain at least one vertex with an index less than or equal to $n$. But there's still a little problem with the solution. For two players $i, j\ (i<j)$ satisfying $[i+l_i,i+r_i]\cap[j+l_j,j+r_j]\neq \varnothing$ (that is, both players reaching out their "right arm"s), they are incorrectly counted as connected. To solve that, we can delete all the vertices $n+x$ such that $\forall i,\ x\not\in[i-r_i,i-l_i]$ or $\forall i,\ x\not\in[i+l_i,i+r_i]$ (that is, nobody's left/right arm can reach $x$). Finding such $x$ can be done easily in $O(n)$. The last issue is that the graph contains $O(n^2)$ edges. But since we only care about connectivity, the operation "link $x$ to $[y,z]$" can be changed to "link $x$ to $y$, and link $i$ to $i+1$ for all $i$ in $[y,z-1]$". After that and removing multiple edges, the number of edges is reduced to $O(n)$. Finally, counting connected components in a graph can easily be done in $O(n)$, so the time complexity is $O(n)$.
[ "constructive algorithms", "data structures", "dsu", "graphs", "sortings" ]
3,000
#include<bits/stdc++.h> using namespace std; #define ll long long #define MP make_pair const int MAXN=2e6+5; int n,tot,le[MAXN],ri[MAXN]; int fa[MAXN<<1],s[MAXN],t[MAXN],pre[MAXN],suf[MAXN]; inline int find(int x){ while(x^fa[x]) x=fa[x]=fa[fa[x]]; return x; } void solve(){ cin>>n; for(int i=1;i<=2*n+1;i++) fa[i]=i; for(int i=0;i<=n+1;i++) s[i]=t[i]=pre[i]=suf[i]=0; for(int i=1;i<=n;i++){ cin>>le[i]>>ri[i]; s[max(1,i-ri[i])]++;s[max(0,i-le[i])+1]--; t[min(n+1,i+le[i])]++;t[min(n,i+ri[i])+1]--; } tot=n; for(int i=1;i<=n;i++){ s[i]+=s[i-1],t[i]+=t[i-1]; if(s[i]&&t[i]) suf[i]=pre[i]=++tot; } suf[n+1]=0; for(int i=1;i<=n;i++) pre[i]=(pre[i]?pre[i]:pre[i-1]); for(int i=n;i>=1;i--) suf[i]=(suf[i]?suf[i]:suf[i+1]); for(int i=1;i<=n;i++){ int l=max(1,i-ri[i]),r=max(0,i-le[i]); if(l<=r){ l=suf[l],r=pre[r]; if(l&&r&&l<=r) for(int i=find(l);i<r;i=find(i)) fa[i]=i+1; } l=min(n+1,i+le[i]),r=min(n,i+ri[i]); if(l<=r){ l=suf[l],r=pre[r]; if(l&&r&&l<=r) for(int i=find(l);i<r;i=find(i)) fa[i]=i+1; } } for(int i=1;i<=n;i++){ int l=max(1,i-ri[i]),r=max(0,i-le[i]); if(l<=r){ l=suf[l],r=pre[r]; if(l&&r&&l<=r) fa[find(i)]=find(l); } l=min(n+1,i+le[i]),r=min(n,i+ri[i]); if(l<=r){ l=suf[l],r=pre[r]; if(l&&r&&l<=r) fa[find(i)]=find(l); } } int ans=0; for(int i=1;i<=tot;i++) if(fa[i]==i) ans++; cout<<ans<<'\n'; } int main(){ ios::sync_with_stdio(false); // freopen("Otomachi_Una.in","r",stdin); // freopen("Otomachi_Una.out","w",stdout); int _;cin>>_; while(_--) solve(); return 0; }
1957
A
Stickogon
You are given $n$ sticks of lengths $a_1, a_2, \ldots, a_n$. Find the maximum number of regular (equal-sided) polygons you can construct simultaneously, such that: - Each side of a polygon is formed by exactly one stick. - No stick is used in more than $1$ polygon. Note: Sticks cannot be broken.
The first observation that needs to be made in this problem is that we have to greedily try to build triangles from the sticks available. The number of triangles that can be created simultaneously by $S$ sticks of the same length is $\left\lfloor \frac{S}{3} \right\rfloor$. Hence, the answer is just the sum of the count of all triangles for all stick lengths, $\sum\limits_{i = 1}^{100} \left\lfloor \frac{S_i}{3} \right\rfloor$, where $S_i$ denotes the number of sticks of length $i$. The time complexity of the problem is therefore $O(n) + O(100) = O(n)$.
[ "constructive algorithms", "greedy" ]
800
#include <bits/stdc++.h>
using namespace std;

// Greedy: triangles use the fewest sticks per polygon, and all sides of a
// regular polygon must be equal, so the answer is sum over lengths L of
// floor(count[L] / 3). Stick lengths are at most 100.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        array<int, 101> freq{};
        for (int i = 0; i < n; ++i) {
            int len;
            cin >> len;
            ++freq[len];
        }
        int polygons = 0;
        for (int c : freq) polygons += c / 3;
        cout << polygons << "\n";
    }
}
1957
B
A BIT of a Construction
Given integers $n$ and $k$, construct a sequence of $n$ non-negative (i.e. $\geq 0$) integers $a_1, a_2, \ldots, a_n$ such that - $\sum\limits_{i = 1}^n a_i = k$ - The \textbf{number} of $1$s in the binary representation of $a_1 | a_2 | \ldots | a_n$ is maximized, where $|$ denotes the bitwise OR operation.
The case $n = 1$ needs to be handled separately, as we can only output $k$ itself. For $n > 1$, we make the following observations. Let $x$ be the position of the most significant bit in $k$, that is $2^{x} \leq k < 2^{x+1}$. From this, we learn that the bitwise OR of the sequence cannot have more than $x+1$ set bits because that would make the sum greater than $2^{x+1}$. Now, having $x+1$ bits set in the bitwise OR of the sequence is only possible if $k = 2^{x+1} - 1$ (or $k = 111\ldots1_2$). Any $k$ less than this cannot have $x+1$ bits set in the bitwise OR of the sequence, as otherwise the sum would exceed $k$. However, we can always set $x$ bits, as we can always have one number in the sequence as $2^x - 1$ (which has exactly $x$ bits set). Using these observations, we get our solution as $2^x - 1, k - (2^x - 1), 0, 0, 0,\ldots, 0$. This ensures that we have at least $x$ bits set in the bitwise OR, and additionally also handles the case where $x+1$ bits can be set, while maintaining the sum.
[ "bitmasks", "constructive algorithms", "greedy", "implementation" ]
1,100
#include <bits/stdc++.h>
using namespace std;

// For n > 1 the OR can always contain every bit strictly below the
// highest set bit of k: output 2^msb - 1, then the remainder
// k - (2^msb - 1), then zeros. For n == 1 the only option is k itself.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        vector<int> seq(n, 0);
        if (n == 1) {
            seq[0] = k;
        } else {
            int high = 0;
            for (int bit = 30; bit >= 0; --bit) {
                if (k >> bit & 1) {
                    high = bit;
                    break;
                }
            }
            seq[0] = (1 << high) - 1;
            seq[1] = k - seq[0];
        }
        for (int x : seq) cout << x << " ";
        cout << "\n";
    }
    return 0;
}
1957
C
How Does the Rook Move?
You are given an $n \times n$ chessboard where you and the computer take turns alternatingly to place white rooks & black rooks on the board respectively. While placing rooks, you have to ensure that no two rooks attack each other. Two rooks attack each other if they share the same row or column \textbf{regardless of color}. A valid move is placing a rook on a position ($r$, $c$) such that it doesn't attack any other rook. You start first, and when you make a valid move in your turn, placing a white rook at position ($r$, $c$), the computer will mirror you and place a black rook at position ($c$, $r$) in its turn. If $r = c$, then the computer can't mirror your move, and skips its turn. You have already played $k$ moves with the computer (the computer tries to mirror these moves too), and you must continue playing the game until there are no valid moves remaining. How many different final configurations are possible when you continue the game after the $k$ moves? It is guaranteed that the $k$ moves and the implied computer moves are valid. Since the answer may be large, print it modulo $10^9+7$. Two configurations are considered different if there exists a coordinate ($r$, $c$) which has a rook in one configuration, but not in the other \textbf{or} the color of the rook on the coordinate is different.
There are essentially two types of moves: Placing a rook at some $(i, i)$: This reduces the number of free rows and columns available by $1$. Placing a rook at some $(i, j)$, where $i \neq j$: The computer now mirrors this by placing a rook at $(j, i)$, blocking rows $i$ and $j$ along with columns $i$ and $j$. So the number of free rows and columns is reduced by $2$. First, we account for the $k$ moves played earlier and count the number of free columns/rows remaining to place rooks in, and call it $m$. Notice that the order of removing rows/columns doesn't affect the final configuration of rooks, and hence only the count of rows matters, to determine the number of final configurations. We can use a dynamic programming approach where $dp[i]$ represents the number of final configurations when $i$ rows and columns are left. Since the order of removing rows/columns is unimportant, let's start by removing the last row or column. When removing the last row or column in an $i \times i$ grid, we have two options: We place a rook $(i, i)$, resulting in the deletion of only the last row and column leaving an $(i-1) \times (i-1)$ grid. The number of final configurations in this case are given by $dp[i-1]$. Alternatively, we can place a rook at $(i, j)$ or $(j, i)$ for any $j \in \{1, 2, \ldots, i-1\}$. After this move, both the $j$-th and the $i$-th rows and columns are deleted, leaving an $(i-2) \times (i-2)$ grid. This contributes $2 (i-1) \cdot dp[i-2]$ to $dp[i]$. Overall, we compute $dp[i] = dp[i-1] + 2 (i-1) \cdot dp[i-2]$ for all $i \in \{2, 3, \ldots, n\}$, with the base case of $dp[0] = dp[1] = 1$. Our final answer is $dp[m]$. Altenatively, we can iterate on the number of type $1$ moves we play. Let's term this to be $c$. There are ${m\choose c}$ ways to choose the $c$ type $1$ moves. Now, we have $m - c$ rows/columns left, and this must be even (type 2 moves cannot exhaust an odd number of rows/columns). 
We can see that each of the $(m - c)!$ permutations of the remaining columns correspond to a set of moves we can play. For example, if we have the columns $(1, 4, 5, 6)$ remaining, a permutation $(4, 5, 6, 1)$ corresponds to playing the moves $(4, 5), (6, 1)$. However, if we simply count the number of permutations, we would also be counting the permutation $(6, 1, 4, 5)$, which corresponds to the same set of moves. To remove overcounting, we can just divide $(m - c)!$ by $((m - c)/2)!$ (removing the permutations of the pairs chosen). Hence, the answer becomes $\sum\limits_{c = 0}^m [(m - c) \bmod 2 = 0] {m \choose c} \frac{(m - c)!}{\left(\frac{m - c}{2}\right)!}$
[ "combinatorics", "dp", "math" ]
1,600
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1957C-style rook game: after accounting for the k pre-played
// moves, m rows/columns remain free.  dp[i] = number of distinct final
// configurations on an i x i free board:
//   dp[i] = dp[i-1]                (rook placed on the diagonal, uses 1 row/col)
//         + 2*(i-1) * dp[i-2]      (rook at (i,j) or (j,i), mirrored by the
//                                   computer, uses 2 rows/cols; the two choices
//                                   differ because the players' rooks differ)
const int MAXN = 3e5 + 5;
const int MOD = 1e9 + 7;
int dp[MAXN];

int main() {
    cin.tie(0), cout.tie(0)->sync_with_stdio(0);
    // Precompute the whole table once up front; the original recomputed
    // dp[0..m] from scratch inside every test case.
    dp[0] = dp[1] = 1;
    for (int i = 2; i < MAXN; i++)
        dp[i] = (dp[i - 1] + 2ll * (i - 1) * dp[i - 2] % MOD) % MOD;
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        int used = 0;
        for (int i = 0; i < k; i++) {
            int r, c;
            cin >> r >> c;
            used += 2 - (r == c);  // diagonal move consumes 1 row/col, else 2
        }
        cout << dp[n - used] << "\n";
    }
}
1957
D
A BIT of an Inequality
You are given an array $a_1, a_2, \ldots, a_n$. Find the number of tuples ($x, y, z$) such that: - $1 \leq x \leq y \leq z \leq n$, and - $f(x, y) \oplus f(y, z) > f(x, z)$. We define $f(l, r) = a_l \oplus a_{l + 1} \oplus \ldots \oplus a_{r}$, where $\oplus$ denotes the bitwise XOR operation.
How can you simplify the given inequality? Use the fact that the XOR of a number with itself is 0. The inequality simplifies to $f(x, z) \oplus a_y > f(x, z)$. For a given $a_y$ what subarrays (that include $a_y$) would satisfy this? First we may rewrite the inequality as $f(x, z) \oplus a_y > f(x, z)$. So, for each index $y$, we want to find the total number of subarrays that contain $y$ but whose $\text{xor}$ decreases when we include the contribution of $a_y$. Including $a_y$ in some subarray $[x, z]$ (where $x \le y \le z$) can make the $\text{xor}$ lower only when some set bit of $a_y$ cancels out an existing set bit in $f(x, y - 1) \oplus f(y + 1, z)$. Consider the most signicant set bit in $a_y$. Call this bit $i$. Including $a_y$ would decrease the subarray $\text{xor}$ in subarrays where $f(x, y - 1)$ has $i$ set while $f(y+1, z)$ is unset or vice-versa. Now, for the subarrays where both the prefix subarray ($[x \dots y - 1]$) as well as the suffix subarray ($[y + 1 \dots z]$) where either both have bit $i$ set or both have it unset, by including $a_y$, we increment the xor by at least $2^i$. Even if by including $a_y$, every other smaller bit gets unset (because of cancelling out with $a_y$), this sum of these decrements is still less than $2^i$ thereby resulting in an overall positive contribution by including $a_y$.
[ "bitmasks", "brute force", "dp", "math" ]
1,900
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1957D: count triples (x, y, z) with f(x,y) xor f(y,z) > f(x,z),
// i.e. f(x,z) xor a[y] > f(x,z).  For each y only the highest set bit of a[y]
// matters: it must cancel the corresponding bit of f(x,y-1) xor f(y+1,z).
const int BITS = 30;
const int MAXN = 1e5 + 3;
// pref[b][j][k]: number of starts x <= j with bit b of xor(a[x..j]) equal to k.
// suff[b][j][k]: symmetric count over suffixes starting at j.
// pref[b][0][*] is never written, so it stays zero across test cases.
int pref[BITS][MAXN][2];
int suff[BITS][MAXN][2];

void solve() {
    int n;
    cin >> n;
    vector<int> a(n + 1);
    for (int i = 1; i <= n; i++) cin >> a[i];
    // Only the sentinel column needs resetting between test cases.
    for (int b = 0; b < BITS; b++)
        suff[b][n + 1][0] = suff[b][n + 1][1] = 0;
    for (int b = 0; b < BITS; b++) {
        for (int j = 1; j <= n; j++) {
            int bit = (a[j] >> b) & 1;
            for (int k = 0; k < 2; k++)
                pref[b][j][k] = (bit == k) + pref[b][j - 1][k ^ bit];
        }
        for (int j = n; j >= 1; j--) {
            int bit = (a[j] >> b) & 1;
            for (int k = 0; k < 2; k++)
                suff[b][j][k] = (bit == k) + suff[b][j + 1][k ^ bit];
        }
    }
    long long total = 0;
    for (int y = 1; y <= n; y++) {
        // Highest set bit of a[y]; assumes a[y] >= 1 (clz of 0 is undefined).
        int top = 31 - __builtin_clz(a[y]);
        // Exactly one side of the split must have bit `top` set; "+1" covers
        // the empty prefix/suffix (x == y or z == y).
        total += 1ll * pref[top][y - 1][1] * (1 + suff[top][y + 1][0]);
        total += 1ll * (1 + pref[top][y - 1][0]) * suff[top][y + 1][1];
    }
    cout << total << "\n";
}

int main() {
    int tc;
    cin >> tc;
    while (tc--) solve();
    return 0;
}
1957
E
Carousel of Combinations
You are given an integer $n$. The function $C(i,k)$ represents the number of distinct ways you can select $k$ distinct numbers from the set {$1, 2, \ldots, i$} and arrange them in a circle$^\dagger$. Find the value of $$ \sum\limits_{i=1}^n \sum\limits_{j=1}^i \left( C(i,j) \bmod j \right). $$ Here, the operation $x \bmod y$ denotes the remainder from dividing $x$ by $y$. Since this value can be very large, find it modulo $10^9+7$. $^\dagger$ In a circular arrangement, sequences are considered identical if one can be rotated to match the other. For instance, $[1, 2, 3]$ and $[2, 3, 1]$ are equivalent in a circle.
For what values of $j$ is $C(i,j) \bmod j = 0$? For all other values of $j$, how can you precompute the result? To precompute, switch around the loop order. The number of distinct ways you can select $k$ distinct numbers from the set {$1, 2, \ldots, i$} and arrange them in a line is $i(i-1)\cdots(i-k+1)$, and since arranging in a circle introduces rotational symmetry we have to divide by $k$, so we have $C(i,k) = \frac{i(i-1)\cdots(i-k+1)}{k}$. Therefore $C(i,j) \bmod j = \frac{i(i-1)\cdots(i-j+1)}{j} \bmod j$. Now since the numerator is a product of $j$ consecutive integers, atleast one of them will be divisible by $j$. More precisely the exact integer which will be divisible by $j$ will be $j \times \lfloor \frac{i}{j} \rfloor$. Hence we can simplify the fraction by removing the denominator and replacing the term $j \times \lfloor \frac{i}{j} \rfloor$ with $\lfloor \frac{i}{j} \rfloor$ in the numerator. Each of the other $j-1$ integers in the numerator, after applying $\bmod j$ would cover all integers from $1$ to $j-1$. Hence $C(i,j) \bmod j = \frac{i(i-1)\cdots(i-j+1)}{j} \bmod j = \left( (j-1)! \times \left\lfloor \frac{i}{j} \right\rfloor \right) \bmod j$ Here we can notice that all proper factors of $j$ will occur in $(j-1)!$, so based on this we can tell that $C(i,j) \bmod j = 0$ for all composite numbers $j$ except $j=4$. We first can handle the case of $j$ being prime. Using Wilson's Theorem, we know that $(j-1)! \equiv -1 \bmod j$ when $j$ is prime. Hence $C(i,j) \bmod j = - \left\lfloor \frac{i}{j} \right\rfloor$ Now we can reverse the order of loops to sum over all primes, and to calculate the contribution of each prime we can maintain a update array called $delta$. To calculate the contribution for a single prime $p$, we know that for all $n$ from $kp$ to $(k + 1)p - 1$ (for all $k$ such that $kp < 1e6$) the contribution would be $-k$. So, in the $delta$ array, we increment index $kp$ with $-k \bmod p$ and decrement index $(k + 1)p$ with $-k \mod p$. 
Now, when we perform a prefix sum on this $delta$ array, we obtain the correct contributions from all primes. The case of $j=4$ is handled separately in exactly the same way, except that Wilson's theorem is replaced by the direct computation $(j-1)! \bmod j = 3! \bmod 4 = 2$, so its contribution is $\left( 2 \left\lfloor \frac{i}{4} \right\rfloor \right) \bmod 4$.
[ "brute force", "combinatorics", "dp", "math", "number theory" ]
2,400
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1957E: sum over i <= n, j <= i of C(i,j) mod j.
// Only prime j (by Wilson's theorem) and j = 4 contribute; for a fixed j the
// contribution at i is ((j-1)! * floor(i/j)) mod j, which is constant on each
// block [k*j, (k+1)*j).  Apply each block with a difference array, then take
// two prefix sums: first to get the value at a single i, then the sum over i.
const int MOD = 1e9 + 7;
const int LIM = 1e6 + 3;

bitset<LIM> is_prime;

// Sieve of Eratosthenes; fills is_prime and returns all primes below lim.
vector<int> sieve(int lim) {
    is_prime.set();
    is_prime[0] = is_prime[1] = false;
    for (int i = 4; i < lim; i += 2) is_prime[i] = false;
    for (int i = 3; i * i < lim; i += 2)
        if (is_prime[i])
            for (int j = i * i; j < lim; j += i * 2)
                is_prime[j] = false;
    vector<int> out;
    for (int i = 2; i < lim; i++)
        if (is_prime[i]) out.push_back(i);
    return out;
}

vector<int> ans(LIM, 0);

int main() {
    vector<int> primes = sieve(LIM);
    vector<int> delta(LIM, 0);
    // Prime p contributes (-floor(i/p)) mod p on each block of length p.
    for (int p : primes) {
        for (int pos = p; pos < LIM; pos += p) {
            int add = (p - ((pos / p) % p)) % p;
            delta[pos] = (delta[pos] + add) % MOD;
            if (pos + p < LIM)
                delta[pos + p] = (delta[pos + p] - add + MOD) % MOD;
        }
    }
    // j = 4 is the lone composite exception: 3! mod 4 = 2, so it contributes
    // (2 * floor(i/4)) mod 4 on each block of length 4.
    for (int pos = 4; pos < LIM; pos += 4) {
        int add = (2 * (pos / 4)) % 4;
        delta[pos] = (delta[pos] + add) % MOD;
        if (pos + 4 < LIM)
            delta[pos + 4] = (delta[pos + 4] - add + MOD) % MOD;
    }
    int run = 0;  // first prefix sum: the inner sum for a single i
    for (int i = 1; i < LIM; i++) {
        run = (run + delta[i]) % MOD;
        ans[i] = (ans[i - 1] + run) % MOD;  // second prefix sum: over i <= n
    }
    int tc;
    cin >> tc;
    while (tc--) {
        int n;
        cin >> n;
        cout << ans[n] << "\n";
    }
    return 0;
}
1957
F1
Frequency Mismatch (Easy Version)
\textbf{This is the easy version of the problem. The difference between the two versions of this problem is the constraint on $k$. You can make hacks only if all versions of the problem are solved.} You are given an undirected tree of $n$ nodes. Each node $v$ has a value $a_v$ written on it. You have to answer queries related to the tree. You are given $q$ queries. In each query, you are given $5$ integers, $u_1, v_1, u_2, v_2, k$. Denote the count of nodes with value $c$ on path $u_1 \rightarrow v_1$ with $x_c$, and the count of nodes with value $c$ on path $u_2 \rightarrow v_2$ with $y_c$. If there are $z$ such values of $c$ such that $x_c \neq y_c$, output any $\min(z, k)$ such values in any order.
Let's try answering a different question. Given two multisets of elements how can we check if they differ? Hashing? How to multiset hash efficiently? Okay if we can now differentiate two multisets of unequal elements, can we try to answer queries by inserting into these hypothetical multisets and binary search the differing point to identify the element? Let's discuss how to hash a multiset of elements $a$, $b$ and $c$. Here, I will link you to a famous blog by rng_58 Hashing and Probability of Collision. Quoting, let's take a random integer $r$ from the interval $[0, MOD)$, and compute the hash $(r+a_1)(r+a_2)\dots(r+a_n)$. This is a polynomial of $r$ of degree $n$, so again from Schwartz-Zippel lemma, the collision probability is at most $\frac{n}{MOD}$. The nice thing about this construction is that we can compute rolling hashes using this idea fast. To make implementation easier, this bound applies for summing the random numbers as well. You can check this for proof. Let's try to answer a single query $(u_1, v_1, u_2, v_2)$ using binary search. We will solve this query in $nlog^2(n)$ using this idea. To check for some $mid$ in our binary search, we insert the values of all nodes which have values from $1$ to $mid$ into a data structure that we can query the path sum of $u$ to $v$ using. Querying path sum is a fairly standard problem that can be solved using BIT / Segment trees and ETT (Euler-Tour Trick). Now to solve this query, we only need to binary search and find the first vertex where the hashes differ for both the paths. This vertex is guaranteed to have mismatched frequency on the two paths since it's addition into the path multi-sets changed their hashes. So now we can solve a single query in $nlog^2(n)$ time using hashing + BIT / Segtree. Now to solve this problem for all $Q$ queries. We can use the idea of parallel binary search here to improve our idea to answering all $Q$ queries efficiently. We can run the binary search for all queries in parallel. 
For each iteration, sort queries by the current position of their $mid$ values. Then insert values from $1$ to $mid$ of the first query into the BIT and query range sum to determine for that particular query how to adjust $mid$. You can then move the $mid$ pointer to that of the next query and so on. This solution will run in $O(nlog(n) + qlog^2(n))$. Upd: Thanks to IceKnight1093 for pointing this out. If we just use a single int hash with a field size of $\approx 10^9$, it gives us a probability of failure of $\frac{1}{10^9}$ per query. Since we're doing somewhere of the order $10^6$ comparisons per hash representation, this gives a rough $1 - \Big(1 - \frac{1}{10^9}\Big)^{10^6} \approx 10^{-3}$ chance of failure. This is not a great bound theoretically speaking, but from a practical standpoint, it is a loose bound and it is extremely unlikely that this solution can be hacked. That said, if we want better theoretical bounds we can just use a hash with field size $\approx 10^{18}$ or use double hashing. Even if we were to query all ${n \choose 2}$ paths, the chance of collision is $\approx \frac{n^2}{10^{18}} \approx 10^{-8}$, which is more than good enough. TL's were set to allow double hashing solutions to pass comfortably.
[ "binary search", "data structures", "divide and conquer", "hashing", "probabilities", "trees" ]
2,600
// Codeforces 1957 F1 — Frequency Mismatch (Easy Version), k = 1.
// Approach (per editorial): assign every distinct node value a random hash;
// a path's multiset of values is represented by the sum of its nodes' hashes,
// queried through a BIT over the Euler tour plus an RMQ-based LCA.  All
// queries binary-search in parallel (20 rounds) over the sorted distinct
// values for the first prefix of values whose two path hashes differ; that
// value is guaranteed to have mismatched frequencies on the two paths.
// Two independent modular hash fields (NUM_HASHES = 2) keep the collision
// probability negligible.
// NOTE(review): this block was flattened onto a few physical lines by the
// dataset export; a '//' comment inside a flattened line swallows the rest of
// that physical line, so the code must be re-wrapped before it can compile.
#include <bits/stdc++.h> using namespace std; const int MOD = 1e9 + 7; using ll = long long; using dbl = long double; //#define int ll using vi = vector<int>; using vvi = vector<vi>; using pii = pair<int, int>; using vii = vector<pii>; using vvii = vector<vii>; using vll = vector<ll>; #define ff first #define ss second #define pb push_back #define rep(i, a, b) for(int i = a; i < (b); ++i) #define all(x) begin(x), end(x) #define sz(x) (int)(x).size() #define tc int t; cin>>t; while(t--) #define fightFight cin.tie(0) -> sync_with_stdio(0) template<class T> struct RMQ { vector<vector<T>> jmp; RMQ(const vector<T>& V) : jmp(1, V) { for (int pw = 1, k = 1; pw * 2 <= sz(V); pw *= 2, ++k) { jmp.emplace_back(sz(V) - pw * 2 + 1); rep(j,0,sz(jmp[k])) jmp[k][j] = min(jmp[k - 1][j], jmp[k - 1][j + pw]); } } T query(int a, int b) { assert(a <= b); // tie(a, b) = minimax(a, b) int dep = 31 - __builtin_clz(b-a+1); return min(jmp[dep][a], jmp[dep][b - (1 << dep) + 1]); } }; struct LCA { int T = 0; vi st, path, ret; vi en, d; RMQ<int> rmq; LCA(vector<vi>& C) : st(sz(C)), en(sz(C)), d(sz(C)), rmq((dfs(C,0,-1), ret)) {} void dfs(vvi &adj, int v, int par) { st[v] = T++; for (auto to : adj[v]) if (to != par) { path.pb(v), ret.pb(st[v]); d[to] = d[v] + 1; dfs(adj, to, v); } en[v] = T-1; } bool anc(int p, int c) { return st[p] <= st[c] and en[p] >= en[c]; } int lca(int a, int b) { if (a == b) return a; tie(a, b) = minmax(st[a], st[b]); return path[rmq.query(a, b-1)]; } int dist(int a, int b) { return d[a] + d[b] - 2*d[lca(a,b)]; } }; template<const int mod> struct mint { constexpr mint(int x = 0) : val((x % mod + mod) % mod) {} mint& operator+=(const mint &b) { val += b.val; val -= mod * (val >= mod); return *this; } mint& operator-=(const mint &b) { val -= b.val; val += mod * (val < 0); return *this; } mint& operator*=(const mint &b) { val = 1ll * val * b.val % mod; return *this; } mint& operator/=(const mint &b) { return *this *= b.inv(); } mint inv() const { int x = 1, y = 0, t; 
for(int a=val, b=mod; b; swap(a, b), swap(x, y)) t = a/b, a -= t * b, x -= t * y; return mint(x); } mint pow(int b) const { mint a = *this, res(1); for(; b; a *= a, b /= 2) if(b&1) res *= a; return res; } friend mint operator+(const mint &a, const mint &b) {return mint(a) += b;} friend mint operator-(const mint &a, const mint &b) {return mint(a) -= b;} friend mint operator*(const mint &a, const mint &b) {return mint(a) *= b;} friend mint operator/(const mint &a, const mint &b) {return mint(a) /= b;} friend bool operator==(const mint &a, const mint &b) {return a.val == b.val;} friend bool operator!=(const mint &a, const mint &b) {return a.val != b.val;} friend bool operator<(const mint &a, const mint &b) {return a.val < b.val;} friend ostream& operator<<(ostream &os, const mint &a) {return os << a.val;} int val; }; using Mint = mint<MOD>; template<typename... Ts, size_t... Is, typename F> void __op(index_sequence<Is...>, tuple<Ts...>& a, const tuple<Ts...>& b, F op) { ((get<Is>(a) = op(get<Is>(a), get<Is>(b))), ...); } #define OVERLOAD(OP, F) \ template<typename... Ts> auto& operator OP##=(tuple<Ts...> &a, const tuple<Ts...> &b) { __op(index_sequence_for<Ts...>(), a, b, F<>{}); return a; } \ template<typename... Ts> auto operator OP(const tuple<Ts...> &a, const tuple<Ts...> &b) { auto c = a; c OP##= b; return c; } OVERLOAD(+, plus) OVERLOAD(-, minus) OVERLOAD(*, multiplies) OVERLOAD(/, divides) constexpr int NUM_HASHES = 2; // * constexpr array<int, NUM_HASHES> mods = {127657753, 987654319}; // * template <size_t N = NUM_HASHES> constexpr auto mint_ntuple(const int &v) { return [&]<size_t... 
Is>(index_sequence<Is...>) { return make_tuple(mint<mods[Is]>(v)...); }(make_index_sequence<N>{}); } using HT = decltype(mint_ntuple(0)); template<typename T> struct FT { vector<T> s; T def; FT(int n, T def) : s(n, def), def(def) {} void update(int pos, T dif) { // a[pos] += dif for (; pos < sz(s); pos |= pos + 1) s[pos] += dif; } T query(int pos) { // sum of values in [0, pos) pos++; T res = def; for (; pos > 0; pos &= pos - 1) res += s[pos-1]; return res; } }; struct Query { int u1, v1, u2, v2, k; int l, r, ans, i; int mid(){ return l + (r-l)/2; } }; auto rng = std::mt19937(std::random_device()()); constexpr const int MXN = 1e5+5; void solve(){ int n; cin >> n; vi a(n); for(auto &x : a) cin >> x, x--; vvi adj(n); for(int i=0; i < n-1; i++){ int u, v; cin >> u >> v; u--, v--; adj[u].pb(v); adj[v].pb(u); } int q; cin >> q; vector<Query> queries(q); int idx=0; for(auto &[u1, v1, u2, v2, k, l, r, ans, i] : queries) cin >> u1 >> v1 >> u2 >> v2 >> k, u1--, v1--, u2--, v2--, l=0, ans=-1, i=idx++; LCA lca(adj); vi uni(a); sort(all(uni)); uni.resize(unique(all(uni)) - uni.begin()); vvi cnode(MXN); for(int v=0; v < n; v++) cnode[a[v]].pb(v); vector<HT> hash(MXN); for(auto &c : uni) hash[c] = {rng(), rng()}; auto get_ett = [&](vvi &adj){ vi tin(n), tout(n); int timer = 0; function<void(int,int)> dfs = [&](int v, int p){ tin[v] = timer++; for(auto &to : adj[v]) if(to != p) dfs(to, v); tout[v] = timer++; }; dfs(0, -1); return make_pair(tin, tout); }; auto [tin, tout] = get_ett(adj); for(auto &q : queries) q.r = sz(uni)-1; vi vis(MXN); for(int _=0; _<20; _++){ FT<HT> st(2*n, mint_ntuple(0)); sort(all(queries), [&](Query &a, Query &b) { return a.mid() < b.mid(); }); for(int qq=0, cptr=0; qq < q; qq++) if(queries[qq].l <= queries[qq].r) { auto &[u1, v1, u2, v2, k, l, r, ans, i] = queries[qq]; for(; cptr < sz(uni) and cptr <= queries[qq].mid(); cptr++){ for(auto &v : cnode[uni[cptr]]) st.update(tin[v], hash[uni[cptr]]), st.update(tout[v], mint_ntuple(0)-hash[uni[cptr]]); 
vis[uni[cptr]] = true; } int lca1 = lca.lca(u1, v1), lca2 = lca.lca(u2, v2); HT r1 = st.query(tin[lca1]), r2 = st.query(tin[lca2]); HT hash1 = (st.query(tin[u1]) + st.query(tin[v1]) - (r1 + r1)); if(vis[a[lca1]]) hash1 += hash[a[lca1]]; HT hash2 = (st.query(tin[u2]) + st.query(tin[v2]) - (r2 + r2)); if(vis[a[lca2]]) hash2 += hash[a[lca2]]; if(hash1 != hash2){ ans = queries[qq].mid(); r = queries[qq].mid()-1; } else l = queries[qq].mid()+1; } for(auto &c : uni) vis[c] = false; } sort(all(queries), [&](Query &a, Query &b) { return a.i < b.i; }); for(auto &[u1, v1, u2, v2, k, l, r, ans, i] : queries){ if(ans == -1) cout << 0 << '\n'; else cout << 1 << ' ' << uni[ans]+1 << '\n'; } } signed main(){ fightFight; solve(); }
1957
F2
Frequency Mismatch (Hard Version)
\textbf{This is the hard version of the problem. The difference between the two versions of this problem is the constraint on $k$. You can make hacks only if all versions of the problem are solved.} You are given an undirected tree of $n$ nodes. Each node $v$ has a value $a_v$ written on it. You have to answer queries related to the tree. You are given $q$ queries. In each query, you are given $5$ integers, $u_1, v_1, u_2, v_2, k$. Denote the count of nodes with value $c$ on path $u_1 \rightarrow v_1$ with $x_c$, and the count of nodes with value $c$ on path $u_2 \rightarrow v_2$ with $y_c$. If there are $z$ such values of $c$ such that $x_c \neq y_c$, output any $\min(z, k)$ such values in any order.
Let's start by first solving the problem for $k = 1$, and extend the idea to $k > 1$ later. To solve for $k = 1$, we'll find the smallest value that occurs with different frequencies on the two paths. We'll solve an easier version by solving for two static arrays, instead of solving the problem of two paths. To find the smallest value that has a different frequency between the two arrays, we can have a segment tree on the frequency array for each static array. Then, we can store the hash of each segment tree node and perform a descent to find the first point at which the hashes in the two segment trees differ. The hash of a node is the polynomial hash of the subarray it corresponds to. Now, in order to use the same technique on the path, we want the frequency array of the path $u \rightarrow v$. To achieve this, we can use persistent segment trees. We define $seg(u \rightarrow v)$ to be the segment tree that corresponds to the frequency array of the values on the path $u \rightarrow v$. With the idea of persistence, we can quickly compute $seg(1 \rightarrow u)$ for all $u$, when we root at $1$. To compute $seg(u \rightarrow v)$, we can use this: $seg(u \rightarrow v) = seg(1 \rightarrow u) + seg(1 \rightarrow v) - seg(1 \rightarrow lca(u, v)) - seg(1 \rightarrow parent(lca(u, v)))$ for every node in the segment tree that we want. Hence, we are able to get the segment tree for the two paths we need to compare in a query. In our solution with the static arrays, we used the polynomial hash to find the first point of difference between the two frequency arrays. So, we need a way to quickly compute the hash of the pseudo node we computed for $seg(u \rightarrow v)$. If we have two frequency arrays $f_1$, $f_2$, $hash(f_1) + hash(f_2) = hash(f_1 + f_2)$, where the hash of a node is the polynomial hash of the subarray that corresponds to that node. 
Hence, we can say: $hash(seg(u \rightarrow v)) = hash(seg(1 \rightarrow u)) + hash(seg(1 \rightarrow v)) - hash(seg(1 \rightarrow lca(u, v))) - hash(seg(1 \rightarrow parent(lca(u, v))))$. Thus, we can perform the same descent that we talked about earlier on the derived segment tree. Now to solve this for $k > 1$, you can perform a dfs on $seg(u \rightarrow v)$, and keep entering nodes of the segment tree which have differing hashes, until we find $k$ values. The total time complexity comes out to $O(n \log n) + O(q) \times O(k) \times O(\log n) = O(qk \log n + n \log n)$. You can also use an idea similar to the hashing technique used in F1 to hash the segment tree nodes.
[ "binary search", "data structures", "dfs and similar", "hashing", "probabilities", "trees" ]
2,700
// Codeforces 1957 F2 — Frequency Mismatch (Hard Version), up to k answers.
// Approach (per editorial): a persistent segment tree over the value range,
// one root per node v storing the frequency array of the path 1 -> v; the
// tree of path u -> v is the linear combination
//   roots[u] + roots[v] - roots[lca] - roots[parent(lca)],
// evaluated lazily node-by-node (the a4 arrays carry the four root indices).
// Each segment-tree node stores a polynomial hash (two moduli) of its value
// range, so descent() walks both derived trees simultaneously, entering only
// subtrees whose hashes differ, and reports up to k mismatched values in
// O(k log n) per query.
// NOTE(review): this block was flattened onto a few physical lines by the
// dataset export; a '//' comment inside a flattened line swallows the rest of
// that physical line, and the '#define stm' macro is split across physical
// lines without a continuation backslash — re-wrap before compiling.
#include <bits/stdc++.h> using namespace std; const int MOD1 = 1e9 + 7; const int MOD2 = 998244353; using ll = long long; using dbl = long double; //#define int ll using vi = vector<int>; using vvi = vector<vi>; using pii = pair<int, int>; using vii = vector<pii>; using vvii = vector<vii>; using vll = vector<ll>; #define ff first #define ss second #define pb push_back #define rep(i, a, b) for(int i = a; i < (b); ++i) #define all(x) begin(x), end(x) #define sz(x) (int)(x).size() #define tc int t; cin>>t; while(t--) #define fightFight cin.tie(0) -> sync_with_stdio(0) template<class T> struct RMQ { vector<vector<T>> jmp; RMQ(const vector<T>& V) : jmp(1, V) { for (int pw = 1, k = 1; pw * 2 <= sz(V); pw *= 2, ++k) { jmp.emplace_back(sz(V) - pw * 2 + 1); rep(j,0,sz(jmp[k])) jmp[k][j] = min(jmp[k - 1][j], jmp[k - 1][j + pw]); } } T query(int a, int b) { assert(a <= b); // tie(a, b) = minimax(a, b) int dep = 31 - __builtin_clz(b-a+1); return min(jmp[dep][a], jmp[dep][b - (1 << dep) + 1]); } }; struct LCA { int T = 0; vi st, path, ret; vi en, d; RMQ<int> rmq; LCA(vector<vi>& C) : st(sz(C)), en(sz(C)), d(sz(C)), rmq((dfs(C,1,-1), ret)) {} void dfs(vvi &adj, int v, int par) { st[v] = T++; for (auto to : adj[v]) if (to != par) { path.pb(v), ret.pb(st[v]); d[to] = d[v] + 1; dfs(adj, to, v); } en[v] = T-1; } bool anc(int p, int c) { return st[p] <= st[c] and en[p] >= en[c]; } int lca(int a, int b) { if (a == b) return a; tie(a, b) = minmax(st[a], st[b]); return path[rmq.query(a, b-1)]; } int dist(int a, int b) { return d[a] + d[b] - 2*d[lca(a,b)]; } }; template<const int mod> struct mint { constexpr mint(int x = 0) : val((x % mod + mod) % mod) {} mint& operator+=(const mint &b) { val += b.val; val -= mod * (val >= mod); return *this; } mint& operator-=(const mint &b) { val -= b.val; val += mod * (val < 0); return *this; } mint& operator*=(const mint &b) { val = 1ll * val * b.val % mod; return *this; } mint& operator/=(const mint &b) { return *this *= b.inv(); } mint inv() const 
{ int x = 1, y = 0, t; for(int a=val, b=mod; b; swap(a, b), swap(x, y)) t = a/b, a -= t * b, x -= t * y; return mint(x); } mint pow(int b) const { mint a = *this, res(1); for(; b; a *= a, b /= 2) if(b&1) res *= a; return res; } friend mint operator+(const mint &a, const mint &b) { return mint(a) += b; } friend mint operator-(const mint &a, const mint &b) { return mint(a) -= b; } friend mint operator*(const mint &a, const mint &b) { return mint(a) *= b; } friend mint operator/(const mint &a, const mint &b) { return mint(a) /= b; } friend bool operator==(const mint &a, const mint &b) { return a.val == b.val; } friend bool operator!=(const mint &a, const mint &b) { return a.val != b.val; } friend bool operator<(const mint &a, const mint &b) { return a.val < b.val; } friend ostream& operator<<(ostream &os, const mint &a) { return os << a.val; } int val; }; mt19937_64 rng(chrono::steady_clock::now().time_since_epoch().count()); uniform_int_distribution<ll> rnd(20,10000); using Mint1 = mint<MOD1>; using Mint2 = mint<MOD2>; using Mint = pair<Mint1,Mint2>; const int N = 3e5 + 10, LOGN = 20; int p1, p2; int blen = 0; int L[N * LOGN], R[N * LOGN]; Mint ST[N * LOGN], p_pow[N]; void prec() { p1 = rnd(rng); p2 = p1; while (p2 == p1) p2 = rnd(rng); p_pow[0].ff = 1; p_pow[0].ss = 1; for (int i = 1; i < N; i++) { p_pow[i].ff = p_pow[i - 1].ff * p1; p_pow[i].ss = p_pow[i - 1].ss * p2; } } int update(int pos, int l, int r, int id) { if (pos < l || pos > r) return id; int ID = ++blen, m = (l + r) / 2; if (l == r) return (ST[ID] = {ST[id].ff + 1, ST[id].ss + 1}, ID); L[ID] = update(pos, l, m, L[id]); R[ID] = update(pos, m + 1, r, R[id]); return (ST[ID] = {ST[L[ID]].ff + p_pow[m - l + 1].ff * ST[R[ID]].ff, ST[L[ID]].ss + p_pow[m - l + 1].ss * ST[R[ID]].ss}, ID); } vi vals; using a4 = array<int,4>; void descent(int l, int r, const array<int, 4>& a, const array<int, 4>& b, int k) { if (l == r) return void(vals.push_back(l)); int m = (l + r) / 2; #define stm(X, y) {ST[X[y[0]]].ff + 
ST[X[y[1]]].ff - ST[X[y[2]]].ff - ST[X[y[3]]].ff, ST[X[y[0]]].ss + ST[X[y[1]]].ss - ST[X[y[2]]].ss - ST[X[y[3]]].ss} #define arr(X, y) (a4{X[y[0]], X[y[1]], X[y[2]], X[y[3]]}) Mint l1 = stm(L, a), l2 = stm(L, b), r1 = stm(R, a), r2 = stm(R, b); if (sz(vals) < k && l1 != l2) descent(l, m, arr(L, a), arr(L, b), k); if (sz(vals) < k && r1 != r2) descent(m + 1, r, arr(R, a), arr(R, b), k); } vvi adj; vi a, roots, par; void dfs(int x, int p) { par[x] = p; roots[x] = update(a[x], 0, N, roots[par[x]]); for (auto& s : adj[x]) if (s != p) { dfs(s, x); } } void solve(){ int n; cin >> n; adj = vvi(n + 1); a = roots = par = vi(n + 1); for (int i = 1; i <= n; i++) cin >> a[i]; for (int i = 0; i < n - 1; i++) { int a, b; cin >> a >> b; adj[a].pb(b), adj[b].pb(a); } dfs(1, 0); LCA lca(adj); int q; cin >> q; while (q--) { vals.clear(); int u1, v1, u2, v2, k; cin >> u1 >> v1 >> u2 >> v2 >> k; int l1 = lca.lca(u1, v1), l2 = lca.lca(u2, v2); a4 a{roots[u1], roots[v1], roots[l1], roots[par[l1]]}; a4 b{roots[u2], roots[v2], roots[l2], roots[par[l2]]}; descent(0, N, a, b, k); cout << sz(vals) << " "; for (auto& s : vals) cout << s << " "; cout << "\n"; } } signed main(){ fightFight; prec(); solve(); }
1965
A
Everything Nim
Alice and Bob are playing a game on $n$ piles of stones. On each player's turn, they select a positive integer $k$ that is at most the size of the smallest \textbf{nonempty} pile and remove $k$ stones from \textbf{each} nonempty pile at once. The first player who is unable to make a move (because all piles are empty) loses. Given that Alice goes first, who will win the game if both players play optimally?
If the smallest pile is of size $1$, then Alice must choose $k=1$ in her first move. Therefore, we can imagine subtracting $1$ from all piles, and determining who wins given that Bob goes first. We can repeat this process, switching the first player back and forth, until there is no longer a pile of size $1$. At this point, we are in one of two states: If there are no piles remaining, the first player loses, because they cannot make any moves Otherwise, the smallest pile is of size $x \ge 2$. We can show that the first player will always win. To do this, consider what happens if the first player chooses $k=x$: If this would create a losing state for the next player, then the first player can choose $k=x$ and win. Otherwise, the state reached by choosing $k=x$ is a winning state for the next player to move. So the first player can choose $k=x-1$, forcing the second player to choose $k=1$. The first player will now be in the winning state and can proceed to win the game. If this would create a losing state for the next player, then the first player can choose $k=x$ and win. Otherwise, the state reached by choosing $k=x$ is a winning state for the next player to move. So the first player can choose $k=x-1$, forcing the second player to choose $k=1$. The first player will now be in the winning state and can proceed to win the game. To implement this solution, we only need to keep track of the largest pile size $a$, and the smallest positive integer $b$ that is not a pile size (essentially the MEX of the pile sizes, excluding $0$). If $b > a$, then Alice and Bob will be forced to choose $k=1$ until the end of the game, so the parity of $a$ determines the winner. Otherwise, they will eventually reach a state with minimum pile size at least $2$, so the parity of $b$ determines the winner. Complexity: $O(n)$ or $O(n\log n)$ depending on implementation
[ "games", "greedy", "math", "sortings" ]
1,400
def winner(piles):
    """Return "Alice" or "Bob" — the winner of the Everything Nim game
    (Codeforces 1965A) with the given pile sizes, Alice moving first.

    Strategy (per editorial): let `largest` be the maximum pile and `mex` the
    smallest positive integer absent from the pile sizes.  While the smallest
    pile is 1, the move k=1 is forced; once the minimum pile is >= 2 the
    player to move wins.  So the winner is decided by the parity of `largest`
    (if moves are forced to the very end) or of `mex` (otherwise).
    """
    largest = max(piles)
    mex = 1
    # Walking the sorted sizes advances mex past every present size;
    # duplicates are harmless because they never equal mex twice.
    for size in sorted(piles):
        if size == mex:
            mex += 1
    if mex > largest:
        # Every move is a forced k=1: parity of the pile height decides.
        return "Alice" if largest % 2 == 1 else "Bob"
    # The chain of forced moves ends after mex-1 steps; the player to move
    # there wins, so the parity of mex decides.
    return "Alice" if mex % 2 == 1 else "Bob"


def main():
    """Read the test cases from stdin and print the winner of each."""
    t = int(input())
    for _ in range(t):
        input()  # n — pile count; implied by the list itself
        piles = list(map(int, input().split()))
        print(winner(piles))


if __name__ == "__main__":
    main()
1965
B
Missing Subsequence Sum
You are given two integers $n$ and $k$. Find a sequence $a$ of non-negative integers of size at most $25$ such that the following conditions hold. - There is no subsequence of $a$ with a sum of $k$. - For all $1 \le v \le n$ where $v \ne k$, there is a subsequence of $a$ with a sum of $v$. A sequence $b$ is a subsequence of $a$ if $b$ can be obtained from $a$ by the deletion of several (possibly, zero or all) elements, without changing the order of the remaining elements. For example, $[5, 2, 3]$ is a subsequence of $[1, 5, 7, 8, 2, 4, 3]$. It can be shown that under the given constraints, a solution always exists.
Notice that for a fixed $k$, a solution for $n=c$ is also a solution for all $n < c$. So we can ignore the value of $n$ and just assume it's always $10^6$. If we didn't have the restriction that no subsequence can add up to $k$, the most natural solution would be $[1, 2, 4, 8, \cdots 2^{19}]$. Every value from $1$ to $10^6$ appears as the sum of the subsequence given by its binary representation. We will use a modified version of this array to solve the problem. Let $i$ be the largest integer such that $2^i \le k$. We will use this array (of size $22$): $a=[k-2^i, k+1, k+1+2^i, 1, 2, 4, ... 2^{i-1}, 2^{i+1}, ... 2^{19}]$ To prove that no subsequence of $a$ adds up to $k$, consider the list of all elements in the array that are at most $k$, since these are the only ones that could be present in a subsequence adding to $k$. These are $k-2^i, 1, 2, 4, ... 2^{i-1}$ Since these add up to $k-1$, no subsequence can add up to $k$. To prove that for all $1 \le v \le n$ where $v \ne k$, there is a subsequence adding up to $v$, we consider several cases: If $v < 2^i$, we can simply use the binary representation of $v$. If $2^i \le v < k$, we can first take all of the elements that are at most $k$ as part of our subsequence. We then need to remove elements with a sum equal to $k-1-v$. Because $2^i \le v < k < 2^{i+1}$, $k-1-v$ is less than $2^i$, so we can simply remove its binary representation. If $v > k$, we can take $k+1$ along with the binary representation of $v-k-1$. The one edge case is when the $2^i$ bit is set in $v-k-1$. In this case, we replace $k+1$ with $k+1+2^i$. So in all cases, we can form a subsequence adding up to $v$. Complexity: $O(\log n)$
[ "bitmasks", "constructive algorithms", "greedy", "number theory" ]
1,800
def construct(n, k):
    """Build a sequence (Codeforces 1965B) of at most 25 non-negative ints
    such that no subsequence sums to k, while every v in [1, n] with v != k
    is some subsequence sum.

    Per editorial: with i the largest power-of-two exponent where 2**i <= k,
    the elements <= k are {k - 2**i, 1, 2, ..., 2**(i-1)} and sum to k - 1,
    so k itself is unreachable; the elements k+1 and k+1+2**i cover all
    targets above k.  The value of n never matters (the construction works
    for any n up to 10**6).
    """
    i = 0
    while (1 << (i + 1)) <= k:
        i += 1
    seq = [k - (1 << i), k + 1, k + 1 + (1 << i)]
    # All powers of two below 2**20 except 2**i (22 elements total).
    for j in range(20):
        if j != i:
            seq.append(1 << j)
    return seq


def main():
    """Read test cases from stdin and print each constructed sequence."""
    t = int(input())
    for _ in range(t):
        n, k = map(int, input().split())
        seq = construct(n, k)
        print(len(seq))
        print(*seq)


if __name__ == "__main__":
    main()
1965
C
Folding Strip
You have a strip of paper with a binary string $s$ of length $n$. You can fold the paper in between any pair of adjacent digits. A set of folds is considered valid if after the folds, all characters that are on top of or below each other match. Note that all folds are made at the same time, so the characters don't have to match in between folds. For example, these are valid foldings of $s = \mathtt{110110110011}$ and $s = \mathtt{01110}$: The length of the folded strip is the length seen from above after all folds are made. So for the two above examples, after the folds shown above, the lengths would be $7$ and $3$, respectively. Notice that for the above folding of $s = \mathtt{01110}$, if we made either of the two folds on their own, that would not be a valid folding. However, because we don't check for validity until all folds are made, this folding is valid. After performing a set of valid folds, what is the minimum length strip you can form?
Define the pattern of a (validly) folded strip to be the set of characters, in order, seen from above after all folds are made. It is always possible to fold the strip in such a way that no two adjacent characters in the pattern are equal. If we fold in between every pair of equal characters, and don't fold in between every pair of distinct characters, we will achieve this. This diagram shows one example of this (the red lines indicate where to fold, and the final pattern is $10101$): This set of folds will always be valid because it ensures that all $1$s in the original string are on odd indices and all $0$s are on even indices (or vice versa). Also, there is only one obtainable pattern (up to reversal) that is alternating in this way. It is never possible to fold in between two adjacent different characters, because that can never be part of a valid folding, and if there exists a pair of adjacent equal characters that you don't fold in between, the pattern will not be alternating. We can also show that the pattern formed by this process is always optimal. Let $t$ be any pattern obtained from a valid folding of $s$. Notice that if we perform a valid fold on $t$, that corresponds to a valid fold on $s$, because we can essentially "compose" the valid folding of $s$ into $t$ and the valid folding of $t$ into one valid folding. So we can fold $t$ using the process above, which will yield a pattern with alternating characters of length at most $len(t)$. Because the alternating pattern is unique for a given $s$, it must be the same (up to reversal) as the one described above. So the above pattern is of size at most $len(t)$ for any valid pattern $t$, and is therefore optimal. We can simulate the folding process to determine the final length. Complexity: $O(n)$
[ "constructive algorithms", "greedy", "strings" ]
2,300
# Simulate the canonical folding: walk along the strip, stepping right when
# the current character matches the alternating pattern at the current
# position and left otherwise.  The answer is the extent of visited positions.
for _ in range(int(input())):
    n = int(input())
    s = input()
    pos, lo, hi = 0, 0, 0
    for ch in s:
        step = 1 if (pos % 2 == 0) == (ch == '1') else -1
        pos += step
        lo = min(lo, pos)
        hi = max(hi, pos)
    print(hi - lo)
1965
D
Missing Subarray Sum
There is a hidden array $a$ of $n$ positive integers. You know that $a$ is a \textbf{palindrome}, or in other words, for all $1 \le i \le n$, $a_i = a_{n + 1 - i}$. You are given the sums of all but one of its distinct subarrays, in arbitrary order. The subarray whose sum is not given can be any of the $\frac{n(n+1)}{2}$ distinct subarrays of $a$. Recover any possible palindrome $a$. The input is chosen such that there is always at least one array $a$ that satisfies the conditions. An array $b$ is a subarray of $a$ if $b$ can be obtained from $a$ by the deletion of several (possibly, zero or all) elements from the beginning and several (possibly, zero or all) elements from the end.
Let's first look at the set of subarray sums of a palindromic array $a$ of $n$ positive integers. Because $a$ is a palindrome, the sum of the subarray with indices in the range $[l, r]$ is the same as the sum of the subarray with indices in the range $[n + 1 - r, n + 1 - l]$. So if we ignore all subarrays where $l + r = n + 1$ (subarrays centered at the center of $a$), each sum must appear an even number of times. Also, every "centered" subarray $[l, n + 1 - l]$ must have a unique sum. This is because all elements of $a$ are strictly positive, and if the "centered" subarrays are ordered by length, each one contains all elements in the previous one, along with two new elements, so its sum must be strictly greater. Therefore, the set of subarray sums that appear an odd number of times is exactly the set of sums of subarrays centered at the center of $a$. For example, in the array $a = [1, 2, 3, 2, 1]$, the sums that appear an odd number of times are: $3$ ($[3]$, $[1, 2]$, $[2, 1]$) $7$ ($[2, 3, 2]$) $9$ ($[1, 2, 3, 2, 1]$) So if we have all $\frac{n(n+1)}{2}$ subarray sums of $a$, we can then use the "centered" sums to reconstruct $a$ itself. The smallest "centered" sum is either the middle element or the sum of the middle two (equal) elements, depending on the parity of $n$, and for each "centered" sum in ascending order after that, its difference with the previous sum must be the sum of the next two (equal) elements closest to the center of $a$. Now, let's find out how to reconstruct the missing sum. We consider two cases: If the missing sum is for a "centered" subarray, then there will be exactly $\lceil\frac{n}{2}\rceil - 1$ sums that appear an odd number of times in the input. We can use these sums to construct a palindromic array $b$ of size $n-2$ as described above.We can then remove all sums of subarrays of $b$ from the initial input list, and look at what is remaining. 
If we let $[l, r]$ be the indices of the missing subarray, the largest remaining sum in the list must be the sum of $[1, r]$ in $a$ (or equivalently $[l, n]$). Let this sum be $x$ and the sum of $b$ be $y$. If $l > 1$ (and therefore $r < n$), the missing sum must be $2x-y$, because the $2x$ part includes everything in the missing subarray twice, and everything else once, and $y$ includes everything once. $l=1$, $r=n$ initially seems like it will be an edge case, but the same equation works there as well, since $2x$ includes everything in the missing array (all of $a$) once, and everything in $b$ once, and $y$ includes everything in $b$ once. We can then remove all sums of subarrays of $b$ from the initial input list, and look at what is remaining. If we let $[l, r]$ be the indices of the missing subarray, the largest remaining sum in the list must be the sum of $[1, r]$ in $a$ (or equivalently $[l, n]$). Let this sum be $x$ and the sum of $b$ be $y$. If $l > 1$ (and therefore $r < n$), the missing sum must be $2x-y$, because the $2x$ part includes everything in the missing subarray twice, and everything else once, and $y$ includes everything once. $l=1$, $r=n$ initially seems like it will be an edge case, but the same equation works there as well, since $2x$ includes everything in the missing array (all of $a$) once, and everything in $b$ once, and $y$ includes everything in $b$ once. If the missing sum is not for a "centered" subarray, then there will be exactly $\lceil\frac{n}{2}\rceil + 1$ sums that appear an odd number of times in the input. We can use these sums to construct a palindromic array $b$ of size $n+2$ as described above.In a similar way to the previous case, we can then remove all sums from the initial input list from the set of subset sums of $b$. If we let $[l, r]$ be the indices of the extra "centered" subarray in $b$, the largest remaining sum in the list must be the sum of $[1, r]$ in $b$ (or equivalently $[l, n + 2]$). 
If we let $x$ be this largest sum, and $y$ be the sum of $b$, we can use similar logic to the previous case to determine that the missing sum is $2x-y$. In a similar way to the previous case, we can then remove all sums from the initial input list from the set of subset sums of $b$. If we let $[l, r]$ be the indices of the extra "centered" subarray in $b$, the largest remaining sum in the list must be the sum of $[1, r]$ in $b$ (or equivalently $[l, n + 2]$). If we let $x$ be this largest sum, and $y$ be the sum of $b$, we can use similar logic to the previous case to determine that the missing sum is $2x-y$. So we can determine the missing sum, using the number of sums that appear an odd number of times in the input to determine which case we are in. Once we have found the missing sum, we just need to reconstruct $a$ using the process above. Also, notice that by the construction we have followed so far, the solution is always unique. Complexity: $O(n^2 \log n)$
[ "constructive algorithms" ]
2,900
# Codeforces 1965D "Missing Subarray Sum": recover a positive palindromic
# array a of length n given all but one of its n(n+1)/2 subarray sums.
# Key fact from the editorial: the sums occurring an odd number of times are
# exactly the sums of subarrays centered at the middle of a.

def getSubarraySums(a):
    # All n(n+1)/2 subarray sums of a, sorted ascending (O(n^2) brute force).
    cts = []
    for i in range(len(a)):
        sm = 0
        for j in range(i, len(a)):
            sm = sm + a[j]
            cts.append(sm)
    cts.sort()
    return cts

def getOddOccurringElements(cts):
    # Given a sorted list, return (sorted) the values that occur an odd
    # number of times: equal neighbours cancel pairwise on a stack.
    odds = []
    for ct in cts:
        if len(odds) > 0 and ct == odds[-1]:
            odds.pop()
        else:
            odds.append(ct)
    return odds

def getPalindrome(odds, n):
    # Rebuild a palindrome of length n from its sorted "centered" subarray
    # sums: the smallest sum gives the middle element(s); each successive
    # sum adds two equal elements symmetrically around the center.
    a = [0] * n
    prev = 0
    idx = (n - 1) // 2
    for x in odds:
        if idx == n - 1 - idx:
            a[idx] = x
        else:
            a[idx] = (x - prev) // 2
            a[n - 1 - idx] = (x - prev) // 2
        prev = x
        idx = idx - 1
    return a

def getLargestExcluded(bigList, smallList):
    # Both lists are sorted; NOTE: mutates both arguments by popping matched
    # tails.  Returns the largest element of bigList not present in smallList
    # (multiset difference, viewed from the top).
    while len(smallList) > 0 and bigList[-1] == smallList[-1]:
        bigList.pop()
        smallList.pop()
    return bigList[-1]

t = int(input())
for tc in range(t):
    n = int(input())
    subarraySums = list(map(int, input().split()))
    subarraySums.sort()
    odds = getOddOccurringElements(subarraySums)
    missingSum = -1
    if len(odds) > (n + 1) // 2:
        # ceil(n/2)+1 odd-occurring sums: the missing subarray is NOT
        # centered, so its mirror's sum shows up as one extra odd value.
        # All true centered sums share one parity (consecutive centered sums
        # differ by an even amount), so if both parities occur, the unique
        # minority-parity value is the extra one — and it equals the missing
        # sum itself.
        oddvals = []
        evenvals = []
        for x in odds:
            if x % 2 == 1:
                oddvals.append(x)
            else:
                evenvals.append(x)
        if len(evenvals) > 0 and len(oddvals) > 0:
            missingSum = evenvals[0] if len(evenvals) == 1 else oddvals[0]
        else:
            # Parity alone can't identify it: build the auxiliary palindrome
            # b of size n+2 (editorial case 2) and use missing = 2x - y with
            # x = largest input sum not explained by b, y = sum of b.
            b = getPalindrome(odds, n + 2)
            bSums = getSubarraySums(b)
            y = bSums[-1]
            x = getLargestExcluded(bSums, subarraySums)
            missingSum = 2 * x - y
    else:
        # ceil(n/2)-1 odd-occurring sums: the missing subarray IS centered
        # (editorial case 1).  Build b of size n-2 and apply missing = 2x - y.
        b = getPalindrome(odds, n - 2)
        bSums = getSubarraySums(b)
        y = bSums[-1]
        x = getLargestExcluded(subarraySums, bSums)
        missingSum = 2 * x - y
    # Re-derive the full set of centered sums with the missing value restored,
    # then reconstruct the answer palindrome from them.
    odds.append(missingSum)
    odds.sort()
    odds = getOddOccurringElements(odds)
    ans = getPalindrome(odds, n)
    print(*ans)
1965
E
Connected Cubes
There are $n \cdot m$ unit cubes currently in positions $(1, 1, 1)$ through $(n, m, 1)$. Each of these cubes is one of $k$ colors. You want to add additional cubes at any integer coordinates such that the subset of cubes of each color is connected, where two cubes are considered connected if they share a face. In other words, for every pair of cubes of the same color $c$, it should be possible to travel from one to the other, moving only through cubes of color $c$ that share a face. The existing cubes are currently in the corner of a room. There are colorless cubes completely filling the planes $x = 0$, $y = 0$, and $z = 0$, preventing you from placing additional cubes there or at any negative coordinates. Find a solution that uses at most $4 \cdot 10^5$ additional cubes (not including the cubes that are currently present), or determine that there is no solution. It can be shown that under the given constraints, if there is a solution, there is one using at most $4 \cdot 10^5$ additional cubes.
We can show that a solution always exists. This example with $n=3$, $m=5$, $k=4$ will demonstrate the construction we will use: First, extend each of the even columns up by $n+k-1$ spaces, keeping all cubes in a given vertical column the same color: Then, for the odd columns, do something similar, except we use the bottom $n-1$ rows to "bend" them around so the tops point out to the right: After that, extend each of these new "bent" rows (each of which corresponds to an input cube in an odd column) to the right by $k$: Now, each initial cube in the $n$ by $m$ grid corresponds to one of these extended rows or columns, each of which is a connected group. Fill in the remaining columns between the even columns with $k$ rows each. The $i$-th of these should be color $i$. Now do the same for the odd columns: At this point, each cell of a given color is connected to at least one of the rows/columns we added in the last two steps. Now, we want to connect all of those by color. Start by adding these $k$ rows: Followed by these $k-1$ rows: Once again, at this point, each cube of a given color is connected to one of the rows of that color we added in the last two steps. So the last remaining step is to connect them: Now, all cubes of each color are connected. This construction uses exactly $(n+k)^2m - (k-1)^2(m-1) - nm$ additional cubes. Since $n, m, k \le 50$, this is at most $379,851$, which fits within the bounds of the problem. Complexity: $O((n+k)^2 m)$
[ "constructive algorithms", "games" ]
3,100
# Codeforces 1965E "Connected Cubes": emit the editorial's explicit
# construction.  Coordinates are 0-based internally and shifted to 1-based
# on output; the input grid sits at z = 1 and is NOT re-printed, only the
# added cubes are.  Axes here: x ~ editorial's rows extended outward,
# y ~ columns, z ~ height.
n, m, k = map(int, input().split())
a = []
for i in range(n):
    a.append(list(map(int, input().split())))
ans = []
# Region above the original grid (x < n): every column grows a tower of
# height n + k.  Columns with y % 2 == 1 keep their own color all the way
# up; the other columns are "bent" (a[min(x, n - z)][y]) so each original
# cube ends up exposed, then capped with the k rainbow layers (color z - n).
for x in range(n):
    for y in range(m):
        for z in range(1, n + 1):
            if y % 2 == 1:
                ans.append([x, y, z, a[x][y]])
            else:
                ans.append([x, y, z, a[min(x, n - z)][y]])
        for z in range(n + 1, n + k + 1):
            if y % 2 == 1:
                ans.append([x, y, z, a[x][y]])
            else:
                ans.append([x, y, z, z - n])
# Region extending outward (n <= x < n + k): rows of single colors
# (x - n + 1) interleaved with the bent copies of the other columns,
# plus one cap cube per (x, y) at height n + 1.
for x in range(n, n + k):
    for y in range(m):
        for z in range(1, n + 1):
            if y % 2 == 1:
                ans.append([x, y, z, x - n + 1])
            else:
                ans.append([x, y, z, a[n - z][y]])
        ans.append([x, y, n + 1, x - n + 1])
# Connector wall at x = n: k - 1 rows of colors 2..k spanning all y.
for y in range(m):
    for z in range(n + 2, n + k + 1):
        ans.append([n, y, z, z - n])
# Final triangle at y = 0 joining the per-color rows of the two walls.
for x in range(n + 1, n + k):
    for z in range(n + 2, n + k + 1):
        ans.append([x, 0, z, max(x - n + 1, z - n)])
print(len(ans))
for cube in ans:
    print(cube[0] + 1, cube[1] + 1, cube[2] + 1, cube[3])
1965
F
Conference
You have been asked to organize a very important art conference. The first step is to choose the dates. The conference must last for a certain number of consecutive days. Each day, one lecturer must perform, and the same lecturer cannot perform more than once. You asked $n$ potential lecturers if they could participate in the conference. Lecturer $i$ indicated that they could perform on any day from $l_i$ to $r_i$ inclusive. A certain segment of days can be chosen as the conference dates if there is a way to assign an available lecturer to each day of the segment, assigning each lecturer to no more than one day. For each $k$ from $1$ to $n$, find how many ways there are to choose a segment of $k$ consecutive days as the conference dates.
For a segment of days, how can we tell if there's a way to assign a lecturer to each day of the segment? Consider a bipartite graph: the first part consists of the days in the segment, the second part consists of all lecturers, an edge between a day and a lecturer exists if that lecturer is available on that day. We need to check if the maximum matching covers all vertices in the first part. If this is the case, we'll call the segment of days valid. A common way to check if a segment is valid is to use Hall's marriage theorem. In our case, we can formulate it as follows: If for each subset of days $s$, the number of lecturers available on at least one day of that subset is at least $|s|$, then this segment of days is valid. It might be tempting to only consider subsets which form contiguous subsegments. However, consider the following test case: For a segment of days $[1; 3]$, the only subset that violates the Hall's marriage theorem condition is $\{1, 3\}$, which is not contiguous. Let's try to fix that. Suppose there are two lecturers with equal $l_i$: let their availability segments be $[a; b]$ and $[a; c]$, where $b \le c$. Then, if we replace $[a; c]$ with $[a+1; c]$, the answer does not change. This can be easily seen if you consider the set of all pairs of days that these two lecturers can cover, and notice that this set stays the same after the $[a; c] \rightarrow [a+1; c]$ transformation. (Note that when $a = b = c$, replacing $[a; c]$ with $[a+1; c]$ is effectively equivalent to removing one of the lecturers: their segment becomes empty.) We can keep applying this operation until all $l_i$ are distinct (potentially removing some lecturers in the process). This process can be simulated in $O(n \log n)$ time by going left to right using a priority queue. Why is this transformation useful? Consider a subset of days $t$ that violates the Hall's marriage theorem condition. 
Suppose it's non-contiguous: say, days $x$ and $y$ ($x + 1 < y$) belong to $t$, while none of days $x+1, x+2, \ldots, y-1$ belong to $t$. Then, if we include days $x+1, x+2, \ldots, y-1$ into $t$, then $t$ will still violate the condition! (proof left as an exercise) As a consequence, if we include all "gaps" in $t$, we'll still get a violating subset, but this time, it will be contiguous. We have described a transformation that makes all $l_i$ distinct. Similarly, we can apply it in the same way to make all $r_i$ distinct. After that, we'll get another useful property: monotonicity. Specifically, if $t = [l; r]$ is a violating subset, then $[l-1; r]$ and $[l; r+1]$ are violating subsets as well. Now we can see that a segment of days is valid iff it is not a violating subset itself (i.e. instead of checking all subsets of the segment, it's enough to just check the whole segment). To finish the solution, we can use the two pointers technique to find all valid segments in linear time. Bonus: solve the problem for $1 \le l_i \le r_i \le 10^{12}$.
[ "data structures", "flows" ]
3,300
/** * author: tourist * created: 26.11.2023 09:36:38 **/ #include <bits/stdc++.h> using namespace std; #ifdef LOCAL #include "algo/debug.h" #else #define debug(...) 42 #endif const long long inf = (long long) 1e18; int main() { ios::sync_with_stdio(false); cin.tie(0); int n; cin >> n; vector<long long> l(n), r(n); for (int i = 0; i < n; i++) { cin >> l[i] >> r[i]; } int original_n = n; for (int rot = 0; rot < 2; rot++) { // make all left ends distinct map<long long, vector<long long>> mp; for (int i = 0; i < n; i++) { mp[l[i]].push_back(r[i]); } vector<long long> new_l, new_r; auto it = mp.begin(); multiset<long long> s; long long T = -inf; while (true) { if (s.empty()) { if (it == mp.end()) { break; } T = it->first; } while (it != mp.end() && T == it->first) { s.insert(it->second.begin(), it->second.end()); ++it; } assert(!s.empty()); new_l.push_back(T); new_r.push_back(*s.begin()); s.erase(s.begin()); T += 1; while (!s.empty() && *s.begin() < T) { s.erase(s.begin()); } } swap(l, new_l); swap(r, new_r); n = (int) l.size(); for (int i = 0; i < n; i++) { l[i] *= -1; r[i] *= -1; swap(l[i], r[i]); } } sort(l.begin(), l.end()); sort(r.begin(), r.end()); vector<long long> ans(original_n + 1); long long lx = -inf, rx = -inf; int pl = 0, pr = 0; int k = 0; while (pl < n || pr < n) { long long wait = min(pl < n ? l[pl] - lx : inf, pr < n ? r[pr] - rx : inf); ans[k] += wait; lx += wait; rx += wait; while (pl < n && l[pl] == lx) { k += 1; lx += 1; pl += 1; } while (pr < n && r[pr] == rx) { ans[k] += 1; k -= 1; rx += 1; pr += 1; } } for (int i = n; i > 1; i--) { ans[i - 1] += ans[i]; } for (int i = 1; i <= original_n; i++) { cout << ans[i] << '\n'; } return 0; }
1966
A
Card Exchange
You have a hand of $n$ cards, where each card has a number written on it, and a fixed integer $k$. You can perform the following operation any number of times: - Choose any $k$ cards from your hand that all have the same number. - Exchange these cards for $k-1$ cards, each of which can have \textbf{any} number you choose (including the number written on the cards you just exchanged). Here is one possible sequence of operations for the first example case, which has $k=3$: What is the minimum number of cards you can have in your hand at the end of this process?
If you don't initially have at least $k$ copies of any number, you can't perform any operations, so the answer is $n$. Otherwise, we can show that we can always get down to $k-1$ cards, with the following algorithm: Choose any card that you have $k$ copies of, and remove those $k$ copies If you have no more cards, take any $k-1$ cards and end the process. Otherwise, let $x$ be the number on any card you have. Take $k-1$ copies of $x$. Now, you have at least $k$ copies of $x$, so return to step $1$. Since the total number of cards decreases at each step, this process will always terminate, so you will always end up with $k-1$ cards. Also, since the total number of cards decreases by exactly $1$ at each step, and you can't do any operations if you have less than $k$ cards, it is impossible to do better than $k-1$, so our solution of $k-1$ is optimal. Complexity: $O(n)$ or $O(n \log n)$ depending on implementation.
[ "constructive algorithms", "games", "greedy" ]
800
# If some number reaches k copies we can always reduce the hand to k - 1
# cards (editorial's chain of exchanges); otherwise no operation is possible
# and the answer stays n.
for _ in range(int(input())):
    n, k = map(int, input().split())
    values = list(map(int, input().split()))
    freq = {}
    result = n
    for v in values:
        freq[v] = freq.get(v, 0) + 1
        if freq[v] >= k:
            result = k - 1
    print(result)
1966
B
Rectangle Filling
There is an $n \times m$ grid of white and black squares. In one operation, you can select any two squares of the same color, and color all squares in the subrectangle between them that color. Formally, if you select positions $(x_1, y_1)$ and $(x_2, y_2)$, both of which are currently the same color $c$, set the color of all $(x, y)$ where $\min(x_1, x_2) \le x \le \max(x_1, x_2)$ and $\min(y_1, y_2) \le y \le \max(y_1, y_2)$ to $c$. This diagram shows a sequence of two possible operations on a grid: Is it possible for all squares in the grid to be the same color, after performing any number of operations (possibly zero)?
If either pair of opposite corners is the same color, then we can choose those corners to make everything the same color in one operation. Otherwise, we have four cases for the colors of the corners: Notice that these are all essentially rotations of each other, so we can only consider the first case by symmetry: If any of the squares in the first row are black, then we can color everything black in two operations: In the same way, if any of the squares in the last row are white, then we can color everything white in two operations. Otherwise, the grid looks like this: Notice that no matter how many operations we do, all squares in the top row will remain white, and all squares in the bottom row will remain black, so we can never make everything the same color. So, considering the four cases from earlier, the solution is: NO if all squares in the top row are the same color, all squares in the bottom row are the same color, and these two colors are different NO if all squares in the leftmost column are the same color, all squares in the rightmost column are the same color, and these two colors are different YES otherwise Complexity: $O(nm)$
[ "constructive algorithms", "implementation" ]
1,100
# Possible unless one pair of opposite borders is each uniformly colored in
# two different colors.  If a pair of opposite corners matches, one
# operation finishes immediately, so only the differing-corner case needs
# the border checks.
for _ in range(int(input())):
    n, m = map(int, input().split())
    grid = [input() for _ in range(n)]
    verdict = "YES"
    if grid[0][0] != grid[-1][-1]:
        # Top and bottom rows each uniform (colors necessarily differ here).
        if len(set(grid[0])) == 1 and len(set(grid[-1])) == 1:
            verdict = "NO"
        # Leftmost and rightmost columns each uniform.
        left_col = {row[0] for row in grid}
        right_col = {row[-1] for row in grid}
        if len(left_col) == 1 and len(right_col) == 1:
            verdict = "NO"
    print(verdict)
1967
A
Permutation Counting
You have some cards. An integer between $1$ and $n$ is written on each card: specifically, for each $i$ from $1$ to $n$, you have $a_i$ cards which have the number $i$ written on them. There is also a shop which contains unlimited cards of each type. You have $k$ coins, so you can buy $k$ new cards in total, and the cards you buy can contain any integer between $1$ and $n$. After buying the new cards, you rearrange all your cards in a line. The score of a rearrangement is the number of (contiguous) subarrays of length $n$ which are a permutation of $[1, 2, \ldots, n]$. What's the maximum score you can get?
If $a_1=a_2=\cdots=a_n$ and $k=0$, it's obvious that the optimal rearrangement is $[1,2,\cdots,n,1,2,\cdots,n,\cdots,1,2,\cdots,n]$, because every subarray of length $n$ is a permutation of $1\sim n$. WLOG, assume that $a_1\ge a_2\ge\cdots\ge a_n=w$. If $k=0$, we can insert more numbers at the back and form more permutations. But because $a_n$ is the minimum number, we can only make these subarrays become permutations: The remaining cards can be placed arbitrarily. This won't always be satisfied (if $a_{n-1}=w$, the green subarray won't exist). But this will only happen if $a_{n-1}=w$ is also minimum. It's the same for $a_{n-2}$ etc. So we can calculate the answer: $ans = nw - \sum_{i=1}^n [a_i = w] + 1$ Considering $k > 0$ cases, we find that every time we choose a minimum $a_i$ and increase it by $1$, the answer will be increased. And the answer won't change if we increase some other $a_i$. So we have two different approaches: Sort the array $a$, enumerate the range where the minimum number will be, and check if it's possible. If so, we can just calculate by using the above equation. Binary search the $w$ after buying cards. After calculating the array $a$ after buying cards, we will be able to calculate the answer. Time complexity: $\mathcal O(n\log n)$ or $\mathcal O(n\log a_i)$.
[ "binary search", "greedy", "implementation", "math", "sortings" ]
1,400
#include<bits/stdc++.h>
using namespace std;

// Codeforces 1967A "Permutation Counting".
// Sort counts descending, then repeatedly level up the group of equal
// minima with the k purchases.  With final minimum `lst` shared by `cnt`
// values (of which `cnt - k%cnt` stay one short), the score is
//   lst * n - (#values still at the minimum) + 1   (editorial formula).
void solve()
{
    int n;
    long long k;
    cin >> n >> k;
    vector<long long> a(n);
    for (int x = 0; x < n; x++) cin >> a[x];
    sort(a.begin(), a.end());
    reverse(a.begin(), a.end());
    // Pop the current minima off the back of the descending array.
    long long lst = a.back(), cnt = 1;
    a.pop_back();
    while (!a.empty() && lst == a.back()) a.pop_back(), cnt++;
    while (!a.empty()) {
        long long delta = a.back() - lst;  // coins to raise all cnt minima one level
        if (k < delta * cnt) break;
        k -= delta * cnt;
        lst = a.back();
        while (!a.empty() && lst == a.back()) a.pop_back(), cnt++;
    }
    lst += k / cnt;  // spend remaining coins evenly across the minima
    k %= cnt;
    cnt -= k;        // this many minima remain one purchase short
    cout << lst * n - cnt + 1 << endl;
}

// Fix: original declared `main()` with no return type; implicit int is
// ill-formed in standard C++ (accepted only as a GCC extension).
int main()
{
    ios::sync_with_stdio(false), cin.tie(0);
    int t;
    cin >> t;
    while (t--) solve();
}
1967
B1
Reverse Card (Easy Version)
\textbf{The two versions are different problems. You may want to read both versions. You can make hacks only if both versions are solved.} You are given two positive integers $n$, $m$. Calculate the number of ordered pairs $(a, b)$ satisfying the following conditions: - $1\le a\le n$, $1\le b\le m$; - $a+b$ is a multiple of $b \cdot \gcd(a,b)$.
Denote $\gcd(a,b)$ as $d$. Assume that $a=pd$ and $b=qd$, then we know that $\gcd(p,q)=1$. $(b\cdot\gcd(a,b))\mid (a+b)\iff (qd^2)\mid (pd+qd)\iff (qd)\mid (p+q)$. Assume that $p+q=kqd$, then $p=(kd-1)q$. We know $q=1$ because $\gcd(p,q)=1$. Enumerate $d$ from $1$ to $m$, we know $p+1=kd\le\lfloor\frac{n}{d}\rfloor+1$, so we add $\left\lfloor\frac{\lfloor\frac{n}{d}\rfloor+1}{d}\right\rfloor$ to answer. In this method, $p=0,k=1,d=1$ will also be included in the answer, so we should subtract $1$ from the answer. Time Complexity: $\mathcal O(\sum m)$.
[ "brute force", "math", "number theory" ]
1,400
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;

const int N = 2000005;
int tc, n, m;
ll ans;

// Codeforces 1967B1: answer = sum over d = 1..m of floor((n + d) / d^2) - 1,
// which equals sum_d floor((floor(n/d) + 1) / d) minus the spurious
// (p = 0, d = 1) term counted by the formula.
inline void solve() {
    cin >> n >> m;
    ans = 0;
    for (ll d = 1; d <= m; d++) {
        ans += (n + d) / (d * d);
    }
    cout << ans - 1 << '\n';
}

int main() {
    ios::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    cin >> tc;
    while (tc--) solve();
    return 0;
}
1967
B2
Reverse Card (Hard Version)
\textbf{The two versions are different problems. You may want to read both versions. You can make hacks only if both versions are solved.} You are given two positive integers $n$, $m$. Calculate the number of ordered pairs $(a, b)$ satisfying the following conditions: - $1\le a\le n$, $1\le b\le m$; - $b \cdot \gcd(a,b)$ is a multiple of $a+b$.
Denote $\gcd(a,b)$ as $d$. Assume that $a=pd$ and $b=qd$, then we know that $\gcd(p,q)=1$. $(a+b)\mid (b\cdot\gcd(a,b))\iff (pd+qd)\mid (qd^2)\iff (p+q)\mid (qd)$. We know that $\gcd(p+q,q)=\gcd(p,q)=1$, so $(p+q)\mid d$. We also know that $p\ge 1,q\ge 1$, so $p < d=\frac{a}{p}\le \frac{n}{p}$ and thus $p^2 < n$. Similarly, we can prove $q^2 < m$. So the number of $(p,q)$ is $\mathcal O(\sqrt{nm})=\mathcal O(n+m)$. We can enumerate each $(p,q)$ such that $\gcd(p,q)=1$ and calculate the answer. $(p+q)\mid d$ is required, so we add $\left\lfloor\frac{\min\{\lfloor\frac{n}{p}\rfloor,\lfloor\frac{m}{q}\rfloor\}}{p+q}\right\rfloor$. Time Complexity: $\mathcal O(\sum n + \sum m)$.
[ "brute force", "math", "number theory" ]
2,200
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;

// Codeforces 1967B2: with a = p*d, b = q*d, gcd(p,q) = 1, the condition
// forces (p + q) | d, and p^2 < n, q^2 < m.  Sieve out non-coprime (p, q)
// pairs, then each coprime pair contributes floor(min(n/p, m/q) / (p + q)).
int main() {
    ios::sync_with_stdio(0);
    cin.tie(0);
#if !ONLINE_JUDGE && !EVAL
    // Local testing: shadow std::cin/std::cout with file streams.
    ifstream cin("input.txt");
    ofstream cout("output.txt");
#endif
    int t;
    cin >> t;
    while (t--) {
        ll n, m;
        cin >> n >> m;
        ll pmax = sqrt(n) + 2, qmax = sqrt(m) + 2;
        // shared[p][q] == true  <=>  gcd(p, q) > 1
        vector<vector<bool>> shared(pmax + 1, vector<bool>(qmax + 1, false));
        for (ll d = 2; d <= min(pmax, qmax); d++) {
            for (ll p = d; p <= pmax; p += d) {
                for (ll q = d; q <= qmax; q += d) {
                    shared[p][q] = true;
                }
            }
        }
        ll total = 0;
        for (ll p = 1; p * p <= n; p++) {
            for (ll q = 1; q * q <= m; q++) {
                if (!shared[p][q]) {
                    total += min(n / (p + q) / p, m / (p + q) / q);
                }
            }
        }
        cout << total << "\n";
    }
    return 0;
}
1967
C
Fenwick Tree
Let $\operatorname{lowbit}(x)$ denote the value of the lowest binary bit of $x$, e.g. $\operatorname{lowbit}(12)=4$, $\operatorname{lowbit}(8)=8$. For an array $a$ of length $n$, if an array $s$ of length $n$ satisfies $s_k=\left(\sum\limits_{i=k-\operatorname{lowbit}(k)+1}^{k}a_i\right)\bmod 998\,244\,353$ for all $k$, then $s$ is called the Fenwick Tree of $a$. Let's denote it as $s=f(a)$. For a positive integer $k$ and an array $a$, $f^k(a)$ is defined as follows: $$ f^k(a)= \begin{cases} f(a)&\textrm{if }k=1\\ f(f^{k-1}(a))&\textrm{otherwise.}\\ \end{cases} $$ You are given an array $b$ of length $n$ and a positive integer $k$. Find an array $a$ that satisfies $0\le a_i < 998\,244\,353$ and $f^k(a)=b$. It can be proved that an answer always exists. If there are multiple possible answers, you may print any of them.
It's well-known that Fenwick Tree is the data structure shown in the image below, and the sum of each subtree is stored at each vertex (i.e. $c=f(a)$ and $c_u=\sum\limits_{v\textrm{ in subtree of }u}a_v$). Denote the depth of a vertex $u$ as $\operatorname{dep}(u)$. Assume that $b=f^k(a)$. Consider a vertex $u$ and one of its ancestors $v$. Let $\Delta d=\operatorname{dep}(u)-\operatorname{dep}(v)$. It can be easily proved (by using the stars and bars method or generating functions) that the coefficient of $a_u$ in $b_v$ is $\binom{\Delta d+k-1}{\Delta d}$. Obviously, $a_u=b_u$ is satisfied for each leaf $u$. Enumerate each vertex $u$ whose $a$ value is already known (just in the increasing order is fine), and all its ancestors $v$, remove the $\textrm{coefficient}\cdot a_u$ part from $b_v$, and we can calculate the $a$ value of each vertex. Time complexity is $\mathcal O(n\log n)$ because the height of a Fenwick Tree is $\mathcal O(\log n)$.
[ "bitmasks", "brute force", "combinatorics", "data structures", "dp", "math", "trees" ]
2,300
// CF 1967C "Fenwick Tree": recover a from b = f^k(a), in place.
// Modint<mod> is arithmetic mod 998244353; inv[] holds modular inverses.
// For position i and its Fenwick "ancestor" u at depth-distance d, the
// coefficient of a[i] in b[u] is C(d+k-1, d); it is built incrementally
// as mul *= (d+k-1) * inv[d] while walking u += lowbit(u), and the
// contribution mul * a[i] is subtracted from each ancestor.
// NOTE(review): stored flattened; the leading //-comment and #define
// directives assume the original line breaks are restored.
//By: OIer rui_er #include <bits/stdc++.h> #define rep(x, y, z) for(int x = (y); x <= (z); ++x) #define per(x, y, z) for(int x = (y); x >= (z); --x) #define debug(format...) fprintf(stderr, format) #define fileIO(s) do {freopen(s".in", "r", stdin); freopen(s".out", "w", stdout);} while(false) #define endl '\n' using namespace std; typedef long long ll; mt19937 rnd(std::chrono::duration_cast<std::chrono::nanoseconds>(std::chrono::system_clock::now().time_since_epoch()).count()); int randint(int L, int R) { uniform_int_distribution<int> dist(L, R); return dist(rnd); } template<typename T> void chkmin(T& x, T y) {if(x > y) x = y;} template<typename T> void chkmax(T& x, T y) {if(x < y) x = y;} template<int mod> inline unsigned int down(unsigned int x) { return x >= mod ? x - mod : x; } template<int mod> struct Modint { unsigned int x; Modint() = default; Modint(unsigned int x) : x(x) {} friend istream& operator>>(istream& in, Modint& a) {return in >> a.x;} friend ostream& operator<<(ostream& out, Modint a) {return out << a.x;} friend Modint operator+(Modint a, Modint b) {return down<mod>(a.x + b.x);} friend Modint operator-(Modint a, Modint b) {return down<mod>(a.x - b.x + mod);} friend Modint operator*(Modint a, Modint b) {return 1ULL * a.x * b.x % mod;} friend Modint operator/(Modint a, Modint b) {return a * ~b;} friend Modint operator^(Modint a, int b) {Modint ans = 1; for(; b; b >>= 1, a *= a) if(b & 1) ans *= a; return ans;} friend Modint operator~(Modint a) {return a ^ (mod - 2);} friend Modint operator-(Modint a) {return down<mod>(mod - a.x);} friend Modint& operator+=(Modint& a, Modint b) {return a = a + b;} friend Modint& operator-=(Modint& a, Modint b) {return a = a - b;} friend Modint& operator*=(Modint& a, Modint b) {return a = a * b;} friend Modint& operator/=(Modint& a, Modint b) {return a = a / b;} friend Modint& operator^=(Modint& a, int b) {return a = a ^ b;} friend Modint& operator++(Modint& a) {return a += 1;} friend Modint operator++(Modint& a, int) 
{Modint x = a; a += 1; return x;} friend Modint& operator--(Modint& a) {return a -= 1;} friend Modint operator--(Modint& a, int) {Modint x = a; a -= 1; return x;} friend bool operator==(Modint a, Modint b) {return a.x == b.x;} friend bool operator!=(Modint a, Modint b) {return !(a == b);} }; const int N = 1e6 + 100, mod = 998244353; typedef Modint<mod> mint; int T, n, k; mint a[N], inv[N]; inline int lowbit(int x) {return x & -x;} int main() { ios::sync_with_stdio(false); cin.tie(0); cout.tie(0); inv[0] = inv[1] = 1; rep(i, 2, N - 1) inv[i] = (mod - mod / i) * inv[mod % i]; for(cin >> T; T; --T) { cin >> n >> k; rep(i, 1, n) cin >> a[i]; rep(i, 1, n) { mint mul = 1; for(int u = i + lowbit(i), d = 1; u <= n; u += lowbit(u), ++d) { mul *= (d + k - 1) * inv[d]; a[u] -= mul * a[i]; } } rep(i, 1, n) cout << a[i] << " \n"[i == n]; } return 0; }
1967
D
Long Way to be Non-decreasing
Little R is a magician who likes non-decreasing arrays. She has an array of length $n$, initially as $a_1, \ldots, a_n$, in which each element is an integer between $[1, m]$. She wants it to be non-decreasing, i.e., $a_1 \leq a_2 \leq \ldots \leq a_n$. To do this, she can perform several magic tricks. Little R has a fixed array $b_1\ldots b_m$ of length $m$. Formally, let's define a trick as a procedure that does the following things in order: - Choose a set $S \subseteq \{1, 2, \ldots, n\}$. - For each $u \in S$, assign $a_u$ with $b_{a_u}$. Little R wonders how many tricks are needed at least to make the initial array non-decreasing. If it is not possible with any amount of tricks, print $-1$ instead.
Binary search on the answer of magics. You may come up with many $\mathcal O(m\log m + n\log^2 m)$ solutions with heavy data structures. Unfortunately, none of them is helpful. The key is to judge $\mathcal O(m\log n)$ times whether vertex $u$ is reachable from vertex $v$ in $k$ steps, instead of querying the minimal value or something else.
[ "binary search", "dfs and similar", "graphs", "implementation", "shortest paths", "two pointers" ]
2,800
// CF 1967D "Long Way to be Non-decreasing".
// b defines a functional graph on [1, m]. The DSU keeps a spanning forest
// (tree edges go into ch[]); the one extra edge per component — the cycle
// edge — is stored in to[i]. dfs() assigns depth/dfn/subtree-size so
// query(u, v) answers "steps from u to v" in O(1): either v is an
// ancestor of u (dep difference), or u walks down to the cycle edge of
// its component (to[id[u]]) and continues from there.
// check(val): monotone two-pointer — can each a[i] be sent, in <= val
// steps, to a target lst that never decreases along the array?
// main: answer is the minimal val by binary search; -1 if val = m fails.
// NOTE(review): stored flattened; the //-comments and #define directives
// assume the original line breaks are restored.
#include <bits/stdc++.h> namespace FastIO { template <typename T> inline T read() { T x = 0, w = 0; char ch = getchar(); while (ch < '0' || ch > '9') w |= (ch == '-'), ch = getchar(); while ('0' <= ch && ch <= '9') x = x * 10 + (ch ^ '0'), ch = getchar(); return w ? -x : x; } template <typename T> inline void write(T x) { if (!x) return; write<T>(x / 10), putchar(x % 10 ^ '0'); } template <typename T> inline void print(T x) { if (x < 0) putchar('-'), x = -x; else if (x == 0) putchar('0'); write<T>(x); } template <typename T> inline void print(T x, char en) { if (x < 0) putchar('-'), x = -x; else if (x == 0) putchar('0'); write<T>(x), putchar(en); } }; using namespace FastIO; #define MAXM 1000001 int dep[MAXM], id[MAXM], dfn[MAXM], to[MAXM], sz[MAXM], tot = 0; std::vector<int> ch[MAXM]; void dfs(int u) { sz[u] = 1, dfn[u] = ++tot; for (int v : ch[u]) { dep[v] = dep[u] + 1, id[v] = id[u]; dfs(v), sz[u] += sz[v]; } } inline bool inSub(int u, int v) /* v \in u ? */ { return dfn[u] <= dfn[v] && dfn[v] < dfn[u] + sz[u]; } constexpr int INF = 0x3f3f3f3f; inline int query(int u, int v) /* u -> v */ { if (u == v) return 0; if (id[u] != id[v]) return INF; int res = INF; if (inSub(v, u)) res = dep[u] - dep[v]; if (inSub(v, to[id[u]])) res = std::min(dep[u] - dep[v] + dep[to[id[u]]] + 1, res); // printf("query(%d, %d) = %d\n", u, v, res); return res; } #define MAXN 1000001 int a[MAXN], N, M; bool check(int val) { // printf("check %d\n", val); int lst = 1; for (int i = 1; i <= N; ++i) { while (lst <= M && query(a[i], lst) > val) ++lst; if (lst > M) return false; // printf("a[%d] = %d\n", i, lst); } return true; } namespace DSU { int fa[MAXM]; void inis(int n) { for (int i = 1; i <= n; ++i) fa[i] = i; } inline int find(int x) { return x == fa[x] ? 
x : fa[x] = find(fa[x]); } inline bool merge(int x, int y) { if (find(x) == find(y)) return false; fa[fa[x]] = fa[y]; return true; } }; using namespace DSU; int main() { int T = read<int>(); while (T--) { N = read<int>(), M = read<int>(), inis(M); for (int i = 1; i <= N; ++i) a[i] = read<int>(); for (int x = 1; x <= M; ++x) dep[x] = id[x] = dfn[x] = to[x] = sz[x] = 0, ch[x].clear(); tot = 0; for (int i = 1, p; i <= M; ++i) { p = read<int>(); if (merge(i, p)) ch[p].push_back(i); else to[i] = p; } for (int i = 1; i <= M; ++i) if (to[i] > 0) id[i] = i, dfs(i); if (!check(M)) { puts("-1"); continue; } int L = 0, R = M; while (L < R) { int mid = L + R >> 1; if (check(mid)) R = mid; else L = mid + 1; } print<int>(R, '\n'); } }
1967
E1
Again Counting Arrays (Easy Version)
\textbf{This is the easy version of the problem. The differences between the two versions are the constraints on $n, m, b_0$ and the time limit. You can make hacks only if both versions are solved.} Little R has counted many sets before, and now she decides to count arrays. Little R thinks an array $b_0, \ldots, b_n$ consisting of non-negative integers is continuous if and only if, for each $i$ such that $1 \leq i \leq n$, $\lvert b_i - b_{i-1} \rvert = 1$ is satisfied. She likes continuity, so she only wants to generate continuous arrays. If Little R is given $b_0$ and $a_1, \ldots, a_n$, she will try to generate a non-negative continuous array $b$, which has no similarity with $a$. More formally, for all $1 \leq i \leq n$, $a_i \neq b_i$ holds. However, Little R does not have any array $a$. Instead, she gives you $n$, $m$ and $b_0$. She wants to count the different integer arrays $a_1, \ldots, a_n$ satisfying: - $1 \leq a_i \leq m$; - At least one non-negative continuous array $b_0, \ldots, b_n$ can be generated. \textbf{Note that $b_i \geq 0$, but the $b_i$ can be arbitrarily large.} Since the actual answer may be enormous, please just tell her the answer modulo $998\,244\,353$.
It can be easily proven that for a given $a$, we can always try to get the largest $b_i$ from $1$ to $n$ one by one. A perceptual understanding is that if not so, we can't escape from some gap in $a$ either. Thus, it's easy to get a $\mathcal O(nm)$ solution: Let $dp_{i,j}$ be, in the optimal $b$ determination progress, the ways of $b_i = j$. The transition is easy, and it reminds us that $b_{i, m}$s always add to answers with $m^{n-i}$ straightly without continuing to transmit. There is a $\mathcal O(n\log^2 n)$ solution that solves the above grid path counting problem with NTT. It can pass this version of the problem, but it is not that useful for the hard version. However, we strongly suggest you read the following part as it reveals the $\mathcal O(n)$ solution in the hard version. It currently works in $\mathcal O(n\sqrt{n})$, but in fact, it runs much faster than the polylog one. Try to modify the solution so that it works in $\mathcal O(\frac{n^2}{m})$. Consider the simplified version: You have two lines $y = x + b_1$ and $y = x + b_2$ in a Cartesian coordinate system. You are at $(0, 0)$ and you want to move to $(p, q)$. Each move is $x \gets x + 1$ or $y \gets y + 1$. You are not allowed to get onto the points on the two lines. Count the number of ways to get to $(p, q)$. We will introduce a classical $\mathcal O(\frac{p+q}{|b_2 - b_1|})$ solution$^\dagger$. Then, for each invalid path, we enumerate the end position where $b_i = 0, a_{i+1} = 1$, and after that, simply conduct the $\mathcal O(\frac nm)$ process (you just need to change some parameters). Now, we get a $\mathcal O(n\sqrt{n})$ solution, which should be fast enough to pass. Note $^\dagger$: If there is only one line $y = x + b$, we can do like what we have done in solving the grid path counting problem, which ultimately yields one of the general term formulas for Catalan numbers. Remind that the key part is to flip $(p, q)$ by the line $y = x + b$ to $(p', q')$. 
Thus, for each invalid path (It touches at least once $y = x + b$), if we flip all the way from the last time it touches the line to $(p, q)$, then it uniquely corresponds to a path starting from the origin, and finally arriving at $(p', q')$. For the answer to the problem with two lines $y_1 = x + b_1$, $y_2 = x + b_2$, we perform the inclusion-exclusion principle (aka. the reflection method). Let's denote $\tt A$ as touching $y_1$, $\tt B$ as touching $y_2$. An invalid path can, for example, be explained as $S = \tt AAABBABAABBBA$. From the experiment in solving the previous problem, we shrink $S$ into $\tt ABABABA$ (That is, to replace each $\tt AA$ with $\tt A$, and each $\tt BB$ with $\tt B$). Then we can calculate the answer as, $f(\varnothing) - f(\texttt{A}) - f(\texttt{B}) + f(\texttt{AB}) + f(\texttt{BA}) - f(\texttt{ABA}) - f(\texttt{BAB}) + \ldots$. $f(S)$ refers to flip $(p, q)$ by the characters in $S$ in order. For example, $f(\texttt{ABA})$ is, flip $(p, q)$ by first $y_1$, then $y_2$, then $y_1$ again, and count the paths from the origin to $(p^\star, q^\star)$. And $f(S)$ actually refers to all the invalid schemes containing the pattern $S$ which are the borderlines a path touches in order. There are $\mathcal O(\frac{p+q}{\lvert b_1 - b_2 \rvert})$ non-zero terms, each term is a combinatorial number. So the overall time complexity is $\mathcal O(\frac{p+q}{\lvert b_1 - b_2 \rvert})$.
[ "combinatorics", "dp", "fft", "math" ]
3,100
// CF 1967E1 "Again Counting Arrays (easy version)".
// Maths: factorial / inverse-factorial tables for binomials mod 998244353.
// Point::flip reflects the target across a diagonal (reflection method);
// calc(p, q, b1, b2) runs the alternating inclusion-exclusion in both
// reflection orders to count monotone lattice paths avoiding two lines.
// solve(): if b0 >= m the walk can never be blocked — answer is m^n.
// For small m (m*m <= n): O(n*m) dp over the current b-value, where
// states reaching value m-1 are banked into the answer with the matching
// power of m. Otherwise: start from m^n and, for each parity-feasible
// first step p at which the walk dies at height 0, subtract
// calc(...) * (m-1)^{#free choices} * m^{n-p-1}.
// NOTE(review): stored flattened; the //-comments and #define directive
// assume the original line breaks are restored.
#include <bits/stdc++.h> namespace FastIO { template <typename T> inline T read() { T x = 0, w = 0; char ch = getchar(); while (ch < '0' || ch > '9') w |= (ch == '-'), ch = getchar(); while ('0' <= ch && ch <= '9') x = x * 10 + (ch ^ '0'), ch = getchar(); return w ? -x : x; } template <typename T> inline void write(T x) { if (!x) return; write<T>(x / 10), putchar(x % 10 ^ '0'); } template <typename T> inline void print(T x) { if (x < 0) putchar('-'), x = -x; else if (x == 0) putchar('0'); write<T>(x); } template <typename T> inline void print(T x, char en) { if (x < 0) putchar('-'), x = -x; else if (x == 0) putchar('0'); write<T>(x), putchar(en); } }; using namespace FastIO; #define MAXN 2000001 namespace Maths { constexpr int MOD = 998244353; long long qpow(long long a, long long x) { long long ans = 1; while (x) (x & 1) && (ans = ans * a % MOD), a = a * a % MOD, x >>= 1; return ans; } long long frac[MAXN << 1 | 1], prac[MAXN << 1 | 1]; inline void inis(int V = MAXN * 2) { frac[0] = prac[0] = 1; for (int i = 1; i <= V; ++i) frac[i] = frac[i - 1] * i % MOD; prac[V] = qpow(frac[V], MOD - 2); for (int i = V - 1; i; --i) prac[i] = prac[i + 1] * (i + 1) % MOD; } inline long long C(int N, int M) { if (N < 0 || M < 0 || N < M) return 0; return frac[N] * prac[M] % MOD * prac[N - M] % MOD; } }; using namespace Maths; struct Point { int x, y; Point () {} Point (int X, int Y) : x(X), y(Y) {} inline void flip(int b) { x += b, y -= b, std::swap(x, y); } inline int calc() { return C(x + y, y); } } pA, pB; inline void add(int& x, int y) { (x += y) >= MOD && (x -= MOD); } inline void del(int& x, int y) { (x -= y) < 0 && (x += MOD); } int calc(int p, int q, int b1, int b2) { pA = pB = Point(p, q); int ans = pA.calc(); while (pA.x >= 0 && pA.y >= 0) pA.flip(b1), del(ans, pA.calc()), pA.flip(b2), add(ans, pA.calc()); while (pB.x >= 0 && pB.y >= 0) pB.flip(b2), del(ans, pB.calc()), pB.flip(b1), add(ans, pB.calc()); return ans; } int dp[2][3001], powm[MAXN]; void solve() { int N = 
read<int>(), M = read<int>(), b0 = read<int>(); if (b0 >= M) return (void)print<int>(qpow(M, N), '\n'); powm[0] = 1; for (int i = 1; i <= N; ++i) powm[i] = 1ll * M * powm[i - 1] % MOD; if (1ll * M * M <= N) { // dp for (int k = 0; k < M; ++k) dp[0][k] = (int)(k == b0), dp[1][k] = 0; int ans = 1ll * dp[0][M - 1] * (M - 1) % MOD * powm[N - 1] % MOD; for (int i = 1; i <= N; ++i) { auto now = dp[i & 1], lst = dp[(i & 1) ^ 1]; now[0] = 0; for (int k = 0; k + 1 < M; ++k) now[k + 1] = 1ll * lst[k] * (M - 1) % MOD; for (int k = 1; k < M; ++k) add(now[k - 1], lst[k]); if (i < N) add(ans, 1ll * now[M - 1] * (M - 1) % MOD * powm[N - i - 1] % MOD); } for (int k = 0; k < M; ++k) add(ans, dp[N & 1][k]); print<int>(ans, '\n'); } else { // reflective inclusion-exclusion const int B1 = M - b0, B2 = -1 - b0; int ans = qpow(M, N); for (int x = b0, y = 0, k = 1, p = b0; p < N; p += 2, ++x, ++y, k = 1ll * k * (M - 1) % MOD) del(ans, 1ll * calc(x, y, B1, B2) * k % MOD * powm[N - p - 1] % MOD); print<int>(ans, '\n'); } } int main() { int T = read<int>(); inis(); while (T--) solve(); return 0; }
1967
E2
Again Counting Arrays (Hard Version)
\textbf{This is the hard version of the problem. The differences between the two versions are the constraints on $n, m, b_0$ and the time limit. You can make hacks only if both versions are solved.} Little R has counted many sets before, and now she decides to count arrays. Little R thinks an array $b_0, \ldots, b_n$ consisting of non-negative integers is continuous if and only if, for each $i$ such that $1 \leq i \leq n$, $\lvert b_i - b_{i-1} \rvert = 1$ is satisfied. She likes continuity, so she only wants to generate continuous arrays. If Little R is given $b_0$ and $a_1, \ldots, a_n$, she will try to generate a non-negative continuous array $b$, which has no similarity with $a$. More formally, for all $1 \leq i \leq n$, $a_i \neq b_i$ holds. However, Little R does not have any array $a$. Instead, she gives you $n$, $m$ and $b_0$. She wants to count the different integer arrays $a_1, \ldots, a_n$ satisfying: - $1 \leq a_i \leq m$; - At least one non-negative continuous array $b_0, \ldots, b_n$ can be generated. \textbf{Note that $b_i \geq 0$, but the $b_i$ can be arbitrarily large.} Since the actual answer may be enormous, please just tell her the answer modulo $998\,244\,353$.
The definition of the function $f$ is the same as that in G1 editorial. Please read that first. Consider the process of the reflection method. We don't need to enumerate the place from where the contribution is $0$. We can just consider the contribution of each path. Note that each step is either $x\gets x+1$ or $y\gets y\pm 1$ here, instead of rotating it by $45^\circ$. Denote $\tt X$ as touching $y=m$ and $\tt Y$ as touching $y=-1$. Then we need to calculate the paths that either are empty or begin with $\tt X$. We can write down the equation of the answer: $f(\varnothing) - f(\texttt{Y}) + f(\texttt{XY}) - f(\texttt{YXY}) + f(\texttt{XYXY}) - \dots$. Let's consider how to calculate $f(\varnothing)$. We can represent it as: $\sum_{x=b-n}^{b+n} (m - 1)^{(x - b + n)/2} {n \choose (x - b + n) / 2}$ A point $(n,k)$ will be flipped at $(n,-2-k)$ after $\tt Y$, and will be flipped at $(n,2m+2+k)$ after $\tt XY$. So we only need to calculate: $\sum_{p \geq 0} \sum_{x = b - n}^{b + n} (m - 1)^{(x - b + n)/2} {n \choose (x - b + n + p(2m + 2))/2}\\-\sum_{p \geq 0} \sum_{x =b-n}^{b+n} (m - 1)^{(x - b + n)/2} {n \choose (-2 - x - b + n + p(2m + 2))/2}$ There are still some small problems. The reflection method points out that, if the targeted point $T$ is between the two lines, then we can keep reflecting $T$ and get $T'$, every path to $T'$ bijects to a path to $T$ which contains reflections. But what if $T$ is not between the two lines? We must solve a problem - the path to $T'$ maybe doesn't touch the two lines at all and there won't be reflections, and these should not be included in the answer. So we should consider $x\ge 0$ part and $x\le -1$ part separately. The above equation is for the $x\ge 0$ part. For the $x\le -1$ part, we are sure that $\tt Y$ will occur, so the path should begin with $\tt X$, then it will be $f(\texttt{X})-f(\texttt{YX})+f(\texttt{XYX})-\dots$ and can be calculated in the same way. 
The combinatorial number is hard to deal with, so let's maintain a coefficient sequence $c_0\cdots c_n$, that the answer is $\sum_i c_i\binom{n}{i}$. The sequence $c_i$ can be calculated using differentiation. Time Complexity: $\mathcal O(n)$.
[ "combinatorics", "dp", "math" ]
3,500
// CF 1967E2 "Again Counting Arrays (hard version)", O(n) per test.
// The //-comments inside solve() document the reflection scheme in terms
// of hit patterns X / Y. Mechanically: `work` range-adds a geometric
// factor (ratio m-1) of binomial weights into the coefficient array a[],
// realized as a difference array (add at l, subtract at r+1) that is
// later rolled forward with a[i] += a[i-1]*(m-1). The answer is
// sum_i a[i] * C(n, i), with C(n, i) built on the fly from the modular
// inverse table inv[] precomputed in main(). b0 >= m short-circuits to
// m^n since the walk can never be blocked.
// NOTE(review): stored flattened; every //-comment swallows the rest of
// its physical line, so the original line breaks must be restored.
#include <bits/stdc++.h> using namespace std; const int MOD = 998244353; const int N = 2e6+5; int inv[N]; int inversemod(int p, int q) { // assumes p > 0 // https://codeforces.com/blog/entry/23365 return (p > 1 ? q-1LL*inversemod(q%p, p)*q/p : 1); } void add(int& x, int y) { x += y; if (x >= MOD) x -= MOD; } void sub(int& x, int y) { x -= y; if (x < 0) x += MOD; } int solve(int n, int m, int b0) { // let X = hit -1, Y = hit m // f(S) = count of sequences that end up in [0, infty] while containing the pattern S // g(S) = count of sequences that end up in [-infty, -1] while containing the pattern S // we want f() - f(X) + f(YX) - f(XYX) + f(YXYX) - f(XYXYX) + ... // + g(Y) - g(XY) + g(YXY) - g(XYXY) + g(YXYXY) - ... vector<int> pw(n+1); pw[0] = 1; for (int i = 1; i <= n; i++) pw[i] = 1LL*pw[i-1]*(m-1) % MOD; // final ans will be sum from i = 0 to n of (n choose i) a_i vector<int> a(n+2); auto work = [&] (int c, int pw_coeff, int sgn_x) -> bool { // let RANGE = [-infty, -1] if sgn_x == -1 and [0, infty] if sgn_x == 1 // for all x in RANGE such that (n+x+c)/2 is between 0 and n inclusive, // add pw_coeff*pw[(n+x-b0)/2] to a[(n+x+c)/2] // return 0 to signal that we are out of bounds and should exit, otherwise 1 int l = 0; if (sgn_x == 1) l = max(l, (n+c+1)>>1); int r = n; if (sgn_x == -1) r = min(r, (n+c-1)>>1); if (l > r) return 0; add(a[l], 1LL*pw_coeff*pw[l-(b0+c)/2] % MOD); sub(a[r+1], 1LL*pw_coeff*pw[r+1-(b0+c)/2] % MOD); return 1; }; int ans = 0; // f(k*YX) // after reflection trick, end up in x + 2*(m+1)*k for (int k = 0; work(2*(m+1)*k - b0, 1, 1); k++); // f(X + k*YX) // after reflection trick, end up in -2-x - 2*(m+1)*k for (int k = 0; work(2*(m+1)*k+2+b0, MOD-1, 1); k++); // g(Y + k*XY) // after reflection trick, end up in 2*m-x + 2*(m+1)*k for (int k = 0; work(-2*m -2*(m+1)*k + b0, 1, -1); k++); // g(k*XY) // after reflection trick, end up in x - 2*(m+1)*k for (int k = 1; work(-2*(m+1)*k - b0, MOD-1, -1); k++); for (int i = 1; i <= n; i++) { add(a[i], 
1LL*a[i-1]*(m-1) % MOD); } // do the binomial stuff without precalculated factorials because why not int coeff = 1; for (int i = 0; i <= n; i++) { add(ans, 1LL * coeff * a[i] % MOD); coeff = 1LL * coeff * (n-i) % MOD * inv[i+1] % MOD; } return ans; } int main () { ios_base::sync_with_stdio(0); cin.tie(0); inv[1] = 1; for (int i = 2; i < N; i++) inv[i] = 1LL*(MOD-MOD/i)*inv[MOD % i] % MOD; int T; cin >> T; while (T--) { int n, m, b0; cin >> n >> m >> b0; if (b0 >= m) { int ans = 1; for (int i = 0; i < n; i++) ans = 1LL*ans*m % MOD; cout << ans << '\n'; continue; } cout << solve(n, m, b0) << '\n'; } }
1967
F
Next and Prev
Let $p_1, \ldots, p_n$ be a permutation of $[1, \ldots, n]$. Let the $q$-subsequence of $p$ be a permutation of $[1, q]$, whose elements are in the same relative order as in $p_1, \ldots, p_n$. That is, we extract all elements not exceeding $q$ together from $p$ in the original order, and they make the $q$-subsequence of $p$. For a given array $a$, let $pre(i)$ be the largest value satisfying $pre(i) < i$ and $a_{pre(i)} > a_i$. If it does not exist, let $pre(i) = -10^{100}$. Let $nxt(i)$ be the smallest value satisfying $nxt(i) > i$ and $a_{nxt(i)} > a_i$. If it does not exist, let $nxt(i) = 10^{100}$. For each $q$ such that $1 \leq q \leq n$, let $a_1, \ldots, a_q$ be the $q$-subsequence of $p$. For each $i$ such that $1 \leq i \leq q$, $pre(i)$ and $nxt(i)$ will be calculated as defined. Then, you will be given some integer values of $x$, and for each of them you have to calculate $\sum\limits_{i=1}^q \min(nxt(i) - pre(i), x)$.
Consider an array with length $n+x-1$. For each integer $k$ from $n$ to $1$, consider $i$ s.t. $p_i = k$, we tag the untagged (that is, a position will not be tagged for a second time) positions in the range $[i, i + x)\cap \mathbb{Z}$. By examining the total number of positions tagged, we have $\begin{aligned} n + x - 1 &= \sum_{i=1}^n \max(\min(i + x, nxt_i) - \max(pre_i + x, i), 0) \\ &= \sum_{i=1}^n \max(\min(x, nxt_i - i) + \min(i - pre_i, x) - x, 0) \\ &= \sum_{i=1}^n \min(x, nxt_i - i) + \min(i - pre_i, x) -\min(x, \min(x, nxt_i - i) + \min(i - pre_i, x)) \\ &= \sum_{i=1}^n \min(x, nxt_i - i) + \min(i - pre_i, x) - \min(x, nxt_i - pre_i) \\ \end{aligned}$ Symmetrically on the other side, we only have to compute $\displaystyle \sum_{i=1}^n \min(x, nxt_i - i)$. We want to maintain all $nxt_i - i$ in a sorted order, so that queries can be done using binary search. This can be done with the help of chunking. Let the length of each block be $B$. In each block, we divide the positions into two categories: Positions with the maximum $nxt_i$ (hereinafter referred to as $\tt A$) and positions without the maximum $nxt_i$ (hereinafter referred to as $\tt B$). We sort the positions by $nxt_i - i$ for A and for B respectively. How does an update affect the $nxt_i - i$ values? For the block that the new number is inserted, we brutely reconstruct it. For an affected complete block, the update is $+1(nxt_i\gets nxt_i+1)$ or $\operatorname{chkmin}(nxt_i \gets \min(nxt_i, pos))$. A $+1$ operation can just be handled with a lazy tag. For a $\operatorname{chkmin}$ operation, if it only affects the elements with maximum $nxt_i$, it can be done lazily, otherwise you can reconstruct the whole block. Let the potential of a block $\Phi:= \text{Numbers of different } nxt_i \text{ values in the block}$. 
Similar to the segment tree beats, each insertion increases $\sum \Phi$ by at most $1$, and each brute reconstruction takes $\mathcal O(B)$ time and decreases $\sum \Phi$ by at least $1$. Therefore the overall time complexity for the insertion part would be $\mathcal O(nB + \frac{n^2}{B})$. To answer a query, we iterate over all the different blocks. If we precalculate the prefix sums of $nxt_i - i$ in the sorted order, with a simple binary search, this part can be done easily in $\mathcal O(\frac{nk\log B}{B})$. Let $B = \mathcal O(\sqrt{n})$. The time complexity would be $\mathcal O(n\sqrt{n} + k\sqrt{n}\log n)$, while the space complexity is $\mathcal O(n)$. Merge sorting or fractional cascading will make the time complexity $\mathcal O((n + k)\sqrt{n})$, but it runs slower than the previously mentioned solution.
[ "brute force", "data structures", "implementation" ]
3,200
// CF 1967F "Next and Prev", sqrt decomposition with block size B = 400.
// Values are inserted in increasing order (b[v] = position of value v);
// the BIT t[] (vAdd / nQuery) maintains the current rank of a position.
// Each block keeps its nxt[i]-idx[i] deltas split into the positions
// holding the block's maximum nxt (mxval) and the rest (val), each kept
// sorted by the small radixsort lambda with prefix sums (pre/mxpre) so a
// query can binary-search every block. Lazy tags per block: add[] (+1
// from each later insertion) and mxadd[] (a chkmin applied only to the
// maximum class, segment-beats style — the block is rebuilt only when
// the chkmin cuts below the second maximum se[]).
// vWork() processes all insertions and answers the offline queries into
// ans[]; main() runs it twice (reversing a[] in between) to account for
// both the nxt and the pre side, with ans[] pre-seeded to -(i + x - 1)
// per the editorial identity, then prints the accumulated answers.
// NOTE(review): stored flattened; the original line breaks must be
// restored for the code to compile as written.
#include<bits/stdc++.h> using namespace std; constexpr int maxn=300010,maxq=100010,B=400; int n,bn,a[maxn],b[maxn],idx[maxn],nxt[maxn],add[maxn/B+5],mxadd[maxn/B+5],mx[maxn/B+5],se[maxn/B+5],t[maxn]; long long ans[maxq]; vector<int>val[maxn/B+5],mxval[maxn/B+5],pre[maxn/B+5],mxpre[maxn/B+5],pos[maxn/B+5],ks[maxn]; void vAdd(int i) { for(;i<=n;i+=(i&-i)) t[i]++; } int nQuery(int i) { int s=0; for(;i;i-=(i&-i)) s+=t[i]; return s; } void vWork() { int i,j,tot=0; for(i=1;i<=n;i++) b[a[i]]=i; for(i=1;i<=n;i++) { int p=b[i],bp=(p-1)/B+1; val[bp].clear(); mxval[bp].clear(); auto radixsort=[](vector<int>&v) { if(v.empty()) return; static int buc[1024],res[B+5]; auto tmp=minmax_element(v.begin(),v.end()); int mn=*tmp.first,rg=*tmp.second-mn; if(!rg) return; int lv=__lg(rg)/2+1,len=1<<lv,i; memset(buc,0,len*4); for(int &it:v) it-=mn,buc[it&(len-1)]++; for(i=1;i<len;i++) buc[i]+=buc[i-1]; for(int it:v) res[--buc[it&(len-1)]]=it; memset(buc,0,len*4); for(int it:v) buc[it>>lv]++; for(i=1;i<len;i++) buc[i]+=buc[i-1]; for(i=v.size()-1;i>=0;i--) v[--buc[res[i]>>lv]]=res[i]+mn; }; auto getpre=[&](vector<int>&pre,const vector<int>&ori) { pre.resize(ori.size()); if(ori.empty()) return; pre[0]=ori[0]; for(int i=1;i<(int)ori.size();i++) pre[i]=pre[i-1]+ori[i]; }; int lstmx=mx[bp]; vAdd(p); idx[p]=nQuery(p); mx[bp]=nxt[p]=n*2; se[bp]=0; auto it=pos[bp].begin(); for(;it<pos[bp].end();it++) { j=*it; if(j>p) break; nxt[j]+=add[bp]; if(nxt[j]>lstmx) nxt[j]+=mxadd[bp]; nxt[j]=min(nxt[j],idx[p]); idx[j]+=add[bp]; if(nxt[j]>mx[bp]) se[bp]=mx[bp],mx[bp]=nxt[j]; else if(nxt[j]>se[bp]) se[bp]=nxt[j]; } it=pos[bp].insert(it,p); for(it++;it!=pos[bp].end();it++) { j=*it; nxt[j]+=add[bp]; if(nxt[j]>lstmx) nxt[j]+=mxadd[bp]; nxt[j]++; idx[j]+=add[bp]+1; if(nxt[j]>mx[bp]) se[bp]=mx[bp],mx[bp]=nxt[j]; else if(nxt[j]>se[bp]) se[bp]=nxt[j]; } for(int j:pos[bp]) { if(nxt[j]==mx[bp]) mxval[bp].push_back(nxt[j]-idx[j]); else val[bp].push_back(nxt[j]-idx[j]); } add[bp]=mxadd[bp]=0; radixsort(val[bp]); 
getpre(pre[bp],val[bp]); radixsort(mxval[bp]); getpre(mxpre[bp],mxval[bp]); for(j=bp+1;j<=bn;j++) add[j]++,mx[j]++,se[j]++; for(j=1;j<bp;j++) { if(mx[j]<=idx[p]) continue; if(se[j]<idx[p]) { mxadd[j]+=idx[p]-mx[j],mx[j]=idx[p]; continue; } val[j].clear(); mxval[j].clear(); lstmx=mx[j]; mx[j]=idx[p],se[j]=0; for(int x:pos[j]) { nxt[x]+=add[j]; idx[x]+=add[j]; if(nxt[x]>lstmx) nxt[x]+=mxadd[j]; if(nxt[x]>=idx[p]) { nxt[x]=idx[p]; mxval[j].push_back(nxt[x]-idx[x]); } else { if(nxt[x]>se[j]) se[j]=nxt[x]; val[j].push_back(nxt[x]-idx[x]); } } add[j]=mxadd[j]=0; radixsort(val[j]); getpre(pre[j],val[j]); radixsort(mxval[j]); getpre(mxpre[j],mxval[j]); } for(int ki:ks[i]) { tot++; for(j=1;j<=bn;j++) { auto it=lower_bound(val[j].begin(),val[j].end(),ki); ans[tot]+=(val[j].end()-it)*ki; if(it!=val[j].begin()) ans[tot]+=pre[j][it-val[j].begin()-1]; it=lower_bound(mxval[j].begin(),mxval[j].end(),ki-mxadd[j]); ans[tot]+=(mxval[j].end()-it)*ki; if(it!=mxval[j].begin()) ans[tot]+=(it-mxval[j].begin())*mxadd[j]+mxpre[j][it-mxval[j].begin()-1]; } } } } int main() { ios::sync_with_stdio(false),cin.tie(0); int T; cin>>T; while(T--) { int i,ki,tot=0; cin>>n; bn=(n-1)/B+1; for(i=1;i<=n;i++) cin>>a[i]; for(i=1;i<=n;i++) { cin>>ki; ks[i].resize(ki); for(int &it:ks[i]) { cin>>it; ans[++tot]=-(i+it-1); } } vWork(); reverse(a+1,a+n+1); for(int x=1;x<=n;x++) t[x]=0; for(i=1;i<=bn;i++) mx[i]=0,se[i]=0,val[i].clear(),mxval[i].clear(),pos[i].clear(); vWork(); for(int x=1;x<=n;x++) t[x]=0; for(i=1;i<=bn;i++) mx[i]=0,se[i]=0,val[i].clear(),mxval[i].clear(),pos[i].clear(); for(i=1;i<=tot;i++) cout<<ans[i]<<'\n'; tot=0; } return 0; }
1968
A
Maximize?
You are given an integer $x$. Your task is to find any integer $y$ $(1\le y<x)$ such that $\gcd(x,y)+y$ is maximum possible. \textbf{Note that if there is more than one $y$ which satisfies the statement, you are allowed to find any.} $\gcd(a,b)$ is the Greatest Common Divisor of $a$ and $b$. For example, $\gcd(6,4)=2$.
The core idea is to find the upper bound of $\gcd(x,y)+y$. Let us look closer at the formula $\gcd(x,y)+y$. It is a well-known fact that $\gcd(a,b)=\gcd(a-b,b)$ for $a\geq b$. Applying it to our formula, we get $\gcd(x-y,y)+y$. Using the fact that $\gcd(x-y,y)\le x-y$, we get $\gcd(x-y,y)+y\le x-y+y=x$. Hence, $\gcd(x,y)+y\le x$ for $1\le y<x$. For $y=x-1$, we have $\gcd(x,x-1)+x-1=1+x-1=x$, which is the maximal possible value. Note that the constraints allow finding the optimal $y$ in $O(x)$, while the above solution works in $O(1)$.
[ "brute force", "math", "number theory" ]
800
#include <iostream>

// CF 1968A "Maximize?": for 1 <= y < x, gcd(x, y) + y <= x always holds,
// and y = x - 1 attains the bound (gcd(x, x-1) = 1), so print x - 1.
int main() {
    int testCases;
    std::cin >> testCases;
    for (int tc = 0; tc < testCases; ++tc) {
        int x;
        std::cin >> x;
        std::cout << x - 1 << "\n";
    }
    return 0;
}
1968
B
Prefiquence
You are given two binary strings $a$ and $b$. A binary string is a string consisting of the characters '0' and '1'. Your task is to determine the maximum possible number $k$ such that a prefix of string $a$ of length $k$ is a subsequence of string $b$. A sequence $a$ is a subsequence of a sequence $b$ if $a$ can be obtained from $b$ by the deletion of several (possibly, zero or all) elements.
We will be solving this task using dynamic programming. Let us define $dp_i$ as the maximal prefix of $a$ that is contained in $b_1,\dots,b_i$ as a subsequence. Then the transitions are as follows: if $b_i$ is equal to $a_{dp_{i-1}+1}$ then $dp_i = dp_{i-1} + 1$. otherwise $dp_i=dp_{i-1}$. The answer is $dp_{m}$.
[ "greedy", "two pointers" ]
800
#include <iostream>
#include <vector>
using namespace std;

// CF 1968B "Prefiquence": longest prefix of a that is a subsequence of b.
// Greedy matching (match each next character of a as early as possible in
// b) computes exactly the editorial's dp value dp[m].
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, m;
        cin >> n >> m;
        vector<char> a(n), b(m);
        for (char& c : a) cin >> c;   // binary string of a, one char at a time
        for (char& c : b) cin >> c;   // binary string of b
        int matched = 0;              // length of the prefix of a matched so far
        for (int j = 0; j < m; ++j) {
            if (matched < n && b[j] == a[matched]) {
                ++matched;
            }
        }
        cout << matched << "\n";
    }
    return 0;
}
1968
C
Assembly via Remainders
You are given an array $x_2,x_3,\dots,x_n$. Your task is to find \textbf{any} array $a_1,\dots,a_n$, where: - $1\le a_i\le 10^9$ for all $1\le i\le n$. - $x_i=a_i \bmod a_{i-1}$ for all $2\le i\le n$. Here $c\bmod d$ denotes the remainder of the division of the integer $c$ by the integer $d$. For example $5 \bmod 2 = 1$, $72 \bmod 3 = 0$, $143 \bmod 14 = 3$. \textbf{Note that if there is more than one $a$ which satisfies the statement, you are allowed to find any.}
Notice that $((a+b) \bmod a)=b$ for $0\le b< a$. So we may try to generate a sequence with $b=x_i$. Let us take $a_1=1000$, because $1000$ is larger than any of $x_i$. Then, we can take $a_i$ as $a_{i-1}+x_i$, since $((a_{i-1}+x_i)\bmod a_{i-1})=x_i$ will be hold. The maximal value of $a$ will be at most $1000+500n$ what is smaller than $10^9$.
[ "constructive algorithms", "number theory" ]
1,000
#include <iostream>

// CF 1968C "Assembly via Remainders": choose a_1 = 1000 (larger than any
// x_i) and a_i = a_{i-1} + x_i; then a_i mod a_{i-1} = x_i because
// 0 <= x_i < a_{i-1}, and all values stay far below 1e9.
int main() {
    int tests;
    std::cin >> tests;
    while (tests--) {
        int n;
        std::cin >> n;
        int value = 1000;              // a_1
        std::cout << value << " ";
        for (int i = 2; i <= n; ++i) {
            int x;
            std::cin >> x;
            value += x;                // a_i = a_{i-1} + x_i
            std::cout << value << " ";
        }
        std::cout << "\n";
    }
    return 0;
}
1968
D
Permutation Game
Bodya and Sasha found a permutation $p_1,\dots,p_n$ and an array $a_1,\dots,a_n$. They decided to play a well-known "Permutation game". A permutation of length $n$ is an array consisting of $n$ distinct integers from $1$ to $n$ in arbitrary order. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array), and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array). Both of them chose a starting position in the permutation. The game lasts $k$ turns. The players make moves simultaneously. On each turn, two things happen to each player: - If the current position of the player is $x$, his score increases by $a_x$. - Then the player either \textbf{stays} at his current position $x$ or \textbf{moves} from $x$ to $p_x$. The winner of the game is the player with the higher score after exactly $k$ turns.Knowing Bodya's starting position $P_B$ and Sasha's starting position $P_S$, determine who wins the game if both players are trying to win.
Because $p$ is a permutation, it will be divided into cycles. For every player it is optimal to move during the first $min(n,k)$ turns. We will answer the question by calculating maximal possible score for both players. Let us define $sum_i$ and $pos_i$: sum of values in the positions that occur during the first $i$ turns. position in which player will stop if he decides to move $i$ times. $sum_i = sum_{i-1} + a_{pos_{i}}$ $pos_i$=$p_{pos_{i-1}}$. Now the maximal possible answer for one player is equal to: $ans=\max\limits_{0\le i\le \min(n,k)}(sum_i+(k-i)a_{pos_i})$ Now we will compare maximal possible answers for every player.
[ "brute force", "dfs and similar", "games", "graphs", "greedy", "math" ]
1,300
#include<bits/stdc++.h>
using namespace std;

// Maximal score a single player can reach starting at position s with k
// turns: walk along the permutation cycle, and at every freshly visited
// position consider staying there for all remaining turns.
long long score(vector<int>& p, vector<int>& a, int s, int k) {
    const int n = (int)p.size();
    vector<bool> seen(n, false);
    long long best = 0;
    long long collected = 0;
    int pos = s;
    for (int left = k; left > 0 && !seen[pos]; --left) {
        seen[pos] = true;
        // Option: stop moving here and earn a[pos] for all `left` turns.
        best = max(best, collected + (long long)left * a[pos]);
        collected += a[pos];
        pos = p[pos];
    }
    return best;
}

int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n, k, startB, startS;
        cin >> n >> k >> startB >> startS;
        vector<int> p(n), a(n);
        for (int i = 0; i < n; ++i) {
            cin >> p[i];
            --p[i];  // to 0-based
        }
        for (int i = 0; i < n; ++i) cin >> a[i];
        long long bodya = score(p, a, startB - 1, k);
        long long sasha = score(p, a, startS - 1, k);
        if (bodya > sasha) cout << "Bodya\n";
        else if (bodya < sasha) cout << "Sasha\n";
        else cout << "Draw\n";
    }
}
1968
E
Cells Arrangement
You are given an integer $n$. You choose $n$ cells $(x_1,y_1), (x_2,y_2),\dots,(x_n,y_n)$ in the grid $n\times n$ where $1\le x_i\le n$ and $1\le y_i\le n$. Let $\mathcal{H}$ be the set of \textbf{distinct} Manhattan distances between any pair of cells. Your task is to maximize the size of $\mathcal{H}$. Examples of sets and their construction are given in the notes. \textbf{If there exists more than one solution, you are allowed to output any.} Manhattan distance between cells $(x_1,y_1)$ and $(x_2,y_2)$ equals $|x_1-x_2|+|y_1-y_2|$.
Author: JuicyGrape What is the maximal possible size of $\mathcal{H}$? Can you always get that size for $n\geq 4$? Consider odd and even distances independently. Let us find an interesting pattern for $n\geq 4$. Can you generalize the pattern? We put $n-2$ cells on the main diagonal. Then put two cells at $(n-1,n)$ and $(n,n)$. But why does it work? Interesting fact, that in such way we generate all possible Manhattan distances. Odd distances are generated between cells from the main diagonal and $(n-1,n)$. Even distances are generated between cells from the main diagonal and $(n,n)$.
[ "constructive algorithms" ]
1,600
#include<iostream>
using namespace std;

// Constructive answer: n-2 cells on the main diagonal plus the two cells
// (n-1, n) and (n, n). Odd distances come from the diagonal vs (n-1, n),
// even ones from the diagonal vs (n, n).
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        for (int d = 1; d + 2 <= n; ++d)
            cout << d << ' ' << d << "\n";
        cout << n - 1 << ' ' << n << "\n";
        cout << n << ' ' << n << "\n";
    }
}
1968
F
Equal XOR Segments
Let us call an array $x_1,\dots,x_m$ interesting if it is possible to divide the array into $k>1$ parts so that bitwise XOR of values from each part are equal. More formally, you must split array $x$ into $k$ consecutive segments, each element of $x$ must belong to \textbf{exactly} $1$ segment. Let $y_1,\dots,y_k$ be the XOR of elements from each part respectively. Then $y_1=y_2=\dots=y_k$ must be fulfilled. For example, if $x = [1, 1, 2, 3, 0]$, you can split it as follows: $[\color{blue}1], [\color{green}1], [\color{red}2, \color{red}3, \color{red}0]$. Indeed $\color{blue}1=\color{green}1=\color{red}2 \oplus \color{red}3\oplus \color{red}0$. You are given an array $a_1,\dots,a_n$. Your task is to answer $q$ queries: - For fixed $l$, $r$, determine whether the subarray $a_l,a_{l+1},\dots,a_r$ is interesting.
Observation: any division on more than $k>3$ segments can be reduced to at most $3$ segments. Proof: suppose the XORs of elements from the segments are $x_1,\dots,x_k$. The condition $x_1=x_2=\dots=x_k$ must be fulfilled. If $k>3$ we may take any three consecutive segments and merge them into one, because $x\oplus x\oplus x=x$. In such way, we reduce $k$ by two. We may repeat this process while $k>3$. Let us construct an array $b_1=a_1$ and $b_i=b_{i-1}\oplus a_i$ for $i\geq 2$. Now, $a_l\oplus a_{l+1}\oplus \dots \oplus a_r=b_r\oplus b_{l-1}$. There are two cases: $k=2$, meaning that we divide the segment $[l,r]$ into two segments. Suppose the segments are $[l,m]$ and $[m+1,r]$. In such case we must check if $b_m\oplus b_{l-1}=b_{r}\oplus b_m$. This equality reduces to $b_{l-1}=b_r$. $k=3$, meaning that we divide segment $[l,r]$ into $[l,s]$, $[s+1,t]$ and $[t+1,r]$. We must check if $b_s\oplus b_{l-1}=b_{t}\oplus b_s$ and $b_s \oplus b_{t}=b_r \oplus b_{t}$. It is equivalent to check if $b_{l-1}=b_t$ and $b_s=b_r$. Also, $s<t$ must be fulfilled. Hence, we may find the largest $t < r$ that $b_t=b_{l-1}$ and the minimal $s\geq l$ that $b_s=b_r$. It can be done using binary search on the positions of a certain value.
[ "binary search", "data structures" ]
1,800
#include<bits/stdc++.h>
using namespace std;

// Equal XOR Segments: with prefix XORs pre[], a split into 2 parts exists
// iff pre[l-1] == pre[r]; a split into 3 parts exists iff there are
// indices l <= s < t < r with pre[s] == pre[r] and pre[t] == pre[l-1].
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, q;
        cin >> n >> q;
        vector<int> pre(n + 1, 0);
        map<int, vector<int>> where;  // prefix value -> sorted positions
        where[0].push_back(0);
        for (int i = 1; i <= n; ++i) {
            int x;
            cin >> x;
            pre[i] = pre[i - 1] ^ x;
            where[pre[i]].push_back(i);
        }
        while (q--) {
            int l, r;
            cin >> l >> r;
            if (pre[l - 1] == pre[r]) {
                cout << "YES\n";
                continue;
            }
            // Largest t < r with pre[t] == pre[l-1]; position l-1 always
            // qualifies, so the iterator is valid.
            auto& lhs = where[pre[l - 1]];
            int bigT = *prev(lower_bound(lhs.begin(), lhs.end(), r));
            // Smallest s >= l with pre[s] == pre[r]; position r qualifies.
            auto& rhs = where[pre[r]];
            int smallS = *lower_bound(rhs.begin(), rhs.end(), l);
            cout << (smallS < bigT ? "YES\n" : "NO\n");
        }
        if (t) cout << "\n";
    }
}
1968
G1
Division + LCP (easy version)
\textbf{This is the easy version of the problem. In this version $l=r$.} You are given a string $s$. For a fixed $k$, consider a division of $s$ into exactly $k$ continuous substrings $w_1,\dots,w_k$. Let $f_k$ be the maximal possible $LCP(w_1,\dots,w_k)$ among all divisions. $LCP(w_1,\dots,w_m)$ is the length of the Longest Common Prefix of the strings $w_1,\dots,w_m$. For example, if $s=abababcab$ and $k=4$, a possible division is $\textcolor{red}{ab}\textcolor{blue}{ab}\textcolor{orange}{abc}\textcolor{green}{ab}$. The $LCP(\textcolor{red}{ab},\textcolor{blue}{ab},\textcolor{orange}{abc},\textcolor{green}{ab})$ is $2$, since $ab$ is the Longest Common Prefix of those four strings. Note that each substring consists of a continuous segment of characters and each character belongs to \textbf{exactly} one substring. Your task is to find $f_l,f_{l+1},\dots,f_r$. \textbf{In this version $l=r$}.
Consider $F_{\ell}$ is a function which returns true iff it is possible to divide the string into at least $k$ segments with their LCP equal to $\ell$. Notice that $F_{\ell}$ implies $F_{\ell - 1}$ for $\ell > 0$. So we may find the maximal $\ell$ using binary search, which will be the answer for the problem. How to compute $F_{\ell}$? Let us find Z-function $z_1,\dots,z_n$ of the given string. Notice that in the division $w_1,\dots,w_k$ we have $w_1$ as the prefix of the given string and all the strings have the common prefix with $w_1$ of length $\ell$. Notice that $z_p$ gives us the longest common prefix with $w_1$ and a segment starting at $p$. So if $z_p\geq \ell$ we may take this segment into account. In such greedy approach we count the maximal number of segments we can divide our string into. If this number is at least $k$, then $F_{\ell}$ is true. The complexity is $O(n\log n)$.
[ "binary search", "data structures", "dp", "hashing", "string suffix structures", "strings" ]
1,900
#include<bits/stdc++.h>
using namespace std;

// Standard Z-function: z[i] = length of the longest common prefix of str
// and its suffix starting at i (z[0] left as 0).
vector<int> Zfunc(string& str) {
    const int n = (int)str.size();
    vector<int> z(n, 0);
    for (int i = 1, l = 0, r = 0; i < n; ++i) {
        if (i <= r) z[i] = min(r - i + 1, z[i - l]);
        while (i + z[i] < n && str[z[i]] == str[i + z[i]]) ++z[i];
        if (i + z[i] - 1 > r) {
            l = i;
            r = i + z[i] - 1;
        }
    }
    return z;
}

// Greedy: maximal number of pieces the string can be cut into so that
// every piece starts with the length-`len` prefix of the string.
int f(vector<int>& z, int len) {
    const int n = (int)z.size();
    int pieces = 1;  // the prefix itself
    int i = len;
    while (i < n) {
        if (z[i] >= len) {
            ++pieces;
            i += len;
        } else {
            ++i;
        }
    }
    return pieces;
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        string s;
        // In the easy version l == r, so both bounds are read into k.
        cin >> n >> k >> k >> s;
        vector<int> z = Zfunc(s);
        // Binary search the largest len with f(len) >= k.
        int lo = 0, hi = n + 1;
        while (hi - lo > 1) {
            int mid = (lo + hi) / 2;
            if (f(z, mid) >= k) lo = mid;
            else hi = mid;
        }
        cout << lo << "\n";
    }
}
1968
G2
Division + LCP (hard version)
\textbf{This is the hard version of the problem. In this version $l\le r$.} You are given a string $s$. For a fixed $k$, consider a division of $s$ into exactly $k$ continuous substrings $w_1,\dots,w_k$. Let $f_k$ be the maximal possible $LCP(w_1,\dots,w_k)$ among all divisions. $LCP(w_1,\dots,w_m)$ is the length of the Longest Common Prefix of the strings $w_1,\dots,w_m$. For example, if $s=abababcab$ and $k=4$, a possible division is $\textcolor{red}{ab}\textcolor{blue}{ab}\textcolor{orange}{abc}\textcolor{green}{ab}$. The $LCP(\textcolor{red}{ab},\textcolor{blue}{ab},\textcolor{orange}{abc},\textcolor{green}{ab})$ is $2$, since $ab$ is the Longest Common Prefix of those four strings. Note that each substring consists of a continuous segment of characters and each character belongs to \textbf{exactly} one substring. Your task is to find $f_l,f_{l+1},\dots,f_r$.
In general, in this version we must answer for each $k\in\{1,2,\dots,n\}$. We will use a similar idea as in the easy version considering two cases: $k\le \sqrt{n}$. We may calculate these values as in the easy version in $O(n\sqrt{n}\log n)$. $k> \sqrt{n}$. As we divide the string into $k$ segments and the LCP is $\ell$, then $k\cdot \ell\le n$. We get, that $\ell\le \sqrt{n}$. We may find the maximal possible $k$ for a fixed $\ell$ as our answer. It works in $O(n\sqrt{n})$. There are other approaches to solving this problem, but the authors believe this solution is the simplest one.
[ "binary search", "brute force", "data structures", "dp", "hashing", "math", "string suffix structures", "strings" ]
2,200
#include<bits/stdc++.h>
using namespace std;

// Z-function: z[i] = LCP of str and the suffix of str starting at i.
vector<int> Zfunc(string& str) {
    const int n = (int)str.size();
    vector<int> z(n, 0);
    for (int i = 1, l = 0, r = 0; i < n; ++i) {
        if (i <= r) z[i] = min(r - i + 1, z[i - l]);
        while (i + z[i] < n && str[z[i]] == str[i + z[i]]) ++z[i];
        if (i + z[i] - 1 > r) {
            l = i;
            r = i + z[i] - 1;
        }
    }
    return z;
}

// Maximal number of pieces whose common prefix has length `len`.
int f(vector<int>& z, int len) {
    const int n = (int)z.size();
    int pieces = 1;
    int i = len;
    while (i < n) {
        if (z[i] >= len) {
            ++pieces;
            i += len;
        } else {
            ++i;
        }
    }
    return pieces;
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, L, R;
        string s;
        cin >> n >> L >> R >> s;
        vector<int> z = Zfunc(s);
        const int E = ceil(sqrt(n));  // sqrt-decomposition threshold
        vector<int> ans(n + 1, 0);
        // Small k (k <= sqrt n): binary search the best prefix length.
        for (int k = 1; k <= E; ++k) {
            int lo = 0, hi = n + 1;
            while (hi - lo > 1) {
                int mid = (lo + hi) / 2;
                if (f(z, mid) >= k) lo = mid;
                else hi = mid;
            }
            ans[k] = lo;
        }
        // Small len (len <= sqrt n): greedy count of pieces for that len.
        for (int len = 1; len <= E; ++len) {
            int pieces = 1;
            int i = len;
            while (i < n) {
                if (z[i] >= len) {
                    ++pieces;
                    i += len;
                } else {
                    ++i;
                }
            }
            ans[pieces] = max(ans[pieces], len);
        }
        // f_k is non-increasing in k, so take suffix maxima.
        for (int k = n - 1; k >= 1; --k) ans[k] = max(ans[k], ans[k + 1]);
        for (int k = L; k <= R; ++k) cout << ans[k] << ' ';
        cout << "\n";
    }
}
1969
A
Two Friends
Monocarp wants to throw a party. He has $n$ friends, and he wants to have at least $2$ of them at his party. The $i$-th friend's best friend is $p_i$. All $p_i$ are distinct, and for every $i \in [1, n]$, $p_i \ne i$. Monocarp can send invitations to friends. The $i$-th friend comes to the party if \textbf{both the $i$-th friend and the $p_i$-th friend} receive an invitation (note that the $p_i$-th friend doesn't have to actually come to the party). Each invitation is sent to exactly one of the friends. For example, if $p = [3, 1, 2, 5, 4]$, and Monocarp sends invitations to the friends $[1, 2, 4, 5]$, then the friends $[2, 4, 5]$ will come to the party. The friend $1$ won't come since his best friend didn't receive an invitation; the friend $3$ won't come since he didn't receive an invitation. Calculate the minimum number of invitations Monocarp has to send so that \textbf{at least $2$} friends come to the party.
Obviously, you can't send fewer than $2$ invitations. Since all $p_i \neq i$, you need to send at least $2$ invitations ($i$ and $p_i$) in order for at least some friend $i$ to come. On the other hand, you never need to send more than $3$ invitations. You can always send invitations to friends $i, p_i$, and $p_{p_i}$, so that $i$ and $p_i$ come. Now we need to determine the condition for when two invitations are enough. That is, we send invitations to friends $i$ and $j$, and both of them come. This means $p_i = j$ and $p_j = i$. This check is already enough to solve the problem in $O(n^2)$. But you can think further and see that since $j = p_i$, the second check becomes $p_{p_i} = i$. This means it is enough to iterate over friend $i$ and check if $p_{p_i} = i$ for at least one of them. Overall complexity: $O(n)$ per testcase.
[ "constructive algorithms", "implementation", "math" ]
800
# Two Friends: the answer is 2 iff some mutual pair exists (p[p[i]] == i),
# otherwise 3 invitations always suffice.
for _ in range(int(input())):
    n = int(input())
    best = [int(v) - 1 for v in input().split()]
    invites = 3
    if any(best[best[i]] == i for i in range(n)):
        invites = 2
    print(invites)
1969
B
Shifts and Sorting
Let's define a cyclic shift of some string $s$ as a transformation from $s_1 s_2 \dots s_{n-1} s_{n}$ into $s_{n} s_1 s_2 \dots s_{n-1}$. In other words, you take one last character $s_n$ and place it before the first character while moving all other characters to the right. You are given a binary string $s$ (a string consisting of only 0-s and/or 1-s). In one operation, you can choose any substring $s_l s_{l+1} \dots s_r$ ($1 \le l < r \le |s|$) and cyclically shift it. The cost of such operation is equal to $r - l + 1$ (or the length of the chosen substring). You can perform the given operation any number of times. What is the minimum \textbf{total} cost to make $s$ sorted in non-descending order?
Let's look at the operation as the following: you choose $(l, r)$, erase the element at position $r$ and then insert it before the element at position $l$. We can also interpret the cost of such operation as the following: you pay $1$ for the element at position $r$ you "teleport" to the left and $1$ for each element you teleport through (element inside segment $[l, r - 1]$). Now let's look at two indices $x < y$ where $a_x = 1$ and $a_y = 0$. Since at the end, all zeroes should be before ones, you have to move $a_y$ to the left of $a_x$. But the only thing that moves to the left is element $a_r$, so you have to make at least one operation ending at $a_y$. What does it mean? It means: for every 0 that has at least one 1 from the left, you have to pay at least $1$ for teleporting it to the left; for every 1, if there are $c$ 0-s to the right, you have to pay at least $c$, since each zero should be teleported through this 1. The thoughts above gave us the lower bound on the answer, and it's not hard to come up with some constructive algorithms that will give us exactly that cost. To calculate the lower bound, you just need to maintain some info while iterating from left to right: for example, the number of 0-s and 1-s to the left of the current position and the total number of 0-s in $s$. It's enough to check: is there any 1 to the left of the current position, and how many 0-s are to the right. Instead of calculating the lower bound itself, you can also implement one of the algorithms that reach that lower bound, and it may be even a little easier.
[ "constructive algorithms", "greedy" ]
1,000
// Shifts and Sorting: every 0 with at least one 1 before it costs 1 for the
// final teleport, and every 1 pays once per 0 to its right.
fun main() {
    repeat(readln().toInt()) {
        val bits = readln().map { it - '0' }
        val totalZeroes = bits.count { it == 0 }
        var zeroesSeen = 0
        var onesSeen = 0
        var cost = 0L
        for (bit in bits) {
            if (bit == 0) {
                zeroesSeen++
                if (onesSeen > 0) cost++
            } else {
                onesSeen++
                cost += (totalZeroes - zeroesSeen).toLong()
            }
        }
        println(cost)
    }
}
1969
C
Minimizing the Sum
You are given an integer array $a$ of length $n$. You can perform the following operation: choose an element of the array and replace it with any of its neighbor's value. For example, if $a=[3, 1, 2]$, you can get one of the arrays $[3, 3, 2]$, $[3, 2, 2]$ and $[1, 1, 2]$ using one operation, but not $[2, 1, 2$] or $[3, 4, 2]$. Your task is to calculate the minimum possible total sum of the array if you can perform the aforementioned operation at most $k$ times.
The small values of $k$ leads to the idea that expected solution is dynamic programming. In fact, we can actually design a dynamic programming solution. Let $dp_{i, j}$ be the minimum sum, if we considered the first $i$ elements and already done $j$ operations. Note that, we can turn a segment of length $d+1$ into a minimum on it using $d$ operations. So the transitions can be done by iterating over the length of the next segment (denote it as $d$) and we can update $dp_{i + d + 1, j + d}$ with $dp_{i, j} + (d + 1) \cdot x$, where $x$ is the minimum among $a_i, a_{i + 1}, \dots, a_{i + d - 1}$ (that can be maintained in a single variable during iteration over $d$). There are $O(nk)$ states in the dynamic programming and $O(k)$ transitions from each of them, so the solution works in $O(nk^2)$.
[ "dp", "implementation" ]
1,700
#include <bits/stdc++.h>
using namespace std;
using li = long long;

const li INF = 1e18;

// Minimizing the Sum: dp[i][j] = minimal sum of the first i elements after
// spending j operations; a segment of length d+1 is collapsed to d+1 copies
// of its minimum at the cost of d operations.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        vector<li> a(n);
        for (auto& v : a) cin >> v;
        vector<vector<li>> dp(n + 1, vector<li>(k + 1, INF));
        dp[0][0] = 0;
        for (int i = 0; i < n; ++i) {
            for (int j = 0; j <= k; ++j) {
                li best = INF;  // running minimum of a[i..i+d]
                for (int d = 0; i + d < n && j + d <= k; ++d) {
                    best = min(best, a[i + d]);
                    li cand = dp[i][j] + (d + 1) * best;
                    dp[i + d + 1][j + d] = min(dp[i + d + 1][j + d], cand);
                }
            }
        }
        cout << *min_element(dp[n].begin(), dp[n].end()) << '\n';
    }
}
1969
D
Shop Game
Alice and Bob are playing a game in the shop. There are $n$ items in the shop; each item has two parameters: $a_i$ (item price for Alice) and $b_i$ (item price for Bob). Alice wants to choose a subset (possibly empty) of items and buy them. After that, Bob does the following: - if Alice bought less than $k$ items, Bob can take all of them for free; - otherwise, he will take $k$ items for free that Alice bought (Bob chooses which $k$ items it will be), and for the rest of the chosen items, Bob will buy them from Alice and pay $b_i$ for the $i$-th item. Alice's profit is equal to $\sum\limits_{i \in S} b_i - \sum\limits_{j \in T} a_j$, where $S$ is the set of items Bob buys from Alice, and $T$ is the set of items Alice buys from the shop. In other words, Alice's profit is the difference between the amount Bob pays her and the amount she spends buying the items. Alice wants to maximize her profit, Bob wants to minimize Alice's profit. Your task is to calculate Alice's profit if both Alice and Bob act optimally.
Let's sort the array in descending order based on the array $b$. For a fixed set of Alice's items, Bob will take the first $k$ of them for free (because they are the most expensive) and pay for the rest. Now we can iterate over the first item that Bob will pay (denote it as $i$). Alice has to buy the cheapest $k$ items among $1, 2, \dots, i-1$ (denote the sum of these values as $f$), because Bob can take them for free. Bob has to pay for each of the items among $i, i+1, \dots, n$ that Alice will buy. So Alice will buy all the items with $b_i - a_i > 0$ (denote the sum of these values as $p$). Then the Alice's profit is $p - f$. Thus, we got a solution that works in $O(n^2)$. In order to speed up this solution, we have to calculate the values $f$ and $p$ faster than $O(n)$. We can do it as follows: while iterating over the value of $i$, let's store "free" items in the ordered set, and when the size of this set becomes larger than $k$, remove the most expensive element from it; and the value of $p$ can be calculated using prefix sums (over the values $\max(0, b_i - a_i)$) or maintaining a variable (and update it when moving to the next value of $i$).
[ "data structures", "greedy", "math", "sortings" ]
1,900
#include <bits/stdc++.h>
using namespace std;
using li = long long;

// Shop Game: sort items by b descending and sweep the first item Bob pays
// for. Everything before it that Alice buys, Bob takes for free, so Alice
// must pre-buy the k cheapest (by a) of the prefix; from the suffix she
// keeps every item with positive margin b - a.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        vector<int> a(n), b(n);
        for (auto& x : a) cin >> x;
        for (auto& x : b) cin >> x;
        vector<int> idx(n);
        iota(idx.begin(), idx.end(), 0);
        sort(idx.begin(), idx.end(), [&](int x, int y) { return b[x] > b[y]; });
        li freeCost = 0;  // sum of the k cheapest a-values in the prefix
        li profit = 0;    // sum of max(0, b - a) over the suffix
        for (int i : idx) profit += max(0, b[i] - a[i]);
        multiset<int> cheapest;  // a-values of the items given away for free
        li ans = 0;
        if ((int)cheapest.size() == k) ans = max(ans, profit - freeCost);
        for (int i : idx) {
            profit -= max(0, b[i] - a[i]);
            cheapest.insert(a[i]);
            freeCost += a[i];
            if ((int)cheapest.size() > k) {
                auto worst = prev(cheapest.end());
                freeCost -= *worst;
                cheapest.erase(worst);
            }
            if ((int)cheapest.size() == k) ans = max(ans, profit - freeCost);
        }
        cout << ans << '\n';
    }
}
1969
E
Unique Array
You are given an integer array $a$ of length $n$. A subarray of $a$ is one of its contiguous subsequences (i. e. an array $[a_l, a_{l+1}, \dots, a_r]$ for some integers $l$ and $r$ such that $1 \le l < r \le n$). Let's call a subarray unique if there is an integer that occurs exactly once in the subarray. You can perform the following operation any number of times (possibly zero): choose an element of the array and replace it with any integer. Your task is to calculate the minimum number of aforementioned operation in order for all the subarrays of the array $a$ to be unique.
When we replace an element, we can always choose an integer that is not present in the array. So, if we replace the $i$-th element, every subarray containing it becomes unique; and the problem can be reformulated as follows: consider all non-unique subarrays of the array, and calculate the minimum number of elements you have to choose so that, for every non-unique subarray, at least one of its elements is chosen. We can use the following greedy strategy to do it: go through the array from left to right, maintaining the index $s$ of the last element we replaced. When we consider the $i$-th element of the array, if there is a non-unique subarray $[l, r]$ with $l > s$, we replace the $i$-th element, otherwise we don't replace anything. Why is it optimal? Essentially, this greedy approach always finds a non-unique subarray $[l, r]$ with the lowest value of $r$, and replaces the $r$-th element. We obviously have to replace at least one element from the subarray $[l, r]$; but replacing the $r$-th element is optimal since we picked the lowest value of $r$, so every non-unique subarray which contains any element from $[l, r]$ also contains the $r$-th element. Okay, but we need to make this greedy solution work fast. When we consider the $i$-th element, how do we check that there's a non-unique subarray starting after the element $s$ and ending at the $i$-th element? Suppose we go from the $i$-th element to the left and maintain a counter; when we meet an element for the first time, we increase this counter; when we meet an element for the second time, we decrease this counter. If this counter is equal to $0$, then the current subarray is non-unique: every element appears at least twice. Otherwise, at least one element has exactly one occurrence. Suppose we maintain an array $t$ where for each integer present in the original array, we put $1$ in the last position we've seen this element, and $-1$ in the second-to-last position we've seen this element (i. e. 
for every element, we consider its two last occurrences among the first $i$ positions in the array, put $1$ in the last of them, and $-1$ in the second-to-last of them). Then, if we go from $i$ to $l$ and maintain the counter in the same way as we described in the previous paragraph, the value of this counter will be equal to the sum of the corresponding segment in this array $t$. So, we want to check if there's a segment in the array $t$ such that its left border is greater than $s$ (the last position where we made a replacement), the right border is $i$, and the sum is $0$. We can show that the sum on any segment ending in the $i$-th position is currently non-negative; so, we actually want to find the segment with the minimum sum. We can store a segment tree that, for every position $l$ from $s+1$ to $i$, maintains the sum on segment $[l, i]$; then changing an element is just performing the query "add on segment", and finding the minimum sum is just performing the query "minimum on segment". This allows us to get a solution with complexity of $O(n \log n)$.
[ "binary search", "data structures", "divide and conquer", "dp", "greedy" ]
2,400
#include <bits/stdc++.h>
using namespace std;
#define sz(a) int((a).size())

// Segment tree with lazy propagation supporting "add x on a range" and
// "minimum on a range". t[v] is the minimum over node v's segment, p[v]
// is a pending addition not yet pushed to the children.
vector<int> t, p;

// Push node v's pending addition down to both children.
void push(int v) {
    if (v * 2 + 2 >= sz(t)) return;
    t[v * 2 + 1] += p[v];
    p[v * 2 + 1] += p[v];
    t[v * 2 + 2] += p[v];
    p[v * 2 + 2] += p[v];
    p[v] = 0;
}

// Add x to every position of [L, R); node v covers [l, r).
void upd(int v, int l, int r, int L, int R, int x) {
    if (L >= R) return;
    if (l == L && r == R) { t[v] += x; p[v] += x; return; }
    int m = (l + r) / 2;
    push(v);
    upd(v * 2 + 1, l, m, l, min(m, R), x);
    upd(v * 2 + 2, m, r, max(m, L), R, x);
    t[v] = min(t[v * 2 + 1], t[v * 2 + 2]);
}

// Minimum over [L, R); node v covers [l, r). An empty range yields 1e9.
int get(int v, int l, int r, int L, int R) {
    if (L >= R) return 1e9;
    if (l == L && r == R) return t[v];
    int m = (l + r) / 2;
    push(v);
    return min( get(v * 2 + 1, l, m, l, min(m, R)), get(v * 2 + 2, m, r, max(m, L), R) );
}

// One test case of "Unique Array". Sweep i left to right; for every
// candidate left border l in [st, i] the tree stores the number of values
// occurring exactly once in a[l..i], encoded per value as +1 at its last
// occurrence and -1 at its second-to-last (older occurrences contribute 0).
// A minimum of 0 means some subarray ending at i has no unique element, so
// a[i] is replaced (ans++) and the sweep restarts after i.
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (auto& x : a) cin >> x, --x;
    t = p = vector<int>(4 * n);
    vector<vector<int>> pos(n);  // pos[x] = positions of value x seen so far
    int ans = 0, st = 0;         // st = first index after the last replacement
    for (int i = 0; i < n; ++i) {
        int x = a[i];
        pos[x].push_back(i);
        int k = sz(pos[x]);
        // New last occurrence of x: contributes +1 to segments covering it.
        if (k > 0) upd(0, 0, n, st, pos[x][k - 1] + 1, +1);
        // Previous last occurrence turns from +1 into -1 (net -2).
        if (k > 1) upd(0, 0, n, st, pos[x][k - 2] + 1, -2);
        // Previous second-to-last turns from -1 into 0 (net +1).
        if (k > 2) upd(0, 0, n, st, pos[x][k - 3] + 1, +1);
        // Non-unique subarray ending at i and starting after st? Replace a[i].
        if (get(0, 0, n, st, i + 1) == 0) { ans += 1; st = i + 1; }
    }
    cout << ans << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1969
F
Card Pairing
There is a deck of $n$ cards, each card has one of $k$ types. You are given the sequence $a_1, a_2, \dots, a_n$ denoting the types of cards in the deck from top to bottom. Both $n$ and $k$ are even numbers. You play a game with these cards. First, you draw $k$ topmost cards from the deck. Then, the following happens each turn of the game: - you choose \textbf{exactly} two cards from your hand and play them. If these cards have the same type, you earn a coin; - then, if the deck is not empty, you draw \textbf{exactly} two top cards from it; - then, if both your hand and your deck are empty, the game ends. Otherwise, the new turn begins. You have to calculate the maximum number of coins you can earn during the game.
It's pretty obvious that every time we have a pair of equal cards in hand, we should play one of these pairs. If you're interested in a formal proof, please read the paragraph in italic, otherwise skip it. Formal proof: suppose we have a pair of cards of type $x$, but we play a card of type $y$ and a card of type $z$ in the optimal solution, where $z \ne x$ and $z \ne y$. Let's show that playing a pair of cards of type $x$ can also be optimal. If $y = x$, then let's find the next moment when we play a card of type $x$. By swapping that card with a card of type $z$, we "fix" the current pair and possibly "break" the pair from which we took the card of type $x$, so our number of coins won't go down. If $y \ne x$, almost the same proof can be used: find the first two cards of type $x$ we play after that moment. Either they are played at the same moment, then we can swap the pairs we used at the current moment and at that moment. Or they are played at different moments $t_1$ and $t_2$; by swapping these two cards with cards of type $y$ and $z$, we "fix" the pair we played at the current moment, we might "break" the pair played at the moment $t_2$, but we cannot "break" the pair played at the moment $t_1$, since the cards played at that moment are different. Whenever we have a pair of equal cards, we will always play it at some moment and earn a coin, and it does not matter in which order we play different pairs. So we have to make a meaningful choice about which cards we play only when all $k$ cards in our hand are different. In the ideal situation, if there are $c_i$ cards of type $i$, we want to earn $\lfloor\frac{c_i}{2}\rfloor$ coins by playing these cards. But whenever we play only one card of type $i$ and there is an odd number of cards of type $i$ left (both in hand and in deck), we "lose" a coin, because the number of pairs we can make decreases by $1$. 
Let's calculate the answer as the maximum possible number of pairs we can form (equal to $\sum\limits_{i=1}^{k} \lfloor\frac{c_i}{2}\rfloor$), minus the minimum number of coins we "lose" in such a way. Since we play a pair when we have at least one pair, we can "lose" coins only when all cards in our hand are different. So, let's try to use a dynamic programming of the form: $dp_i$ is the minimum number of coins we could lose when we have drawn $i$ first cards from the deck, and all cards in our hand are different. Let's analyze the transitions of this dynamic programming. When transitioning out of $dp_i$, we can iterate on the pair of cards we play (since we consider the situation when all cards in our hand are different, we can play any pair of different cards); for each of these two cards, check if we "lose" a coin by playing them, and try to find the next moment when all $k$ cards in our hand will be different (or update the answer if there is no such moment). However, when implemented naively, it is too slow (might take up to $O(n^5)$). We can use the following optimizations to improve it: XOR Hashing: this is one of my favorite techniques. Let's assign each card type a random $64$-bit number (let it be $h_i$ for type $i$). Then, let $p_i$ be the XOR of the numbers assigned to the first $i$ cards in the deck. Suppose we are considering transitions from $dp_i$; we try to play cards of type $x$ and $y$; when will be the next moment when we have all $k$ types of cards? If this moment is $j$, then we need to take an odd number of cards of type $x$ and $y$ from moment $i$ to moment $j$, and an even number of cards for all other types. So, we can see that $p_j = p_i \oplus h_x \oplus h_y$, and this allows us to locate the next moment when we have $k$ different cards more easily, in $O(n)$ or even $O(\log n)$. 
Reducing the number of transitions: we have up to $O(k^2)$ pairs of cards we can play from each state, but only $O(n)$ different states we can go into, and no two transitions lead to the same state. Let's try to make only $O(n)$ transitions from each state. When considering a state, we can split all $k$ cards into two types: the ones that make us lose a coin when we play them (group $1$), and all the others (group $2$). First, let's try to play two cards from the group $2$; if we find a combination of them such that, after playing it, we never have $k$ different cards in our hand - we don't need any other transitions from this state, because this transition updates the answer directly without any increases. Otherwise, perform all transitions with pairs of cards from group $2$, and try to play a pair of cards from different groups. If we find a combination that updates the answer directly, we again can stop considering transitions, the next transitions we use won't be more optimal. And then we do the same with transitions where we use a pair of cards of group $1$. This way, we will consider at most $n+1$ transitions from each state. Combining these two optimizations results in a solution in $O(n^3)$ or $O(n^2 \log n)$, but there are other optimizations you can try.
[ "dp", "greedy", "hashing", "implementation" ]
3,000
#include <algorithm>
#include <iostream>
#include <random>
#include <vector>
using namespace std;

// Editorial approach: the answer equals the maximum number of pairs in the
// deck (sum over types of floor(count/2)) minus the minimum number of coins
// "lost".  A coin can only be lost from a state in which all k cards in the
// hand are pairwise distinct, so we run a DP over exactly those states:
//   dp[i] = minimum coins lost after drawing the first i cards, given that
//           the hand currently holds one card of every type.
// Each type gets a random nonzero 64-bit value; prefix XOR hashes of the
// deck let us locate the next all-distinct state after playing a pair.

mt19937_64 rnd(12341234);

int n, k;
vector<int> deck;       // deck[i] = 0-based type of the i-th card drawn
vector<int> dp;         // dp[i] = min coins lost at all-distinct state i
vector<long long> val;  // val[t] = nonzero random hash of type t
vector<long long> hs;   // hs[i] = XOR of val[] over the first i cards

// Occurrence parity of every type over the suffix deck[l..n-1]:
// result[t] is true iff type t appears an odd number of times there.
vector<bool> get_suffix(int l) {
    vector<bool> parity(k);
    for (int i = l; i < n; i++)
        parity[deck[i]] = !parity[deck[i]];
    return parity;
}

// Smallest index i in {x, x+2, ...}, i <= n, with hs[i] == cur: the next
// moment (necessarily of the same parity as x) at which the hand again
// consists of k distinct types.  Returns -1 if there is no such moment.
int get_next(long long cur, int x) {
    for (int i = x; i <= n; i += 2)
        if (hs[i] == cur) return i;
    return -1;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);

    cin >> n >> k;
    deck.resize(n);
    for (int i = 0; i < n; i++) {
        cin >> deck[i];
        --deck[i];
    }

    // Assign each type a nonzero random 64-bit hash.
    val.resize(k);
    for (int t = 0; t < k; t++)
        while (val[t] == 0) val[t] = rnd();

    // max_score = total number of pairs available; also build prefix hashes.
    int max_score = 0;
    hs.resize(n + 1);
    vector<bool> seen_odd(k);  // parity of each type among the cards drawn so far
    long long cur_hash = 0;
    for (int i = 0; i < n; i++) {
        int t = deck[i];
        cur_hash ^= val[t];
        if (seen_odd[t]) max_score++;  // this card completes a pair
        seen_odd[t] = !seen_odd[t];
        hs[i + 1] = cur_hash;
    }

    const int INF = 1e9;
    dp.assign(n + 1, 0);
    for (int i = k; i <= n; i++) dp[i] = INF;

    // Hash of a hand holding exactly one card of every type.
    long long all_distinct = 0;
    for (int t = 0; t < k; t++) all_distinct ^= val[t];

    int first = get_next(all_distinct, k);
    if (first == -1) {
        // The hand is never all-distinct, so no coin is ever lost.
        cout << max_score << "\n";
        return 0;
    }

    dp[first] = 0;
    int ans = INF;
    for (int p = k; p <= n; p += 2) {
        if (dp[p] >= INF) continue;  // state not reachable

        // Split the k types in the hand by the cost of playing them now:
        //   odd suffix parity  -> the type's remaining total count is even,
        //     so playing one card now breaks a future pair: costs a coin;
        //   even suffix parity -> playing it is free.
        vector<bool> suff = get_suffix(p);
        vector<int> free_t, costly_t;
        for (int t = 0; t < k; t++)
            (suff[t] ? costly_t : free_t).push_back(t);

        bool keep_going = true;
        // Play the pair (x, y) at total coin cost `add`: either relax the
        // next all-distinct state, or — if the hand is never all-distinct
        // again — the game "closes" here and we record a candidate answer.
        auto play = [&](int x, int y, int add) {
            long long h = hs[p] ^ val[x] ^ val[y];
            int nxt = get_next(h, p);
            if (nxt == -1) {
                keep_going = false;  // later (costlier) pairs can't improve
                ans = min(ans, dp[p] + add);
            } else {
                dp[nxt] = min(dp[nxt], dp[p] + add);
            }
        };

        // Consider the cheapest pairs first; once some pair ends the game we
        // stop exploring this state, giving at most n+1 transitions overall.
        int fs = free_t.size(), cs = costly_t.size();
        for (int i = 0; i < fs && keep_going; i++)
            for (int j = 0; j < i && keep_going; j++)
                play(free_t[i], free_t[j], 0);
        for (int i = 0; i < fs && keep_going; i++)
            for (int j = 0; j < cs && keep_going; j++)
                play(free_t[i], costly_t[j], 1);
        for (int i = 0; i < cs && keep_going; i++)
            for (int j = 0; j < i && keep_going; j++)
                play(costly_t[i], costly_t[j], 2);
    }
    cout << max_score - ans << "\n";
    return 0;
}
1970
A1
Balanced Shuffle (Easy)
A parentheses sequence is a string consisting of characters "(" and ")", for example "(()((". A balanced parentheses sequence is a parentheses sequence which can become a valid mathematical expression after inserting numbers and operations into it, for example "(()(()))". The balance of a parentheses sequence is defined as the number of opening parentheses "(" minus the number of closing parentheses ")". For example, the balance of the sequence "(()((" is 3. A balanced parentheses sequence can also be defined as a parentheses sequence with balance 0 such that each of its prefixes has a non-negative balance. We define the balanced shuffle operation that takes a parentheses sequence and returns a parentheses sequence as follows: first, for every character of the input sequence, we compute the balance of the prefix of the sequence before that character and write those down in a table together with the positions of the characters in the input sequence, for example: \begin{tabular}{l||c||c||c||c||c||c||c||c} Prefix balance & 0 & 1 & 2 & 1 & 2 & 3 & 2 & 1 \ \hline \hline Position & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \ \hline \hline Character & ( & ( & ) & ( & ( & ) & ) & ) \ \end{tabular} Then, we sort the columns of this table in increasing order of prefix balance, breaking ties in decreasing order of position. In the above example, we get: \begin{tabular}{l||c||c||c||c||c||c||c||c} Prefix balance & 0 & 1 & 1 & 1 & 2 & 2 & 2 & 3 \ \hline \hline Position & 1 & 8 & 4 & 2 & 7 & 5 & 3 & 6 \ \hline \hline Character & ( & ) & ( & ( & ) & ( & ) & ) \ \end{tabular} The last row of this table forms another parentheses sequence, in this case "()(()())". This sequence is called the result of applying the balanced shuffle operation to the input sequence, or in short just the balanced shuffle of the input sequence. You are given a balanced parentheses sequence. Print its balanced shuffle.
The problem statement describes exactly what needs to be done, so we just need to implement it carefully, using an $O(n\log n)$ sorting algorithm from the standard library. If you're using Python for this problem, the time limit can be a bit tight, so you might need to optimize a bit. For example, tuples are much faster than custom classes in Python, so representing each table column as a tuple (prefix balance, negated position, character) and sorting those tuples with the built-in sort makes the solution pass in 0.5s.
[ "implementation", "sortings" ]
1,000
null
1970
A2
Balanced Unshuffle (Medium)
\textbf{The differences with the easy version of this problem are highlighted in bold.} A parentheses sequence is a string consisting of characters "(" and ")", for example "(()((". A balanced parentheses sequence is a parentheses sequence which can become a valid mathematical expression after inserting numbers and operations into it, for example "(()(()))". The balance of a parentheses sequence is defined as the number of opening parentheses "(" minus the number of closing parentheses ")". For example, the balance of the sequence "(()((" is 3. A balanced parentheses sequence can also be defined as a parentheses sequence with balance 0 such that each of its prefixes has a non-negative balance. We define the balanced shuffle operation that takes a parentheses sequence and returns a parentheses sequence as follows: first, for every character of the input sequence, we compute the balance of the prefix of the sequence before that character and write those down in a table together with the positions of the characters in the input sequence, for example: \begin{tabular}{l||c||c||c||c||c||c||c||c} Prefix balance & 0 & 1 & 2 & 1 & 2 & 3 & 2 & 1 \ \hline \hline Position & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \ \hline \hline Character & ( & ( & ) & ( & ( & ) & ) & ) \ \end{tabular} Then, we sort the columns of this table in increasing order of prefix balance, breaking ties in decreasing order of position. In the above example, we get: \begin{tabular}{l||c||c||c||c||c||c||c||c} Prefix balance & 0 & 1 & 1 & 1 & 2 & 2 & 2 & 3 \ \hline \hline Position & 1 & 8 & 4 & 2 & 7 & 5 & 3 & 6 \ \hline \hline Character & ( & ) & ( & ( & ) & ( & ) & ) \ \end{tabular} The last row of this table forms another parentheses sequence, in this case "()(()())". This sequence is called the result of applying the balanced shuffle operation to the input sequence, or in short just the balanced shuffle of the input sequence. 
\textbf{Surprisingly, it turns out that the balanced shuffle of any balanced parentheses sequence is always another balanced parentheses sequence (we will omit the proof for brevity). Even more surprisingly, the balanced shuffles of two different balanced parentheses sequences are always different, therefore the balanced shuffle operation is a bijection on the set of balanced parentheses sequences of any given length (we will omit this proof, too).} \textbf{You are given a balanced parentheses sequence. Find its preimage: the balanced parentheses sequence the balanced shuffle of which is equal to the given sequence.}
In a balanced parentheses sequence each opening parenthesis corresponds (would form a pair enclosing a subexpression in a mathematical expression) to exactly one closing parenthesis and vice versa. In such a pair the balance before the opening parenthesis is always 1 less than the balance before the closing parenthesis. After sorting, the parentheses with equal prefix balance go together. Let us consider them in groups of equal prefix balance. The first group, with prefix balance 0, will contain only opening parentheses. The second group, with prefix balance 1, will contain the closing parentheses corresponding to the opening parentheses from the first group, and potentially some more opening parentheses. The third group, with prefix balance 2, will contain the closing parentheses corresponding to the opening parentheses from the second group, and potentially some more opening parentheses, and so on. Moreover, each group except the first one will always start with a closing parenthesis (since we break ties in decreasing order of position). This observation allows to split the input string into the groups of equal prefix balance: the first group is everything before the first closing parenthesis. The second group is everything after that and before the $k+1$-th closing parenthesis, where $k$ is the number of opening parentheses in the first group, and so on. Having done that, we can also construct the original sequence group-by-group. After processing a certain number of groups we will have a string that is a parentheses sequence but not a balanced parentheses sequence yet: some opening parentheses in it will be marked as unmatched (for example, we can use opening square brackets instead of parentheses to denote those). When processing the next group, we put each closing parenthesis and all opening parentheses that follow it after the corresponding unmatched opening parenthesis. 
Here is how this process works on the sample testcase: After the first group, our string is [. After the second group, our string is ([[). After the third group, our string is (()([)). After the fourth group, our string is (()(())) and we have correctly recovered the original sequence. The straightforward implementation of this algorithm runs in $O(n^2)$, fast enough for the constraints of this subtask. The transformation described in this problem is called a sweep map in the literature, and searching the Internet using that term will lead to a much deeper dive on the subject if desired.
[ "brute force", "constructive algorithms", "trees" ]
2,400
null
1970
A3
Balanced Unshuffle (Hard)
\textbf{The only difference with the medium version is the maximum length of the input.} A parentheses sequence is a string consisting of characters "(" and ")", for example "(()((". A balanced parentheses sequence is a parentheses sequence which can become a valid mathematical expression after inserting numbers and operations into it, for example "(()(()))". The balance of a parentheses sequence is defined as the number of opening parentheses "(" minus the number of closing parentheses ")". For example, the balance of the sequence "(()((" is 3. A balanced parentheses sequence can also be defined as a parentheses sequence with balance 0 such that each of its prefixes has a non-negative balance. We define the balanced shuffle operation that takes a parentheses sequence and returns a parentheses sequence as follows: first, for every character of the input sequence, we compute the balance of the prefix of the sequence before that character and write those down in a table together with the positions of the characters in the input sequence, for example: \begin{tabular}{l||c||c||c||c||c||c||c||c} Prefix balance & 0 & 1 & 2 & 1 & 2 & 3 & 2 & 1 \ \hline \hline Position & 1 & 2 & 3 & 4 & 5 & 6 & 7 & 8 \ \hline \hline Character & ( & ( & ) & ( & ( & ) & ) & ) \ \end{tabular} Then, we sort the columns of this table in increasing order of prefix balance, breaking ties in decreasing order of position. In the above example, we get: \begin{tabular}{l||c||c||c||c||c||c||c||c} Prefix balance & 0 & 1 & 1 & 1 & 2 & 2 & 2 & 3 \ \hline \hline Position & 1 & 8 & 4 & 2 & 7 & 5 & 3 & 6 \ \hline \hline Character & ( & ) & ( & ( & ) & ( & ) & ) \ \end{tabular} The last row of this table forms another parentheses sequence, in this case "()(()())". This sequence is called the result of applying the balanced shuffle operation to the input sequence, or in short just the balanced shuffle of the input sequence. 
Surprisingly, it turns out that the balanced shuffle of any balanced parentheses sequence is always another balanced parentheses sequence (we will omit the proof for brevity). Even more surprisingly, the balanced shuffles of two different balanced parentheses sequences are always different, therefore the balanced shuffle operation is a bijection on the set of balanced parentheses sequences of any given length (we will omit this proof, too). You are given a balanced parentheses sequence. Find its preimage: the balanced parentheses sequence the balanced shuffle of which is equal to the given sequence.
Solving this problem requires a more careful implementation of the algorithm described in the previous subtask. Since we only ever insert new parentheses into the current string after an unmatched parenthesis, we can use a linked list of characters instead of a string to represent it, and also store a vector of pointers to unmatched parentheses in it. This way each insertion will be done in $O(1)$ for a total running time of $O(n)$. Another way to view this solution is that we are constructing a rooted tree corresponding to the original parentheses sequence in a breadth-first manner, layer-by-layer, but then need to traverse it in a depth-first manner to print the original sequence. This runs in $O(n)$ if we store the tree using pointers or indices of children. In fact, we can notice that the layer-by-layer construction is not really necessary: we just create a queue of tree nodes, each opening parenthesis creates a new child of the node at the front of the queue and puts it at the back of the queue, and each closing parenthesis removes the node at the front of the queue from the queue. This way the implementation becomes very short.
[ "constructive algorithms", "trees" ]
2,400
null
1970
B1
Exact Neighbours (Easy)
\textbf{The only difference between this and the hard version is that all $a_{i}$ are even.} After some recent attacks on Hogwarts Castle by the Death Eaters, the Order of the Phoenix has decided to station $n$ members in Hogsmead Village. The houses will be situated on a picturesque $n\times n$ square field. Each wizard will have their own house, and every house will belong to some wizard. Each house will take up the space of one square. However, as you might know wizards are very superstitious. During the weekends, each wizard $i$ will want to visit the house that is exactly $a_{i}$ $(0 \leq a_{i} \leq n)$ away from their own house. The roads in the village are built horizontally and vertically, so the distance between points $(x_{i}, y_{i})$ and $(x_{j}, y_{j})$ on the $n\times n$ field is $ |x_{i} - x_{j}| + |y_{i} - y_{j}|$. The wizards know and trust each other, so one wizard can visit another wizard's house when the second wizard is away. The houses to be built will be big enough for all $n$ wizards to simultaneously visit any house. Apart from that, each wizard is mandated to have a view of the Hogwarts Castle in the north and the Forbidden Forest in the south, so the house of no other wizard should block the view. In terms of the village, it means that in each column of the $n\times n$ field, there can be at most one house, i.e. if the $i$-th house has coordinates $(x_{i}, y_{i})$, then $x_{i} \neq x_{j}$ for all $i \neq j$. The Order of the Phoenix doesn't yet know if it is possible to place $n$ houses in such a way that will satisfy the visit and view requirements of all $n$ wizards, so they are asking for your help in designing such a plan. If it is possible to have a correct placement, where for the $i$-th wizard there is a house that is $a_{i}$ away from it and the house of the $i$-th wizard is the only house in their column, output YES, the position of houses for each wizard, and to the house of which wizard should each wizard go during the weekends. 
If it is impossible to have a correct placement, output NO.
Place all houses on the diagonal, with house $i$ at cell $(i, i)$. The house at diagonal cell $(j, j)$ is at distance $2|i - j|$ from house $i$. Since every $a_{i}$ is even and $a_{i} \leq n$, at least one of the diagonal positions $i - a_{i}/2$ and $i + a_{i}/2$ lies within $[1, n]$, so for each house $i$ there is a house on the diagonal, either to its left or to its right, at exactly the desired distance $a_{i}$.
[ "constructive algorithms" ]
1,900
null
1970
B2
Exact Neighbours (Medium)
\textbf{The only difference between this and the hard version is that $a_{1} = 0$.} After some recent attacks on Hogwarts Castle by the Death Eaters, the Order of the Phoenix has decided to station $n$ members in Hogsmead Village. The houses will be situated on a picturesque $n\times n$ square field. Each wizard will have their own house, and every house will belong to some wizard. Each house will take up the space of one square. However, as you might know wizards are very superstitious. During the weekends, each wizard $i$ will want to visit the house that is exactly $a_{i}$ $(0 \leq a_{i} \leq n)$ away from their own house. The roads in the village are built horizontally and vertically, so the distance between points $(x_{i}, y_{i})$ and $(x_{j}, y_{j})$ on the $n\times n$ field is $ |x_{i} - x_{j}| + |y_{i} - y_{j}|$. The wizards know and trust each other, so one wizard can visit another wizard's house when the second wizard is away. The houses to be built will be big enough for all $n$ wizards to simultaneously visit any house. Apart from that, each wizard is mandated to have a view of the Hogwarts Castle in the north and the Forbidden Forest in the south, so the house of no other wizard should block the view. In terms of the village, it means that in each column of the $n\times n$ field, there can be at most one house, i.e. if the $i$-th house has coordinates $(x_{i}, y_{i})$, then $x_{i} \neq x_{j}$ for all $i \neq j$. The Order of the Phoenix doesn't yet know if it is possible to place $n$ houses in such a way that will satisfy the visit and view requirements of all $n$ wizards, so they are asking for your help in designing such a plan. If it is possible to have a correct placement, where for the $i$-th wizard there is a house that is $a_{i}$ away from it and the house of the $i$-th wizard is the only house in their column, output YES, the position of houses for each wizard, and to the house of which wizard should each wizard go during the weekends. 
If it is impossible to have a correct placement, output NO.
We don't care about satisfying the condition for the first house, since this house will always be at a distance $0$ from itself. There are at least two approaches for constructing the desired placement. Approach 1: Let's sort all $a_{i}$ in non-increasing order, then we can place houses in the zigzag order the following way: we will start by placing the first house in $(1, 1)$, then we will place each new house in the next column, alternating the relative position to the previous house up to down. Since $a_{i}$ is sorted in non-increasing order, we will never get out of the bounds of the field and the $i$-th house in the sorted order will satisfy the condition for the $i - 1$ house in the sorted order. Approach 2: Place the first house in $(1, 1)$. Then, place each next house in the next column. If $a_{i} \geq i$, you can place the $i$-th house in such a way that it will be $a_{i}$ away from the first one. If $a_{i} < i$, you can place the house in the same row as the house with index $i - a_{i}$ and the house with this index will be $a_{i}$ away.
[ "constructive algorithms" ]
2,100
null
1970
B3
Exact Neighbours (Hard)
After some recent attacks on Hogwarts Castle by the Death Eaters, the Order of the Phoenix has decided to station $n$ members in Hogsmead Village. The houses will be situated on a picturesque $n\times n$ square field. Each wizard will have their own house, and every house will belong to some wizard. Each house will take up the space of one square. However, as you might know wizards are very superstitious. During the weekends, each wizard $i$ will want to visit the house that is exactly $a_{i}$ $(0 \leq a_{i} \leq n)$ away from their own house. The roads in the village are built horizontally and vertically, so the distance between points $(x_{i}, y_{i})$ and $(x_{j}, y_{j})$ on the $n\times n$ field is $ |x_{i} - x_{j}| + |y_{i} - y_{j}|$. The wizards know and trust each other, so one wizard can visit another wizard's house when the second wizard is away. The houses to be built will be big enough for all $n$ wizards to simultaneously visit any house. Apart from that, each wizard is mandated to have a view of the Hogwarts Castle in the north and the Forbidden Forest in the south, so the house of no other wizard should block the view. In terms of the village, it means that in each column of the $n\times n$ field, there can be at most one house, i.e. if the $i$-th house has coordinates $(x_{i}, y_{i})$, then $x_{i} \neq x_{j}$ for all $i \neq j$. The Order of the Phoenix doesn't yet know if it is possible to place $n$ houses in such a way that will satisfy the visit and view requirements of all $n$ wizards, so they are asking for your help in designing such a plan. If it is possible to have a correct placement, where for the $i$-th wizard there is a house that is $a_{i}$ away from it and the house of the $i$-th wizard is the only house in their column, output YES, the position of houses for each wizard, and to the house of which wizard should each wizard go during the weekends. If it is impossible to have a correct placement, output NO.
Let's divide all valid arrays $a$ into three cases and solve the problem for each of them. First case There exists $i$ such that $a_{i} = 0$. We can use the solution to Medium. Now we can assume $1 \leq a_{i} \leq n$. Second case There are two numbers $i \neq j$, such that $a_{i} = a_{j}$. We can adapt the algorithm from Medium to this case. For example, consider the first approach to Medium. We can do the same zigzag algorithm but skip the satisfaction of the next house when we have that in the sorted order two values are the same ($b_{i - 1} = b_{i}$ where $b$ is the array $a$ sorted in non-increasing order). Third case In the remaining case the set of values will be exactly $\{1, 2, \ldots, n\}$. When $n = 2$, the answer is NO: no valid placement exists. When $n \geq 3$, we again sort $a_{i}$ and do the zigzag until we meet $3, 2, 1$. At that point we can do a special construction for $3, 2, 1$ which is not hard to come up with.
[ "constructive algorithms" ]
2,300
null
1970
C1
Game on Tree (Easy)
\textbf{This is the easy version of the problem. The difference in this version is that $t=1$ and we work on an array-like tree.} Ron and Hermione are playing a game on a tree of $n$ nodes that are initially inactive. This tree is special because it has exactly two leaves. It can thus be seen as an array. The game consists of $t$ rounds, each of which starts with a stone on exactly one node, which is considered as activated. A move consists of picking an inactive neighbor of the node with a stone on it and moving the stone there (thus activating this neighbor). Ron makes the first move, after which he alternates with Hermione until no valid move is available. The player that cannot make a move loses the round. If both players play optimally, who wins each round of this game? Note that all the rounds are played with the same tree; only the starting node changes. Moreover, after each round, all active nodes are considered inactive again.
We are given a linked list with an initial stone at index $1 \leq i \leq n$. There are $i-1$ nodes to its left and $n-i$ nodes to its right. Note that after the first move, all the remaining moves are fixed since there will always be exactly one inactive neighbor. If one of $i-1, n-i$ is odd, Ron should move to the neighbor on the corresponding side: an even number of moves remains after his first move, so Hermione will be the first player unable to move, guaranteeing a victory for Ron. Otherwise, both $i-1$ and $n-i$ are even and Hermione is guaranteed to win.
[ "games" ]
1,400
null
1970
C2
Game on Tree (Medium)
\textbf{This is the medium version of the problem. The difference in this version is that $t=1$ and we work on trees.} Ron and Hermione are playing a game on a tree of $n$ nodes that are initially inactive. The game consists of $t$ rounds, each of which starts with a stone on exactly one node, which is considered as activated. A move consists of picking an inactive neighbor of the node with a stone on it and moving the stone there (thus activating this neighbor). Ron makes the first move, after which he alternates with Hermione until no valid move is available. The player that cannot make a move loses the round. If both players play optimally, who wins each round of this game? Note that all the rounds are played with the same tree; only the starting node changes. Moreover, after each round, all active nodes are considered inactive again.
Let's root the tree at node $u_1$ (the start node). By doing so, we guarantee that if the stone is at node $v$, the parent of $v$ is already active and thus we can only go down in the tree. This means that each subtree can be seen as its own independent game. So, each node is either a winning or losing position (w.r.t. the player whose turn is next). We will find recursively whether $u_1$ corresponds to a winning game for Ron. If some child of a node $v$ is a losing position, then the current player should move the stone to that child to guarantee a win. If all children are winning positions, the current player will surely lose. So, $v$ is a winning position iff it has a child that is a losing position. Note that the leaves are losing positions. This is solved in $O(n)$ time.
[ "dfs and similar", "dp", "games", "trees" ]
1,700
null
1970
C3
Game on Tree (Hard)
\textbf{This is the hard version of the problem. The only difference in this version is the constraint on $t$.} Ron and Hermione are playing a game on a tree of $n$ nodes that are initially inactive. The game consists of $t$ rounds, each of which starts with a stone on exactly one node, which is considered as activated. A move consists of picking an inactive neighbor of the node with a stone on it and moving the stone there (thus activating this neighbor). Ron makes the first move, after which he alternates with Hermione until no valid move is available. The player that cannot make a move loses the round. If both players play optimally, who wins each round of this game? Note that all the rounds are played with the same tree; only the starting node changes. Moreover, after each round, all active nodes are considered inactive again.
Repeating the previous solution for each $u_i$ is too slow as it gives an $O(n \cdot t)$ solution. Instead, we will reuse the computations we did when assigning losing/winning positions. We do so using the re-rooting technique. Let's first compute the positions of the nodes in the tree rooted at 0. We will now find in linear time the position of each node if the tree was rooted at that node. Let $v$ be the current root (initially $v=0$) and let $w$ be a child of $v$. Let $T$ be the initial tree and $T'$ be the tree re-rooted at $w$. We know the positions of all nodes in $T$, and we want to find the position of nodes in $T'$. First note that only the positions of $v$ and $w$ may differ because the subtrees of all the other nodes remain unchanged. If $v$ has a child other than $w$ which corresponds to a losing position (in $T$), then $v$ is a winning position in $T'$. If $v$ has no losing child or has exactly one losing child which is $w$ (in $T$), then it is a losing position in $T'$. Similarly, if $w$ has some child that is a losing position (including $v$ in $T'$), then it is a winning position, otherwise it is a losing position in $T'$. The above checks can be done in constant time by counting the number of losing children before the recursive calls (this count also needs to be maintained before recurring on a subtree). We thus recur on $w$ and repeat. Once the recursive call is over, we backtrack our changes before recurring on a different child of $v$. At the beginning of each recursive call, we know whether Ron or Hermione wins if the game started at that node, so we can answer all the queries by doing linear-time pre-processing.
[ "dfs and similar", "dp", "games", "trees" ]
1,900
null
1970
D1
Arithmancy (Easy)
Professor Vector is preparing to teach her Arithmancy class. She needs to prepare $n$ distinct magic words for the class. Each magic word is a string consisting of characters X and O. A spell is a string created by concatenating two magic words together. The power of a spell is equal to the number of its different non-empty substrings. For example, the power of the spell XOXO is equal to 7, because it has 7 different substrings: X, O, XO, OX, XOX, OXO and XOXO. Each student will create their own spell by concatenating two magic words. Since the students are not very good at magic yet, they will choose each of the two words independently and uniformly at random from the $n$ words provided by Professor Vector. It is therefore also possible that the two words a student chooses are the same. Each student will then compute the power of their spell, and tell it to Professor Vector. In order to check their work, and of course to impress the students, Professor Vector needs to find out which two magic words and in which order were concatenated by each student. Your program needs to perform the role of Professor Vector: first, create $n$ distinct magic words, and then handle multiple requests where it is given the spell power and needs to determine the indices of the two magic words, in the correct order, that were used to create the corresponding spell.
Given that we need to output 3 words only, we can manually (trying a few options on paper until we find a solution) construct 3 words such that all 9 spell powers are different.
[ "brute force", "constructive algorithms", "interactive", "strings" ]
2,100
null
1970
D2
Arithmancy (Medium)
\textbf{The only difference between the versions of this problem is the maximum value of $n$.} Professor Vector is preparing to teach her Arithmancy class. She needs to prepare $n$ distinct magic words for the class. Each magic word is a string consisting of characters X and O. A spell is a string created by concatenating two magic words together. The power of a spell is equal to the number of its different non-empty substrings. For example, the power of the spell XOXO is equal to 7, because it has 7 different substrings: X, O, XO, OX, XOX, OXO and XOXO. Each student will create their own spell by concatenating two magic words. Since the students are not very good at magic yet, they will choose each of the two words independently and uniformly at random from the $n$ words provided by Professor Vector. It is therefore also possible that the two words a student chooses are the same. Each student will then compute the power of their spell, and tell it to Professor Vector. In order to check their work, and of course to impress the students, Professor Vector needs to find out which two magic words and in which order were concatenated by each student. Your program needs to perform the role of Professor Vector: first, create $n$ distinct magic words, and then handle multiple requests where it is given the spell power and needs to determine the indices of the two magic words, in the correct order, that were used to create the corresponding spell.
Now we need to find 30 words, so manually solving on paper is out of question. However, "just trying" still works: what we can do is to keep generating random words until we find 30 that have all 900 corresponding spell powers different. Depending on how you fast is your computation of the spell power you can either do this during the time limit, or precompute locally and then just submit a solution that prints the precomputed words. To make sure that we have to precompute only once instead of 30 times (for $n=1$, $n=2$, ...), we can find 30 words such that the $i$-th word has length $30\cdot i$. This way the first $n$ words from this list give a valid answer to the problem for any $n<=30$. To compute the spell power, we can either use the naive approach of taking all substrings, sorting them and finding unique ones, some optimization thereof (for example, if the words are randomly generated, the chances that sufficiently long substrings coincide are vanishingly small and can be ignored), or the asymptotically optimal approach using any of the suffix data structures (the suffix array, or the suffix automaton, or the suffix tree).
[ "constructive algorithms", "interactive", "probabilities", "strings" ]
2,600
null
1970
D3
Arithmancy (Hard)
\textbf{The only difference between the versions of this problem is the maximum value of $n$.} Professor Vector is preparing to teach her Arithmancy class. She needs to prepare $n$ distinct magic words for the class. Each magic word is a string consisting of characters X and O. A spell is a string created by concatenating two magic words together. The power of a spell is equal to the number of its different non-empty substrings. For example, the power of the spell XOXO is equal to 7, because it has 7 different substrings: X, O, XO, OX, XOX, OXO and XOXO. Each student will create their own spell by concatenating two magic words. Since the students are not very good at magic yet, they will choose each of the two words independently and uniformly at random from the $n$ words provided by Professor Vector. It is therefore also possible that the two words a student chooses are the same. Each student will then compute the power of their spell, and tell it to Professor Vector. In order to check their work, and of course to impress the students, Professor Vector needs to find out which two magic words and in which order were concatenated by each student. Your program needs to perform the role of Professor Vector: first, create $n$ distinct magic words, and then handle multiple requests where it is given the spell power and needs to determine the indices of the two magic words, in the correct order, that were used to create the corresponding spell.
The previous approach of just generating random words does not even come close to working for $n=1000$ (in fact, it is hard to push it beyond roughly $n=50$). Now we need to add our own insights to the process. The first idea is: in order to answer the queries, we will likely need to know the spell power for all $n^2$ possible spells. But even if we use a suffix array to compute the spell power, it will be too slow to compute the spell power $10^6$ times for spells of length around $6\cdot 10^4$. Therefore it is better to choose the magic words in such a way that computing the spell power of a concatenation of two such words can be done in $O(1)$. One way to achieve this is to choose some family of magic words $g_i$ with a regular structure and a single parameter $i$, and just figure out the function $f(i, j)$ for the spell power for the concatenation of $g_i$ and $g_j$ on paper, hopefully it will be easy to figure out and quick to compute. Then, what we can do is to go in increasing order of $i$, and try to take the next word $g_i$ into our set by checking if after adding it to the set, the newly added spells have spell powers that are different from each other and from the existing spell powers. Since we compute the spell power in $O(1)$, the total running time of this process is around $O(n\cdot k)$, where $k$ is the number of $g_i$ we had to check before we managed to add $n$ of them into the set. The only remaining difficulty, and of course it is actually the main part of the solution, is to choose the family $g_i$. Here we have two main competing constraints: The words have to have simple structure, so that we can compute $f(i, j)$ quickly (in both senses: quickly figure out the formula, and the formula should be simple). However, we must have $f(i, j) \ne f(j, i)$. It turns out that this rules out many word families with a very simple structure. 
We expect that once you realize the two constraints above, after a small amount of experimentation you will stumble upon a family that works. Here are two families that work that we know about, but I expect that there are many many more: $g_i=\texttt{XOX}^{i-1}$. So $g_1=\texttt{XO}$, $g_2=\texttt{XOX}$, $g_3=\texttt{XOXX}$, and so on. We found this family in the upsolving solution from one of the teams in the onsite round. $g_i=\texttt{XO}^i\texttt{XO}^i$. So $g_1=\texttt{XOXO}$, $g_2=\texttt{XOOXOO}$, $g_3=\texttt{XOOOXOOO}$ and so on. This family actually leads to a string slightly longer than 30000 for $n=1000$, but it can be fixed by skipping the short strings that lead to too many collisions later (so we actually use $g_i=\texttt{XO}^{i+5n}\texttt{XO}^{i+5n}$). This was the original reference solution. As you noticed, our solution does not use the fact that the interactor is guaranteed to be random. The reason the problem was set like this is that we're not aware of a way to find collisions quickly enough that we could use to simply check if the $n$ printed magic words yield $n^2$ distinct spell powers. Therefore to make the problem well-defined and avoid the need for contestants to guess how strong the checker is, we made it weak (just trying 1000 random spells) but well-specified.
[ "interactive" ]
3,100
null
1970
E1
Trails (Easy)
Harry Potter is hiking in the Alps surrounding Lake Geneva. In this area there are $m$ cabins, numbered 1 to $m$. Each cabin is connected, with one or more trails, to a central meeting point next to the lake. Each trail is either \underline{short} or \underline{long}. Cabin $i$ is connected with $s_i$ short trails and $l_i$ long trails to the lake. Each day, Harry walks a trail from the cabin where he currently is to Lake Geneva, and then from there he walks a trail to any of the $m$ cabins (including the one he started in). However, as he has to finish the hike in a day, at least one of the two trails has to be short. How many possible combinations of trails can Harry take if he starts in cabin 1 and walks for $n$ days? Give the answer modulo $10^9 + 7$.
Let $t_i := s_i + l_i$. The number of possible paths between cabin $i$ and cabin $j$ is $t_i t_j - l_i l_j$. Let $\mathbf{v}_{k,i}$ be the number of paths that end in cabin $i$ after walking for $k$ days. We then have $\mathbf{v}_{0,1} = 1, \\ \mathbf{v}_{0,i} = 0 \text{ for } i \neq 1,$ $\mathbf{v}_{k+1,i} = \sum_{j = 1}^{m} (t_i t_j - l_i l_j) \mathbf{v}_{k,j}.$ The answer is $\mathbf{v}_{n,1} + \dots + \mathbf{v}_{n,m}.$
[ "dp" ]
1,800
null
1970
E2
Trails (Medium)
Harry Potter is hiking in the Alps surrounding Lake Geneva. In this area there are $m$ cabins, numbered 1 to $m$. Each cabin is connected, with one or more trails, to a central meeting point next to the lake. Each trail is either \underline{short} or \underline{long}. Cabin $i$ is connected with $s_i$ short trails and $l_i$ long trails to the lake. Each day, Harry walks a trail from the cabin where he currently is to Lake Geneva, and then from there he walks a trail to any of the $m$ cabins (including the one he started in). However, as he has to finish the hike in a day, at least one of the two trails has to be short. How many possible combinations of trails can Harry take if he starts in cabin 1 and walks for $n$ days? Give the answer modulo $10^9 + 7$.
Let $t_i := s_i + l_i$. The number of possible paths between cabin $i$ and cabin $j$ is $t_i t_j - l_i l_j$. Let $\mathbf{v}_{k}$ be the vector whose $i$th entry is the number of paths that ends in cabin $i$ after walking for $k$ days. We then have $\mathbf{v}_0 = (1, 0, \dots, 0),$ $(\mathbf{v}_{k+1})_i = \sum_{j = 1}^{m} (t_i t_j - l_i l_j) (\mathbf{v}_{k})_j.$ $A_{i,j} = t_i t_j - l_i l_j.$ $\mathbf{v}_{k+1} = A \mathbf{v}_k.$ $\mathbf{v}_n = A^n \mathbf{v}_0.$ Compute $B := A^{\lfloor n/2 \rfloor}$ If $n$ is even: Return $B^2$ Else: Return $B^2 A$ The solution to the problem is now $(\mathbf{v}_n)_1 + \dots + (\mathbf{v}_n)_m.$
[ "dp", "matrices" ]
2,000
null
1970
E3
Trails (Hard)
Harry Potter is hiking in the Alps surrounding Lake Geneva. In this area there are $m$ cabins, numbered 1 to $m$. Each cabin is connected, with one or more trails, to a central meeting point next to the lake. Each trail is either \underline{short} or \underline{long}. Cabin $i$ is connected with $s_i$ short trails and $l_i$ long trails to the lake. Each day, Harry walks a trail from the cabin where he currently is to Lake Geneva, and then from there he walks a trail to any of the $m$ cabins (including the one he started in). However, as he has to finish the hike in a day, at least one of the two trails has to be short. How many possible combinations of trails can Harry take if he starts in cabin 1 and walks for $n$ days? Give the answer modulo $10^9 + 7$.
Let $t_i := s_i + l_i$. The number of possible paths between cabin $i$ and cabin $j$ is $t_i t_j - l_i l_j$. Let $\mathbf{v}_{k}$ be the vector whose $i$th entry is the number of paths that ends in cabin $i$ after walking for $k$ days. We then have $\mathbf{v}_0 = (1, 0, \dots, 0),$ $(\mathbf{v}_{k+1})_i = \sum_{j = 1}^{m} (t_i t_j - l_i l_j) (\mathbf{v}_{k})_j.$ $A_{i,j} = t_i t_j - l_i l_j.$ $\mathbf{v}_{k+1} = A \mathbf{v}_k.$ $\mathbf{v}_n = A^n \mathbf{v}_0.$ Now, observe that $A$ can be written as $A = BC$, where $B$ is the $(m \times 2)$-matrix $\begin{pmatrix} t_1 & l_1 \\ t_2 & l_2 \\ \vdots & \vdots \\ t_m & l_m \\ \end{pmatrix},$ $\begin{pmatrix} t_1 & t_2 & \cdots & t_m \\ -l_1 & -l_2 & \cdots & -l_m \\ \end{pmatrix}.$ $A^n = B (CB)^{n-1} C.$ $\mathbf{v}_n = B (CB)^{n-1} C \mathbf{v}_0$
[ "dp", "matrices" ]
2,200
null
1970
F3
Playing Quidditch (Hard)
This afternoon, you decided to enjoy the first days of Spring by taking a walk outside. As you come near the Quidditch field, you hear screams. Once again, there is a conflict about the score: the two teams are convinced that they won the game! To prevent this problem from happening one more time, you decide to get involved in the refereeing of the matches. Now, you will stay in the stadium to watch the game and count the score. At the end of the game, you will decide the winner. Today, two teams are competing: the red Gryffindor (R) and the blue Ravenclaw (B) team. Each team is composed of $P$ players ($1 \leq P \leq 10$). The field is a rectangle of $N$ lines and $M$ columns ($3 \leq N, M \leq 99$, $N$ and $M$ are odd). All the positions are integers, and several entities are allowed to be at the same position in the field. At the beginning of the game, the field contains goals for the two teams (each team can own between one and five goals), the players, and exactly one Quaffle. In this version of the problem, one Bludger \textbf{and a Golden Snitch} can be present. A game is composed of $T$ steps ($0 \leq T \leq 10000$). At each step, one entity on the field (a player or a ball) performs one action. All entities can move. A player can also catch a ball or throw the Quaffle that it is carrying. To catch a ball, a player must be located on the same cell as it. The Quaffle does not perform any action while it is being carried; it only follows the movements of the player. If a player carrying the Quaffle decides to throw it, the Quaffle is simply put at the current position of the player. If a player is on the same cell as a Bludger (either after a movement from the player or the Bludger), the player is eliminated. If the player is eliminated while it is carrying the Quaffle, the Quaffle remains on the cell containing both the player and the Bludger after the move. It is guaranteed that this never occurs while the player is in a cell containing a goal. 
To win a point, a player must leave the Quaffle at a goal of the other team. When it does, the team of the player wins one point, and the Quaffle instantly moves to the middle of the field (the cell at the $(M+1)/2$-th column of the $(N+1)/2$-th line of the field, starting from 1). There is no goal in the middle of the field. If a player puts the ball in its own goal, the other team wins the point. \textbf{If a player catches the Golden Snitch, their team wins 10 points and the game is over.}
This subject does not contain theoretical difficulty: it is only needed to simulate the game following the rules described in the statement. To be able to perform the simulation easily, it is useful to store wisely the current state of the game. the position of the goals, either in a grid, a set or a list the position of the players, for example a list containing the position of each player the position of the balls the current score of each team Then, at each step of the simulation, the current state must be updated following the rules.
[ "implementation" ]
2,300
null
1970
G1
Min-Fund Prison (Easy)
\textbf{In the easy version, $m = n-1$ and there exists a path between $u$ and $v$ for all $u, v$ ($1 \leq u, v \leq n$).} After a worker's strike organized by the Dementors asking for equal rights, the prison of Azkaban has suffered some damage. After settling the spirits, the Ministry of Magic is looking to renovate the prison to ensure that the Dementors are kept in check. The prison consists of $n$ prison cells and $m$ bi-directional corridors. The $i^{th}$ corridor is from cells $u_i$ to $v_i$. A subset of these cells $S$ is called a complex if any cell in $S$ is reachable from any other cell in $S$. Formally, a subset of cells $S$ is a complex if $x$ and $y$ are reachable from each other for all $x, y \in S$, using only cells from $S$ on the way. The funding required for a complex $S$ consisting of $k$ cells is defined as $k^2$. As part of your Intro to Magical Interior Design course at Hogwarts, you have been tasked with designing the prison. The Ministry of Magic has asked that you divide the prison into $2$ complexes with $\textbf{exactly one corridor}$ connecting them, so that the Dementors can't organize union meetings. For this purpose, you are allowed to build bi-directional corridors. The funding required to build a corridor between any $2$ cells is $c$. Due to budget cuts and the ongoing fight against the Death Eaters, you must find the $\textbf{minimum total funding}$ required to divide the prison as per the Ministry's requirements or $-1$ if no division is possible. Note: The total funding is the sum of the funding required for the $2$ complexes and the corridors built. If after the division, the two complexes have $x$ and $y$ cells respectively and you have built a total of $a$ corridors, the total funding will be $x^2 + y^2 + c \times a$. Note that $x+y=n$.
The cells and corridors in this subtask form a tree. No matter how we divide the prison into two complexes, there will be at least one existing corridor connecting them, but we must have at most one such corridor, which means that we do not need to build any more corridors. For each existing corridor, removing it splits the tree into two parts, and those two parts are the only possibility to have two complexes connected only by this corridor. So we need to compute the sizes of the two parts for every corridor, and then pick the corridor that minimizes the sum of squares of the sizes. In order to compute the sizes of the two parts for each corridor quickly we can root the tree and then use depth-first search that recursively computes the size of each subtree. The running time of this solution is $O(n)$.
[ "dfs and similar", "trees" ]
1,900
null
1970
G2
Min-Fund Prison (Medium)
\textbf{In the medium version, $2 \leq \sum n \leq 300$ and $1 \leq \sum m \leq 300$} After a worker's strike organized by the Dementors asking for equal rights, the prison of Azkaban has suffered some damage. After settling the spirits, the Ministry of Magic is looking to renovate the prison to ensure that the Dementors are kept in check. The prison consists of $n$ prison cells and $m$ bi-directional corridors. The $i^{th}$ corridor is from cells $u_i$ to $v_i$. A subset of these cells $S$ is called a complex if any cell in $S$ is reachable from any other cell in $S$. Formally, a subset of cells $S$ is a complex if $x$ and $y$ are reachable from each other for all $x, y \in S$, using only cells from $S$ on the way. The funding required for a complex $S$ consisting of $k$ cells is defined as $k^2$. As part of your Intro to Magical Interior Design course at Hogwarts, you have been tasked with designing the prison. The Ministry of Magic has asked that you divide the prison into $2$ complexes with $\textbf{exactly one corridor}$ connecting them, so that the Dementors can't organize union meetings. For this purpose, you are allowed to build bi-directional corridors. The funding required to build a corridor between any $2$ cells is $c$. Due to budget cuts and the ongoing fight against the Death Eaters, you must find the $\textbf{minimum total funding}$ required to divide the prison as per the Ministry's requirements or $-1$ if no division is possible. Note: The total funding is the sum of the funding required for the $2$ complexes and the corridors built. If after the division, the two complexes have $x$ and $y$ cells respectively and you have built a total of $a$ corridors, the total funding will be $x^2 + y^2 + c \times a$. Note that $x+y=n$.
The graph is no longer a tree in this problem, but we can try to generalize the solution for the first subtask. First of all, suppose the graph is connected. Similar to the first subtask, there will always be at least one existing corridor connecting the two complexes, so we do not need to build new corridors. Moreover, there will be exactly one existing corridor connecting the two complexes if and only if said corridor is a bridge in the graph, and the two complexes are the two connected components that appear after removing this bridge. We can modify the depth-first search algorithm that finds bridges in the graph to also compute the component sizes, and therefore we can solve our problem for connected graphs. Now, what to do if the graph is not connected? There are two cases: At least one connected component will be split between the two complexes. In this case exactly one component must be split in this way, and the split must be done using a bridge in this component similar to the connected graph solution. For every other connected component it must go either completely to the first complex or completely to the second complex. We need to build additional corridors to connect components within the complexes, and the number of additional corridors is equal to the number of connected components in the graph minus one. No connected component will be split between the two complexes. In this case we need to build additional corridors both to connect components within the complexes, but also to connect the complexes to each other. The number of additional corridors is still equal to the number of connected components in the graph minus one. Since the number of additional corridors is constant, we still need to minimize the sum of squares of the sizes of the complexes. We now need to solve a knapsack problem to find out the possible sizes of the first complex. 
To account for the fact that at most one component may be split using a bridge, we can add a boolean flag to the knapsack dynamic programming state that tracks whether we have already split a component. The running time of this solution is $O(n^2)$.
[ "brute force", "dfs and similar", "dp", "graphs", "trees" ]
2,200
null
1970
G3
Min-Fund Prison (Hard)
\textbf{In the hard version, $2 \leq \sum n \leq 10^5$ and $1 \leq \sum m \leq 5 \times 10^{5}$} After a worker's strike organized by the Dementors asking for equal rights, the prison of Azkaban has suffered some damage. After settling the spirits, the Ministry of Magic is looking to renovate the prison to ensure that the Dementors are kept in check. The prison consists of $n$ prison cells and $m$ bi-directional corridors. The $i^{th}$ corridor is from cells $u_i$ to $v_i$. A subset of these cells $S$ is called a complex if any cell in $S$ is reachable from any other cell in $S$. Formally, a subset of cells $S$ is a complex if $x$ and $y$ are reachable from each other for all $x, y \in S$, using only cells from $S$ on the way. The funding required for a complex $S$ consisting of $k$ cells is defined as $k^2$. As part of your Intro to Magical Interior Design course at Hogwarts, you have been tasked with designing the prison. The Ministry of Magic has asked that you divide the prison into $2$ complexes with $\textbf{exactly one corridor}$ connecting them, so that the Dementors can't organize union meetings. For this purpose, you are allowed to build bi-directional corridors. The funding required to build a corridor between any $2$ cells is $c$. Due to budget cuts and the ongoing fight against the Death Eaters, you must find the $\textbf{minimum total funding}$ required to divide the prison as per the Ministry's requirements or $-1$ if no division is possible. Note: The total funding is the sum of the funding required for the $2$ complexes and the corridors built. If after the division, the two complexes have $x$ and $y$ cells respectively and you have built a total of $a$ corridors, the total funding will be $x^2 + y^2 + c \times a$. Note that $x+y=n$.
In the hard subtask, the $O(n^2)$ solution is too slow. One way to get it accepted is to use bitsets to speed it up, as the knapsack transitions can simply be expressed as a bitwise shift + bitwise or. However, there is also an asymptotically faster approach. First of all, instead of remembering the possible splits using a bridge for each component, we will just remember for each component size, how it can be split by a bridge. Since the sum of component sizes is $n$, this needs only $O(n)$ memory. Now, if we have at least four components of the same size $x$, for the purposes of our knapsack we can replace two of them with a component of size $2x$, and the set of reachable complex sizes will not change. Since we keep at least two components of size $x$, the set of reachable complex sizes will not change even if we later split one of the components of size $x$ using the bridges. If we repeat the above procedure until we have at most three components of each size, the total number of components will be $O(\sqrt{n})$. Therefore the knapsack dynamic programming that does not take splitting via a bridge into account will run in $O(n\cdot\sqrt{n})$. In order to tackle the splitting via a bridge fast, let us first run the dynamic programming that does not allow to split components, but instead of computing a boolean for each complex size that tells if this size can be reached, we will compute the number of ways to reach it, modulo a large prime. A step of this dynamic programming is reversible, therefore we can then for each component size compute in $O(n)$ which sizes can be reached without using one of the components of this size. 
Now we need to combine this with the ways to split a component of this size using a bridge, and since the sum of squares of the sizes is smaller whenever the sizes are closer to each other, for each way to split the component using a bridge we need to find the reachable state of the dynamic programming that is the closest to $\frac{n}{2}$ minus the size of the part disconnected by the bridge, which can be done in $O(n)$ for all ways to split using a bridge via the two pointer method. Therefore the total running time of this solution is $O(n\cdot\sqrt{n})$.
[ "bitmasks", "dfs and similar", "dp", "graphs", "trees" ]
2,400
null
1971
A
My First Sorting Problem
You are given two integers $x$ and $y$. Output two integers: the minimum of $x$ and $y$, followed by the maximum of $x$ and $y$.
You can use, for example, an if-statement or the built-in $\texttt{min}$ and $\texttt{max}$ functions available in most languages (like Python or C++).
[ "implementation", "sortings" ]
800
#include <bits/stdc++.h>
using namespace std;

// One test case: read two integers and print them in non-decreasing
// order (minimum first, maximum second).
void solve() {
    int x, y;
    cin >> x >> y;
    if (x > y) swap(x, y);  // after this, x <= y
    cout << x << ' ' << y << '\n';
}

int main() {
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1971
B
Different String
You are given a string $s$ consisting of lowercase English letters. Rearrange the characters of $s$ to form a new string $r$ that is \textbf{not equal} to $s$, or report that it's impossible.
Since the string is length at most $10$, we can try all swaps of two characters of the string. This is $\mathcal{O}(|s|^2)$ per test case, which is fast enough. If none of them create a different string, then all characters in the original string are the same, so the answer is NO. Bonus: Actually, it's enough to try all swaps with the first character, solving the problem in $\mathcal{O}(|s|)$. Why?
[ "implementation", "strings" ]
800
#include <bits/stdc++.h>
using namespace std;

// One test case: rearrange s into a different string, or report "NO".
// It suffices to swap the first character that differs from s[0] to the
// front; if no such character exists, all characters are equal and no
// distinct rearrangement is possible.
void solve() {
    string s;
    cin >> s;
    size_t pos = s.find_first_not_of(s[0]);  // first index with s[pos] != s[0]
    if (pos == string::npos) {
        cout << "NO\n";
    } else {
        swap(s[0], s[pos]);
        cout << "YES\n" << s << '\n';
    }
}

int main() {
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1971
C
Clock and Strings
There is a clock labeled with the numbers $1$ through $12$ in clockwise order, as shown below. \begin{center} {\small In this example, $(a,b,c,d)=(2,9,10,6)$, and the strings intersect.} \end{center} Alice and Bob have four \textbf{distinct} integers $a$, $b$, $c$, $d$ not more than $12$. Alice ties a red string connecting $a$ and $b$, and Bob ties a blue string connecting $c$ and $d$. Do the strings intersect? (The strings are straight line segments.)
There are many ways to implement the solution, but many involve a lot of casework. Below is the shortest solution we know of. Walk around the clock in the order $1$, $2$, $\dots$, $12$. If we pass by two red strings or two blue strings in a row, the strings don't intersect; otherwise, they do. This is because if we don't have two red or blue in a row, then the red and blue strings alternate, so there must be an intersection.
[ "implementation" ]
900
#include <bits/stdc++.h>
using namespace std;

// One test case: walk the clock face 1..12 and record the order in which
// the endpoints of the two strings appear ('a' for Alice's, 'b' for
// Bob's). The chords intersect iff the labels strictly alternate.
void solve() {
    int a, b, c, d;
    cin >> a >> b >> c >> d;
    string order;
    for (int h = 1; h <= 12; h++) {
        if (h == a || h == b) order.push_back('a');
        if (h == c || h == d) order.push_back('b');
    }
    bool cross = (order == "abab" || order == "baba");
    cout << (cross ? "YES\n" : "NO\n");
}

int main() {
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1971
D
Binary Cut
You are given a binary string$^{\dagger}$. Please find the minimum number of pieces you need to cut it into, so that the resulting pieces can be rearranged into a sorted binary string. Note that: - each character must lie in exactly one of the pieces; - the pieces must be contiguous substrings of the original string; - you must use all the pieces in the rearrangement. $^{\dagger}$ A binary string is a string consisting of characters $0$ and $1$. A sorted binary string is a binary string such that all characters $0$ come before all characters $1$.
First, note that it's always optimal to divide the string into "blocks" of equal values; there is no use having two strings $\texttt{111}|\texttt{11}$ when we can just have $\texttt{11111}$ and use fewer pieces. Now note that to sort the string, we need all blocks of $\texttt{0}$ to come before all blocks of $\texttt{1}$. The only way that two blocks can join is if we have a block of $\texttt{0}$s before a block of $\texttt{1}$s, and we can have at most one such block. That is, all strings look like: $(\text{blocks of }\texttt{0}\text{s}) \underbrace{\texttt{0...1}}_{\leq 1\text{joined block of }\texttt{0}\text{s and }\texttt{1}\text{s}} (\text{blocks of }\texttt{1}\text{s})$ So the answer is the number of blocks, but we should subtract $1$ if a substring $\texttt{01}$ exists (because then we can make the center block above). The time complexity is $\mathcal{O}(|s|)$. For example, for the string $\texttt{111000110}$, we can use three pieces: $\texttt{111}|\texttt{00011}|\texttt{0} \to \texttt{0}|\texttt{00011}|\texttt{111}$.
[ "dp", "greedy", "implementation", "sortings", "strings" ]
1,100
#include <bits/stdc++.h>
using namespace std;

// One test case: the answer is the number of maximal equal-character
// blocks, minus one if some "01" boundary exists (that adjacent pair of
// blocks may stay glued together as the single 0...1 piece).
void solve() {
    string s;
    cin >> s;
    int blocks = 1;           // number of maximal runs of equal characters
    bool hasZeroOne = false;  // does "01" occur as a substring?
    int n = (int)s.size();
    for (int i = 1; i < n; i++) {
        if (s[i - 1] != s[i]) blocks++;
        if (s[i - 1] == '0' && s[i] == '1') hasZeroOne = true;
    }
    cout << blocks - (hasZeroOne ? 1 : 0) << '\n';
}

int main() {
    int tt;
    cin >> tt;
    while (tt--) solve();
}
1971
E
Find the Car
Timur is in a car traveling on the number line from point $0$ to point $n$. The car starts moving from point $0$ at minute $0$. There are $k+1$ signs on the line at points $0, a_1, a_2, \dots, a_k$, and Timur knows that the car will arrive there at minutes $0, b_1, b_2, \dots, b_k$, respectively. The sequences $a$ and $b$ are strictly increasing with $a_k = n$. Between any two adjacent signs, the car travels with a \textbf{constant speed}. Timur has $q$ queries: each query will be an integer $d$, and Timur wants you to output how many minutes it takes the car to reach point $d$, \textbf{rounded down to the nearest integer}.
For each query, we binary search to find the last sign we passed (since the array $a$ is sorted). Say it is $a_r$. Then, $b_r$ minutes have passed. To find out how much time has passed since we left that sign, we know that the speed between sign $r$ and $r+1$ is $\frac{a_{r+1} - a_r}{b_{r+1} - b_r} \, \frac{\text{distance}}{\text{minute}}$ (by the usual formula for speed). We have passed a distance $d - a_r$ since the last sign, so the total number of minutes since the last sign is $\frac{b_{r+1} - b_r}{a_{r+1} - a_r} \, \frac{\text{minute}}{\text{distance}} \times (d - a_r) \, \text{distance} = (d - a_r) \times \frac{b_{r+1} - b_r}{a_{r+1} - a_r} \, \text{minutes}.$ Be careful about using floating-point numbers, as they can behave strangely. Our solution below doesn't use them at all.
[ "binary search", "math", "sortings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// One test case: k+1 signs at positions a[0..k] reached at minutes
// b[0..k] (a[0] = b[0] = 0, a strictly increasing, a[k] = n). For each
// query d, locate the last sign with a[r] <= d via std::upper_bound
// (replacing the hand-rolled binary search) and interpolate the time
// using integer arithmetic only, which rounds down automatically and
// avoids floating-point pitfalls entirely.
void solve() {
    int n, k, q;
    cin >> n >> k >> q;
    vector<long long> a(k + 1), b(k + 1);
    a[0] = 0;
    b[0] = 0;
    for (int i = 1; i <= k; i++) cin >> a[i];
    for (int i = 1; i <= k; i++) cin >> b[i];
    while (q--) {
        long long d;
        cin >> d;
        // Index of the last sign at position <= d. Since a[0] = 0 <= d
        // and a is strictly increasing, r is always a valid index.
        int r = int(upper_bound(a.begin(), a.end(), d) - a.begin()) - 1;
        if (a[r] == d) {
            cout << b[r] << " ";  // exactly at a sign: time is known
        } else {
            // r < k here (d < a[k] = n), so a[r+1]/b[r+1] exist.
            long long ans = b[r] + (d - a[r]) * (b[r + 1] - b[r]) / (a[r + 1] - a[r]);
            cout << ans << " ";
        }
    }
    // '\n' instead of std::endl: identical output bytes, no flush per test.
    cout << '\n';
}

int32_t main() {
    int t = 1;
    cin >> t;
    while (t--) solve();
}
1971
F
Circle Perimeter
Given an integer $r$, find the number of lattice points that have a Euclidean distance from $(0, 0)$ \textbf{greater than or equal to} $r$ but \textbf{strictly less} than $r+1$. A lattice point is a point with integer coordinates. The Euclidean distance from $(0, 0)$ to the point $(x,y)$ is $\sqrt{x^2 + y^2}$.
There are many solutions to this problem, some of which involve binary search, but we will present a solution that doesn't use it. In fact, our solution is basically just brute force, but with some small observations that make it pass. See the implementation for more detail. First, we can only count points $(x,y)$ such that $x \geq 0$ and $y > 0$; we can multiply by $4$ at the end to get points in all four quadrants, by symmetry. Let's store a variable $\mathrm{height}$ initially equal to $r$. It will tell us the maximum $y$-value to look at. Iterate through all values of $x$ from $0$ to $r$. For each $x$, decrease $\mathrm{height}$ until the distance of $(x,\mathrm{height})$ to the origin is $< r+1$. Then, brute force all values of $y$ from $\mathrm{height}$ downwards until we hit a point whose distance to the origin is $< r$; at this point, we break and add the number of valid points to our total. Note that we essentially only look at points whose distance to the origin is between $r$ and $r+1$; that is, we brute force over all valid points. How many valid points are there? Well, we can roughly estimate the number of points as the area of the region, which is $\pi(r^2 - (r-1)^2) = 2\pi r - \pi$. This means we only visit $\mathcal{O}(r)$ points per test case, which is fast enough.
[ "binary search", "brute force", "dfs and similar", "geometry", "implementation", "math" ]
1,600
#include <iostream>
using namespace std;

// Counts lattice points (x, y) with r <= sqrt(x^2 + y^2) < r + 1.
// Only the quadrant x >= 0, y > 0 is enumerated; by 90-degree rotational
// symmetry the full answer is four times that count.
void solve() {
    long long r;
    cin >> r;
    long long hi = r;   // running upper bound for y; only ever decreases as x grows
    long long cnt = 0;
    for (long long x = 0; x <= r; x++) {
        // lower hi until (x, hi) lies strictly inside the circle of radius r+1
        while (x * x + hi * hi >= (r + 1) * (r + 1)) {
            hi--;
        }
        // walk down from hi, counting points still at distance >= r
        for (long long y = hi; y > 0 && x * x + y * y >= r * r; y--) {
            cnt++;
        }
    }
    cout << cnt * 4 << endl;
}

int main() {
    int t = 1;
    cin >> t;
    while (t--) {
        solve();
    }
}
1971
G
XOUR
You are given an array $a$ consisting of $n$ nonnegative integers. You can swap the elements at positions $i$ and $j$ if $a_i~\mathsf{XOR}~a_j < 4$, where $\mathsf{XOR}$ is the bitwise XOR operation. Find the lexicographically smallest array that can be made with any number of swaps. An array $x$ is lexicographically smaller than an array $y$ if in the first position where $x$ and $y$ differ, $x_i < y_i$.
Note that if $a_i~\mathsf{XOR}~a_j < 4$, then $a_i$ and $a_j$ must share all bits in their binary representation, except for the last $2$ bits. This is because if they have a mismatch in any greater bit, their $\mathsf{XOR}$ will include this bit, making its value $\geq 2^2=4$. This means that we can group the numbers by removing the last two bits and putting equal numbers into the same group. In each group, we can order the numbers freely (since we can swap any two of them), so it's optimal to sort the numbers in each group. Thus we can just divide the numbers into groups and sort each, solving the problem in $\mathcal{O}(n \log n)$. There are several ways to implement this: for instance, you can use a map storing all the groups, and then sort the values in each group. The implementation we used maps each integer to a priority queue, which will automatically sort the numbers in each group.
[ "data structures", "dsu", "sortings" ]
1,700
#include <iostream>
#include <algorithm>
#include <vector>
#include <map>
#include <queue>
using namespace std;

// One test case: a[i] and a[j] are swappable iff a[i] XOR a[j] < 4, i.e. they
// agree on every bit above the lowest two.  Group values by a[i] >> 2; within a
// group any permutation is reachable, so emit each group's values in ascending
// order at the positions the group occupies.
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    // Min-heap per group, so the smallest remaining member is popped first.
    // (Idiomatic greater<int> comparator instead of the negate-and-max trick.)
    map<int, priority_queue<int, vector<int>, greater<int>>> groups;
    for (int i = 0; i < n; i++) {
        cin >> a[i];
        groups[a[i] >> 2].push(a[i]);
    }
    for (int i = 0; i < n; i++) {
        auto& pq = groups[a[i] >> 2];
        cout << pq.top() << " ";
        pq.pop();
    }
    cout << "\n";   // '\n' instead of endl: same bytes, no per-test flush
}

int32_t main(){
    // Fast, untied iostreams: total input size can be large across test cases.
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int t = 1;
    cin >> t;
    while (t--) {
        solve();
    }
}
1971
H
±1
Bob has a grid with $3$ rows and $n$ columns, each of which contains either $a_i$ or $-a_i$ for some integer $1 \leq i \leq n$. For example, one possible grid for $n=4$ is shown below: $$\begin{bmatrix} a_1 & -a_2 & -a_3 & -a_2 \\ -a_4 & a_4 & -a_1 & -a_3 \\ a_1 & a_2 & -a_2 & a_4 \end{bmatrix}$$ Alice and Bob play a game as follows: - Bob shows Alice his grid. - Alice gives Bob an array $a_1, a_2, \dots, a_n$ of her choosing, \textbf{whose elements are all $\mathbf{-1}$ or $\mathbf{1}$}. - Bob substitutes these values into his grid to make a grid of $-1$s and $1$s. - Bob \textbf{sorts} the elements of each column in non-decreasing order. - Alice wins if all the elements in the middle row are $1$; otherwise, Bob wins. For example, suppose Alice gives Bob the array $[1, -1, -1, 1]$ for the grid above. Then the following will happen (colors are added for clarity): $$\begin{bmatrix} \textcolor{red}{a_1} & \textcolor{green}{-a_2} & \textcolor{blue}{-a_3} & \textcolor{green}{-a_2} \\ -a_4 & a_4 & \textcolor{red}{-a_1} & \textcolor{blue}{-a_3} \\ \textcolor{red}{a_1} & \textcolor{green}{a_2} & \textcolor{green}{-a_2} & a_4 \end{bmatrix} \xrightarrow{[\textcolor{red}{1},\textcolor{green}{-1},\textcolor{blue}{-1},1]} \begin{bmatrix} \textcolor{red}{1} & \textcolor{green}{1} & \textcolor{blue}{1} & \textcolor{green}{1} \\ -1 & 1 & \textcolor{red}{-1} & \textcolor{blue}{1} \\ \textcolor{red}{1} & \textcolor{green}{-1} & \textcolor{green}{1} & 1 \end{bmatrix} \xrightarrow{\text{sort each column}} \begin{bmatrix} -1 & -1 & -1 & 1 \\ \mathbf{1} & \mathbf{1} & \mathbf{1} & \mathbf{1} \\ 1 & 1 & 1 & 1 \\ \end{bmatrix}\,. $$ Since the middle row is all $1$, Alice wins. Given Bob's grid, determine whether or not Alice can choose the array $a$ to win the game.
The problem statement is somewhat reminiscent of SAT. Indeed, treating $+1$ as true and $-1$ as false, we have clauses of length $3$, and we need at least $2$ of the variables to be true. We can reduce this to 2-SAT with the following observation: at least $2$ of $(x,y,z)$ are true $\iff$ at least one of $(x,y)$ is true, at least one of $(y,z)$ is true, and at least one of $(z,x)$ is true. That is, for each column of the grid, we make three 2-SAT clauses. Then we just run 2-SAT on these clauses, and output if there is a solution. The time complexity is $\mathcal{O}(n)$ per test case. It might be time-consuming to code 2-SAT during the contest, so we recommend using some standard library (for example, our solution uses AtCoder library).
[ "2-sat", "dfs and similar", "graphs" ]
2,100
// Solution for 1971H: reduce "at least 2 of 3 literals per column are true"
// to 2-SAT (three pairwise 1-of-2 clauses per column), solved with the
// AtCoder Library scc_graph / two_sat implementation inlined below.
#include <bits/stdc++.h>
#include <algorithm>
#include <utility>
#include <vector>

namespace atcoder {

namespace internal {

// Compressed sparse row adjacency: outgoing edges of vertex v occupy
// elist[start[v]] .. elist[start[v + 1] - 1].
template <class E> struct csr {
    std::vector<int> start;
    std::vector<E> elist;
    csr(int n, const std::vector<std::pair<int, E>>& edges)
        : start(n + 1), elist(edges.size()) {
        // counting sort of the edge list by source vertex
        for (auto e : edges) {
            start[e.first + 1]++;
        }
        for (int i = 1; i <= n; i++) {
            start[i] += start[i - 1];
        }
        auto counter = start;
        for (auto e : edges) {
            elist[counter[e.first]++] = e.second;
        }
    }
};

// Strongly connected components (Tarjan, single DFS pass).
// Reference:
// R. Tarjan,
// Depth-First Search and Linear Graph Algorithms
struct scc_graph {
  public:
    scc_graph(int n) : _n(n) {}

    int num_vertices() { return _n; }

    void add_edge(int from, int to) { edges.push_back({from, {to}}); }

    // @return pair of (# of scc, scc id)
    std::pair<int, std::vector<int>> scc_ids() {
        auto g = csr<edge>(_n, edges);
        int now_ord = 0, group_num = 0;
        // ord[v] == -1 means unvisited; low[v] is Tarjan's low-link value
        std::vector<int> visited, low(_n), ord(_n, -1), ids(_n);
        visited.reserve(_n);
        auto dfs = [&](auto self, int v) -> void {
            low[v] = ord[v] = now_ord++;
            visited.push_back(v);
            for (int i = g.start[v]; i < g.start[v + 1]; i++) {
                auto to = g.elist[i].to;
                if (ord[to] == -1) {
                    self(self, to);
                    low[v] = std::min(low[v], low[to]);
                } else {
                    low[v] = std::min(low[v], ord[to]);
                }
            }
            if (low[v] == ord[v]) {
                // v is the root of an SCC: pop the whole component off the stack
                while (true) {
                    int u = visited.back();
                    visited.pop_back();
                    ord[u] = _n;       // sentinel: u is finished / assigned
                    ids[u] = group_num;
                    if (u == v) break;
                }
                group_num++;
            }
        };
        for (int i = 0; i < _n; i++) {
            if (ord[i] == -1) dfs(dfs, i);
        }
        // Tarjan numbers components in reverse topological order; flip the ids
        for (auto& x : ids) {
            x = group_num - 1 - x;
        }
        return {group_num, ids};
    }

    // Materialize the components as vertex lists, grouped by id.
    std::vector<std::vector<int>> scc() {
        auto ids = scc_ids();
        int group_num = ids.first;
        std::vector<int> counts(group_num);
        for (auto x : ids.second) counts[x]++;
        std::vector<std::vector<int>> groups(ids.first);
        for (int i = 0; i < group_num; i++) {
            groups[i].reserve(counts[i]);
        }
        for (int i = 0; i < _n; i++) {
            groups[ids.second[i]].push_back(i);
        }
        return groups;
    }

  private:
    int _n;
    struct edge {
        int to;
    };
    std::vector<std::pair<int, edge>> edges;
};

}  // namespace internal

}  // namespace atcoder

#include <cassert>
#include <vector>

namespace atcoder {

// 2-SAT over n boolean variables, via SCC on the implication graph
// (vertices 2*i and 2*i+1 are the two literals of variable i).
// Reference:
// B. Aspvall, M. Plass, and R. Tarjan,
// A Linear-Time Algorithm for Testing the Truth of Certain Quantified Boolean
// Formulas
struct two_sat {
  public:
    two_sat() : _n(0), scc(0) {}
    two_sat(int n) : _n(n), _answer(n), scc(2 * n) {}

    // Adds the clause (x_i == f) OR (x_j == g) as its two implication edges.
    void add_clause(int i, bool f, int j, bool g) {
        assert(0 <= i && i < _n);
        assert(0 <= j && j < _n);
        scc.add_edge(2 * i + (f ? 0 : 1), 2 * j + (g ? 1 : 0));
        scc.add_edge(2 * j + (g ? 0 : 1), 2 * i + (f ? 1 : 0));
    }

    // True iff satisfiable; on success _answer holds one satisfying assignment.
    bool satisfiable() {
        auto id = scc.scc_ids().second;
        for (int i = 0; i < _n; i++) {
            // a literal and its negation in the same SCC -> contradiction
            if (id[2 * i] == id[2 * i + 1]) return false;
            // assign by component order of the two literals
            _answer[i] = id[2 * i] < id[2 * i + 1];
        }
        return true;
    }

    std::vector<bool> answer() { return _answer; }

  private:
    int _n;
    std::vector<bool> _answer;
    internal::scc_graph scc;
};

}  // namespace atcoder

using namespace std;
using namespace atcoder;

// One test case: read the 3 x n grid of signed variable references, build the
// 2-SAT instance, and print whether Alice can win.
void solve() {
    int n;
    cin >> n;
    // b[i][j] = +v means cell holds a_v, -v means -a_v (VLA: gcc extension)
    int b[3][n];
    for (int i = 0; i < 3; i++) {
        for (int j = 0; j < n; j++) {
            cin >> b[i][j];
        }
    }
    two_sat ts(n);
    for (int j = 0; j < n; j++) {
        // "at least 2 of 3 true" == every pair contains at least one true
        for (int i = 0; i < 3; i++) {
            int nxt = (i + 1) % 3;
            ts.add_clause(abs(b[i][j]) - 1, b[i][j] > 0,
                          abs(b[nxt][j]) - 1, b[nxt][j] > 0);
        }
    }
    cout << (ts.satisfiable() ? "YES\n" : "NO\n");
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    for (int i = 1; i <= tt; i++) {solve();}
    // solve();
}