contest_id
stringlengths 1
4
| index
stringclasses 43
values | title
stringlengths 2
63
| statement
stringlengths 51
4.24k
| tutorial
stringlengths 19
20.4k
| tags
listlengths 0
11
| rating
int64 800
3.5k
⌀ | code
stringlengths 46
29.6k
⌀ |
|---|---|---|---|---|---|---|---|
1400
|
G
|
Mercenaries
|
Polycarp plays a (yet another!) strategic computer game. In this game, he leads an army of mercenaries.
Polycarp wants to gather his army for a quest. There are $n$ mercenaries for hire, and the army should consist of some subset of them.
The $i$-th mercenary can be chosen if the \textbf{resulting} number of chosen mercenaries is not less than $l_i$ (otherwise he deems the quest to be doomed) and not greater than $r_i$ (he doesn't want to share the trophies with too many other mercenaries). Furthermore, $m$ pairs of mercenaries hate each other and cannot be chosen for the same quest.
How many \textbf{non-empty} subsets does Polycarp need to consider? In other words, calculate the number of non-empty subsets of mercenaries such that the size of this subset belongs to $[l_i, r_i]$ for each chosen mercenary, and there are no two mercenaries in the subset that hate each other.
The answer may be large, so calculate it modulo $998244353$.
|
We will use inclusion-exclusion formula to solve this problem (you may read about it here: https://cp-algorithms.com/combinatorics/inclusion-exclusion.html). To put it simply, we are going to count the number of subsets that meet the restrictions on $[l_i, r_i]$, ignoring the edges (I'll call the pairs of mercenaries that hate each other "edges" for simplicity). This number is not the answer, since we counted some subsets that violate the conditions for the edges - so, for each edge, we count the number of subsets that violate the condition on that edge, and subtract it from the answer. But it means that if a subset violates the conditions on multiple edges, we subtracted it multiple times, so, for each pair of edges, we count the subsets that violate both edges in that pair and add them back, then remove the subsets violating the triples, and so on. The mathematical formula for the answer is $\sum \limits_{E \in S} f(E)$, where $S$ is the set containing all $2^m$ subsets of edges, and $f(E)$ is the number of subsets that meet the constraints on $[l_i, r_i]$, and violate the constraints for every edge $e \in E$. Now we have to calculate $f(E)$ efficiently, since the outer loop already runs in $O(2^m)$. If we have to violate a set of edges, then we have a set of mercenaries that we should take (the "endpoints" of the edges), and the number of those mercenaries (let's denote it as $k$) is up to $40$. The size of the subset should meet the constraints on $[l_i, r_i]$ for each of those mercenaries, so let's intersect the segments $[l_i, r_i]$ for them (and if the intersection is empty, then there are no subsets that violate all of those edges). Let the intersection of those segments be $[L, R]$. 
The naive solution would be to iterate on the size of the subset and calculate the number of ways to compose a subset that contains some fixed mercenaries of given size (if the size is $s$, and $c_s$ is the number of mercenaries that are willing to be in a subset of size $s$, then we have to add ${{c_s - k} \choose {s - k}}$ to the answer). So, for each subset $E$, $f(E) = \sum \limits_{s = L}^{R} {{c_s - k} \choose {s - k}}$. This sum can be calculated in $O(1)$ as follows: note that $k$ is up to $40$, so for every possible value of $k$, precalculate the array of prefix sums of those binomial coefficients. This precalculation runs in $O(nm + n \log MOD)$, and each $f(E)$ can be calculated in $O(m \log m)$, so overall the complexity is $O(nm + n \log MOD + 2^m m \log m)$.
|
[
"bitmasks",
"brute force",
"combinatorics",
"dp",
"dsu",
"math",
"two pointers"
] | 2,600
|
#include<bits/stdc++.h>
using namespace std;
const int N = 300043;
const int MOD = 998244353;

// (x + y) mod MOD; also correct for a negative y as long as y > -MOD.
int add(int x, int y)
{
    return ((x + y) % MOD + MOD) % MOD;
}

// (x * y) mod MOD, using a 64-bit intermediate to avoid overflow.
int mul(int x, int y)
{
    return (x * 1ll * y) % MOD;
}

// x^y mod MOD by binary exponentiation, O(log y).
int binpow(int x, int y)
{
    int z = 1;
    while(y > 0)
    {
        if(y % 2 == 1)
            z = mul(z, x);
        x = mul(x, x);
        y /= 2;
    }
    return z;
}

// Modular inverse via Fermat's little theorem (MOD is prime).
int inv(int x)
{
    return binpow(x, MOD - 2);
}

int fact[N];  // fact[i]  = i! mod MOD
int rfact[N]; // rfact[i] = (i!)^{-1} mod MOD

// Fills fact[] and rfact[]. Uses a single modular inversion followed by a
// backward sweep (rfact[i-1] = rfact[i] * i), so the whole table costs
// O(N + log MOD) instead of the naive O(N log MOD) of inverting every
// factorial with its own binpow call.
void prepare_fact()
{
    fact[0] = 1;
    for(int i = 1; i < N; i++)
        fact[i] = mul(i, fact[i - 1]);
    rfact[N - 1] = inv(fact[N - 1]);
    for(int i = N - 1; i > 0; i--)
        rfact[i - 1] = mul(rfact[i], i);
}

// Binomial coefficient C(n, k) mod MOD; 0 for out-of-range arguments.
int c(int n, int k)
{
    if(n < 0 || n < k || k < 0)
        return 0;
    return mul(fact[n], mul(rfact[k], rfact[n - k]));
}
// Counts valid armies via inclusion-exclusion over the m "hate" edges:
// answer = sum over edge subsets E of (-1)^|E| * f(E), where f(E) counts
// size-constrained subsets that contain all endpoints of the edges in E.
int main()
{
int n, m;
scanf("%d %d", &n, &m);
// l[i], r[i]: allowed army sizes for mercenary i; (a[j], b[j]): j-th hate pair.
vector<int> l(n), r(n), a(m), b(m);
for(int i = 0; i < n; i++)
scanf("%d %d", &l[i], &r[i]);
for(int i = 0; i < m; i++)
scanf("%d %d", &a[i], &b[i]);
// cnt[s] = number of mercenaries willing to be in a subset of size s,
// built as a +1/-1 difference array over each [l_i, r_i], then prefix-summed.
vector<int> cnt(n + 2);
for(int i = 0; i < n; i++)
{
cnt[l[i]]++;
cnt[r[i] + 1]--;
}
for(int i = 0; i < n + 1; i++)
cnt[i + 1] += cnt[i];
prepare_fact();
// p[j][i] = prefix sum over s <= i of C(cnt[s] - j, s - j): ways to extend
// j fixed (forced) mercenaries to a subset of size s, accumulated over s.
// j goes up to 2m because a subset of edges has at most 2m distinct endpoints.
vector<vector<int> > p(2 * m + 1, vector<int>(n + 1));
for(int i = 1; i <= n; i++)
for(int j = 0; j <= 2 * m; j++)
p[j][i] = add(p[j][i - 1], c(cnt[i] - j, i - j));
int ans = 0;
// Enumerate all 2^m subsets of edges (m is small; the editorial bounds the
// endpoint count by 40, i.e. m <= 20).
for(int mask = 0; mask < (1 << m); mask++)
{
int sign = 1; // (-1)^popcount(mask), represented as multiplications by MOD-1
set<int> used; // distinct endpoints of the chosen edges, 0-based
for(int i = 0; i < m; i++)
if(mask & (1 << i))
{
sign = mul(sign, MOD - 1);
used.insert(a[i] - 1);
used.insert(b[i] - 1);
}
// Intersect the size windows [l_i, r_i] of all forced mercenaries.
int L = 1, R = n;
for(auto x : used)
{
L = max(L, l[x]);
R = min(R, r[x]);
}
if(R < L) continue; // empty intersection: this edge subset contributes 0
// f(E) = p[k][R] - p[k][L-1] with k = |used| forced members;
// add() tolerates the negated second operand (it stays > -MOD).
ans = add(ans, mul(sign, add(p[used.size()][R], -p[used.size()][L - 1])));
}
printf("%d\n", ans);
}
|
1401
|
A
|
Distance and Axis
|
We have a point $A$ with coordinate $x = n$ on $OX$-axis. We'd like to find an integer point $B$ (also on $OX$-axis), such that the absolute difference between the distance from $O$ to $B$ and the distance from $A$ to $B$ is equal to $k$.
\begin{center}
{\small The description of the first test case.}
\end{center}
Since sometimes it's impossible to find such point $B$, we can, in one step, increase or decrease the coordinate of $A$ by $1$. What is the minimum number of steps we should do to make such point $B$ exist?
|
If $n$ is less than $k$, we have to move $A$ to coordinate $k$, and set the coordinate of $B$ to $0$ (or $k$). So the answer is $k - n$. If $n$ is not less than $k$, let's define the coordinate of $B$ as $m$ ($m \times 2 \le n$). By the condition in the problem, the difference between $(m - 0)$ and $(n - m)$ should be equal to $k$. That is, $(n - m) - (m - 0) = k$, and rearranging the formula, $m = (n - k) / 2$. Because the coordinate of $B$ must be an integer, if $n$ and $k$ have the same parity the answer is $0$; otherwise the answer is $1$ (if we increase the coordinate of $A$ by $1$, $m$ becomes an integer). Time complexity : $O(1)$
|
[
"constructive algorithms",
"math"
] | 900
|
// CF 1401A — Distance and Axis.
// If n < k we must move A right to coordinate k (B goes to 0): answer k - n.
// Otherwise B = (n - k) / 2 works whenever n and k have the same parity
// (answer 0); a single step of A fixes the parity (answer 1).
#include<bits/stdc++.h>
#define endl '\n'
using namespace std;
int main() // was implicit-int "main()", which is not standard C++
{
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    int k, n, t;
    cin >> t;
    while(t--)
    {
        cin >> n >> k;
        if(n < k)
            cout << k - n << endl;       // move A to coordinate k
        else if(n % 2 == k % 2)
            cout << 0 << endl;           // (n - k) / 2 is already an integer
        else
            cout << 1 << endl;           // one step of A repairs the parity
    }
    return 0;
}
|
1401
|
B
|
Ternary Sequence
|
You are given two sequences $a_1, a_2, \dots, a_n$ and $b_1, b_2, \dots, b_n$. Each element of both sequences is either $0$, $1$ or $2$. The number of elements $0$, $1$, $2$ in the sequence $a$ is $x_1$, $y_1$, $z_1$ respectively, and the number of elements $0$, $1$, $2$ in the sequence $b$ is $x_2$, $y_2$, $z_2$ respectively.
You can rearrange the elements in both sequences $a$ and $b$ however you like. After that, let's define a sequence $c$ as follows:
$c_i = \begin{cases} a_i b_i & \mbox{if }a_i > b_i \\ 0 & \mbox{if }a_i = b_i \\ -a_i b_i & \mbox{if }a_i < b_i \end{cases}$
You'd like to make $\sum_{i=1}^n c_i$ (the sum of all elements of the sequence $c$) as large as possible. What is the maximum possible sum?
|
The value of $c_i$ can only be one of three values $\left(-2, 0, 2\right)$. $c_i$ is $-2$ only if $a_i$ is $1$ and $b_i$ is $2$, and $c_i$ is $2$ only if $a_i$ is $2$ and $b_i$ is $1$; otherwise $c_i$ is $0$. So we have to make as few ($a_i, b_i$) pairs of the form ($1, 2$) as possible, and as many pairs of the form ($2, 1$) as possible. To do this, first make as many ($1, 0$), ($0, 2$), and ($2, 1$) pairs as possible. After that, pairing the remaining values doesn't affect the sum of $c_i$. (If elements of $a$ with value $1$ and elements of $b$ with value $2$ are both left over, we have to pair them with each other even though the sum decreases.) Time complexity : $O(1)$
|
[
"constructive algorithms",
"greedy",
"math"
] | 1,100
|
// CF 1401B — Ternary Sequence.
// Only the pair (a_i, b_i) = (2, 1) contributes +2 and (1, 2) contributes -2;
// every other pair contributes 0. Greedily neutralize b's 2s with a's 0s and
// b's 0s with a's 1s, take every (2, 1) pair, then pay for a-1s that are
// still forced onto b-2s.
#include<bits/stdc++.h>
#define endl '\n'
using namespace std;
int main() // was implicit-int "main()", which is not standard C++
{
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    int t;
    cin >> t;
    while(t--)
    {
        // x0/x1/x2: counts of 0/1/2 in a; y0/y1/y2: counts of 0/1/2 in b.
        int m, sum = 0, x0, x1, x2, y0, y1, y2;
        cin >> x0 >> x1 >> x2 >> y0 >> y1 >> y2;
        m = min(x0, y2); // a-0 vs b-2: burns b's dangerous 2s for free
        x0 -= m;
        y2 -= m;
        m = min(x1, y0); // a-1 vs b-0: shelters a's vulnerable 1s
        x1 -= m;
        y0 -= m;
        m = min(x2, y1); // a-2 vs b-1: each such pair is worth +2
        x2 -= m;
        y1 -= m;
        sum += 2 * m;
        sum -= 2 * min(x1, y2); // leftover a-1s forced onto b-2s cost -2 each
        cout << sum << endl;
    }
    return 0;
}
|
1401
|
C
|
Mere Array
|
You are given an array $a_1, a_2, \dots, a_n$ where all $a_i$ are integers and greater than $0$.
In one operation, you can choose two different indices $i$ and $j$ ($1 \le i, j \le n$). If $gcd(a_i, a_j)$ is equal to the minimum element of the \textbf{whole array} $a$, you can swap $a_i$ and $a_j$. $gcd(x, y)$ denotes the greatest common divisor (GCD) of integers $x$ and $y$.
Now you'd like to make $a$ non-decreasing using the operation any number of times (possibly zero). Determine if you can do this.
An array $a$ is non-decreasing if and only if $a_1 \le a_2 \le \ldots \le a_n$.
|
Let's define the minimum element of $a$ as $m$. Note that the elements which are not divisible by $m$ can never be moved: they don't have $m$ as a factor, so their $\gcd$ with any other element can never equal $m$. But we can rearrange the elements divisible by $m$ however we want, in the following way: $\bullet$ Suppose $m = a_x$, and there are two elements $a_y$, $a_z$, where $x$, $y$, $z$ are all different. Swap($a_x$, $a_y$), swap($a_y$, $a_z$), and swap($a_z$, $a_x$). Then only $a_y$ and $a_z$ are swapped compared to the initial state. Repeat this process. So we can rearrange the elements divisible by $m$ into non-descending order. After that, if the whole array is non-descending the answer is $YES$, otherwise $NO$. Time complexity : $O(n \log n)$
|
[
"constructive algorithms",
"math",
"number theory",
"sortings"
] | 1,300
|
// CF 1401C — Mere Array.
// Let m be the array minimum. An element not divisible by m can never be
// swapped (its gcd with anything can't equal m), so it is pinned in place.
// Elements divisible by m can be permuted arbitrarily (via 3-cycles through
// the minimum), so the array is sortable iff every position that disagrees
// with the sorted array holds a multiple of m.
#include<bits/stdc++.h> // was "#import", a deprecated GCC extension
#define endl '\n'
using namespace std;
int a[100005], b[100005];
int main() // was implicit-int "main()", which is not standard C++
{
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    cout.tie(0);
    int t;
    cin >> t;
    while(t--)
    {
        int bad = 0, m = 1000000000, n;
        cin >> n;
        for(int i = 0; i < n; i++)
        {
            cin >> a[i];
            b[i] = a[i];
            m = min(m, a[i]); // overall minimum of the array
        }
        sort(b, b + n); // b = the target (sorted) arrangement
        for(int i = 0; i < n; i++)
            if(a[i] != b[i] && a[i] % m > 0)
                bad = 1; // out of place, but pinned: unsortable
        cout << (bad ? "NO" : "YES") << endl;
    }
    return 0;
}
|
1401
|
D
|
Maximum Distributed Tree
|
You are given a tree that consists of $n$ nodes. You should label each of its $n-1$ edges with an integer in such way that satisfies the following conditions:
- each integer must be greater than $0$;
- the product of all $n-1$ numbers should be equal to $k$;
- the number of $1$-s among all $n-1$ integers must be minimum possible.
Let's define $f(u,v)$ as the sum of the numbers on the simple path from node $u$ to node $v$. Also, let $\sum\limits_{i=1}^{n-1} \sum\limits_{j=i+1}^n f(i,j)$ be a distribution index of the tree.
Find the maximum possible distribution index you can get. Since answer can be too large, print it modulo $10^9 + 7$.
In this problem, since the number $k$ can be large, the result of the prime factorization of $k$ is given instead.
|
Let's define $w_i$ as the product of the number of vertices belonging to each of the two components divided when the $i_{th}$ edge is removed from the tree, and $z_i$ as the number on $i_{th}$ edge. Now a distribution index is equal to $\sum_{i=1}^{n-1}(w_i \times z_i)$. Now there are two cases: $A$ : $m \le n-1$ In this case, we have to label $p_1, p_2, \ldots p_m$ to $m$ distinct edges because we have to minimize the number of $1$-s. And to maximize distribution index, we can label a larger $p_i$ to the edge in which $w_i$ is larger because the following holds: $\bullet$ For four positive integers $a, b, c, d$ ($a \ge b, c \ge d$), $ac + bd \ge ad + bc$ Let's suppose $a=b+x, c=d+y$ $(x, y \ge 0)$. Then the equation can be written as follows: $(b+x)(d+y)+bd \ge (b+x)d+b(d+y)$ $bd+by+xd+xy+bd \ge bd+xd+bd+by$ $xy \ge 0$ Because $x, y \ge 0$, we proved it. And label $1$ to the remaining edges. $B$ : $m > n-1$ In this case, we can make no $1$-s exist out of $n-1$ integers, and some of $n-1$ number would be composite. And to maximize distribution index, we can label the product of $m-n+2$ largest $p_i$ to the edge in which $w_i$ is largest, and label the remaining $p_i$ to the remaining edges in the same way as case $A$ because the following holds: $\bullet$ For five positive integers $a, b, c, d, e$ ($a \ge b, d \ge e$), $acd+be \ge bcd+ae$ Substituting $f = cd$ in the above equation, we can find the equation is same as before. So we proved it. After filling in the edge, calculate it and find the answer. Time complexity : $O(\max(n, m) \log \max(n,m))$
|
[
"dfs and similar",
"dp",
"greedy",
"implementation",
"math",
"number theory",
"sortings",
"trees"
] | 1,800
|
#include<bits/stdc++.h>
#define endl '\n'
using namespace std;
typedef long long LL;
// One directed copy of a tree edge: x -> y (every input edge is stored twice).
struct H{int x, y;};
int ii, n;
const int q = 1e9 + 7;
// p  : prime factors of k; padded with 1s (m < n) or partially merged (m >= n)
// pv : CSR offsets — edges leaving vertex v occupy e[pv[v] .. pv[v+1])
// vi : DFS visited flags (reset at the end of every test case)
// w  : w[i] = s * (n - s) where removing edge i splits the tree into parts of
//      sizes s and n - s, i.e. the number of paths that use that edge
LL p[100005], pv[100005], vi[100005], w[100005];
H e[200040];
// Order edges by source vertex to build the CSR adjacency layout.
int C(H a, H b){return a.x < b.x;}
// Descending comparator for 64-bit values.
int G(LL a, LL b){return a > b;}
// Returns the size of the subtree rooted at v and appends the path count of
// the edge above v (subtree_size * (n - subtree_size)) to w[]. The root's own
// entry is n * 0 = 0; it is excluded later by sorting only n - 1 entries.
// NOTE(review): plain recursion — depth can reach n (~1e5) on a path-shaped
// tree, so this relies on a sufficiently large stack.
LL dfs(int v)
{
LL d = 1;
vi[v] = 1;
for(int i = pv[v]; i < pv[v+1]; i++)
if(!vi[e[i].y])
d += dfs(e[i].y);
w[ii] = d * (n - d);
ii++;
return d;
}
main()
{
ios_base::sync_with_stdio(0);
cin.tie(0);
cout.tie(0);
int t;
cin >> t;
for(;t--;)
{
int k = 0, m;
LL x = 1;
cin >> n;
// Read the n-1 edges and duplicate each one reversed.
for(int i = 0; i < n - 1; i++)
{
cin >> e[i].x >> e[i].y;
e[i + n - 1].x = e[i].y;
e[i + n - 1].y = e[i].x;
}
int sz = 2 * n - 2;
sort(e, e + sz, C);
// Build pv[]: for each vertex j, the first index in e[] whose source is j.
// pv[1] is never written here; it stays 0, which is correct since vertex 1
// always owns the first run of sorted edges.
for(int i = 1; i < sz; i++)
{
if(e[i].x > e[i - 1].x)
{
for(int j = e[i - 1].x + 1; j <= e[i].x; j++)
pv[j] = i;
}
}
for(int j = e[sz - 1].x + 1; j <= n + 2; j++)
pv[j] = sz;
ii = k = 0;
dfs(1);
cin >> m;
for(int i = 0; i < m; i++)
cin >> p[i];
// Largest primes first, heaviest edges first (a>=b, c>=d => ac+bd >= ad+bc).
sort(p, p + m, G);
sort(w, w + n - 1, G);
if(m < n)
// Fewer factors than edges: the leftover edges are labeled 1.
for(int i = m; i < n - 1; i++)
p[i] = 1;
else
{
// More factors than edges: shift w so that the heaviest edge's weight
// w[0] fills slots 0..m-n+1; all of those slots' primes get multiplied
// onto that single edge below. k ends the loop equal to m-n+1.
int i;
for(i = m - 1; i > m - n; k = i, i--)
w[i] = w[i - m + n - 1];
for(; i; i--)
w[i] = w[0];
}
int l = max(m, n - 1);
int i;
// Heaviest edge gets the product of the k+1 largest primes...
for(i = 0, x = w[0]; i <= k; i++)
x = x * p[i] % q;
// ...and every remaining edge contributes w[i] * p[i].
for(;i < l; i++)
x = (x + w[i] * p[i]) % q;
cout << x << endl;
// Reset visited flags for the next test case.
for(int i = 1; i <= n; i++)
vi[i] = 0;
}
}
|
1401
|
E
|
Divide Square
|
There is a square of size $10^6 \times 10^6$ on the coordinate plane with four points $(0, 0)$, $(0, 10^6)$, $(10^6, 0)$, and $(10^6, 10^6)$ as its vertices.
You are going to draw segments on the plane. All segments are either horizontal or vertical and intersect with at least one side of the square.
Now you are wondering how many pieces this square divides into after drawing all segments. Write a program calculating the number of pieces of the square.
|
Under given condition, there are only two cases in which the number of pieces increases. $\bullet$ When a segment intersects with two sides (facing each other) of the square, the number of pieces increases by one. $\bullet$ When two segment intersects in the square (not including sides), the number of pieces increases by one. So the answer is equal to (the number of segment intersecting with two sides of the square) $+$ (the number of intersection in the square). You can sort the segments by ascending order of $y$-coordinate (Set the start and end points of vertical lines separately.) and do sweeping using segment tree to calculate the number of intersections in the square. Time complexity : $O(n \log n + m \log m)$
|
[
"data structures",
"geometry",
"implementation",
"sortings"
] | 2,400
|
#include<bits/stdc++.h>
using namespace std;
typedef long long LL;
// Dual-purpose record. For horizontal segments: k = y, [l, r] = x-range.
// For vertical sweep events: k = x, l = event y, r = +1 (start) / -1 (end).
struct H{int k, l, r;};
const int p = 1048576; // leaf count: 2^20 >= 10^6 + 1 possible x coordinates
// Segment tree over x: l/r hold each node's range, z holds each node's sum
// (z counts how many vertical segments are currently active at each x).
LL l[2100000], r[2100000], z[2100000];
H h[100005], v[200040];
// Horizontals ordered by y.
int C(H a, H b){return a.k < b.k;}
// Events ordered by y; at equal y, start events (+1) come before ends (-1).
int D(H a, H b){return a.l < b.l || a.l == b.l && a.r > b.r;}
LL sum(int x, int y, int d) // find the sum of range [x, y]
{
if(x <= l[d] && r[d] <= y)
return z[d];
if(x > r[d] || y < l[d])
return 0;
return sum(x, y, d << 1) + sum(x, y, (d << 1) + 1);
}
void REP(int i, LL k) // add k to i-th number
{
i += p;
for(z[i] += k; i >>= 1;)
z[i] = z[i << 1] + z[(i << 1) + 1];
}
main()
{
ios_base::sync_with_stdio(0);
cin.tie(0);
cout.tie(0);
// s = piece count: starts at 1 (the intact square), +1 per segment that
// crosses the whole square, +1 per interior intersection point.
LL m, n, s = 1;
cin >> n >> m;
// Initialize leaf ranges, then pull the node ranges up the tree.
for(int i = 0; i < p; i++)
l[i + p] = r[i + p] = i;
for(int i = p; --i;)
{
l[i] = l[i << 1];
r[i] = r[(i << 1) + 1];
}
for(int i = 0; i < n; i++)
{
cin >> h[i].k >> h[i].l >> h[i].r;
if(h[i].r - h[i].l == 1000000) // spans the square left-to-right
s++;
}
sort(h, h + n, C);
// Split each vertical segment into a start (+1) and an end (-1) event.
for(int i = 0; i < m; i++)
{
int j = i << 1;
cin >> v[j].k >> v[j].l >> v[j+1].l;
v[j + 1].k = v[j].k;
v[j].r = 1;
v[j + 1].r = -1;
if(v[j + 1].l - v[j].l == 1000000) // spans the square bottom-to-top
s++;
}
sort(v, v + 2 * m, D);
// Sweep by increasing y. Stopping when either list runs out is safe:
// leftover verticals change nothing, and after all 2m events the tree is
// empty so leftover horizontals would add 0 anyway.
for(int i = 0, j = 0; i < n && j < 2 * m;)
{
// Take the horizontal when it lies strictly below the next event, or ties
// with an END event — a vertical ending at this y still crosses it.
// (Ties with a START event fall to the else branch first, so a vertical
// beginning at this y is counted too.)
if(h[i].k < v[j].l || h[i].k == v[j].l && v[j].r < 0)
{
s += sum(h[i].l, h[i].r, 1); // intersections with currently active verticals
i++;
}
else
{
REP(v[j].k, v[j].r);
j++;
}
}
cout << s;
return 0;
}
|
1401
|
F
|
Reverse and Swap
|
You are given an array $a$ of length $2^n$. You should process $q$ queries on it. Each query has one of the following $4$ types:
- $Replace(x, k)$ — change $a_x$ to $k$;
- $Reverse(k)$ — reverse each subarray $[(i-1) \cdot 2^k+1, i \cdot 2^k]$ for all $i$ ($i \ge 1$);
- $Swap(k)$ — swap subarrays $[(2i-2) \cdot 2^k+1, (2i-1) \cdot 2^k]$ and $[(2i-1) \cdot 2^k+1, 2i \cdot 2^k]$ for all $i$ ($i \ge 1$);
- $Sum(l, r)$ — print the sum of the elements of subarray $[l, r]$.
Write a program that can quickly process given queries.
|
Let's consider the sequence as $0$-based. Then we can find the following two facts: $\bullet$ If we do $Reverse(k)$ operation once, $a_i$ becomes $a_{i\text{^}(2^k-1)}$. $\bullet$ If we do $Swap(k)$ operation once, $a_i$ becomes $a_{i\text{^}(2^k)}.$ So in any state there is an integer $x$ that makes the current $a_i$ equal to the initial $a_{i\text{^}x}$ and current $a_{i\text{^}x}$ is equal to the initial $a_i$ for each $i$ ($0 \le i \le 2^n-1$). (Don't consider the change of the values by $Replace(x,k)$ operation.) Therefore, if we can calculate $\sum_{i=l}^r a_{i\text{^}x}$ fast for arbitrary $x$, also can solve the problem, and it can be done using segment tree. When processing $Replace(i,k)$ queries, replace $a_{i\text{^}x}$ as $k$. And when processing $Reverse(k)$ or $Swap(k)$ queries, just replace $x$ as $x\text{^}(2^k-1)$ or $x\text{^}(2^k)$. And $Sum(l,r)$ queries are left. To calculate this value, divide the segment $[l, r]$ into smaller segments, in which length of each segment is power of $2$ and can be found in the segment tree. (ex : segment $[3, 9]$ is divided into three segments - $[3, 3]$, $[4, 7]$, $[8, 9]$) Next, find the sum of numbers in which index is in each segment. For divided segment $[l, r]$ in which length is $2^k$, the values $l\text{^}x, (l+1)\text{^}x, \ldots (r-1)\text{^}x, r\text{^}x$ are, when sorted, form another segment $[l\text{^}(x\text{&~}(2^k-1)), r\text{^}(x\text{&~}(2^k-1))]$, which $x\text{&~}(2^k-1)$ means the largest of multiples of $2^k$ not greater than $x$. So you can find the sum of numbers in each divided segment $\sum_{i=l}^r$(current $a_i$) as $\sum_{i=l\text{^}(x\text{&~}(2^k-1))}^{r\text{^}(x\text{&~}(2^k-1))}$(initial $a_i$). Repeat this and we can find the answer for each $Sum$ query. Time complexity : $O(nq)$
|
[
"binary search",
"bitmasks",
"data structures"
] | 2,400
|
// CF 1401F — Reverse and Swap.
// Segment tree built once over the INITIAL array. Reverse(k) maps a_i to
// a_{i ^ (2^k - 1)} and Swap(k) maps a_i to a_{i ^ 2^k}, so every sequence of
// those operations collapses into one xor mask b: the CURRENT a_i equals the
// initial a_{i ^ b}. Queries are answered against the static tree using b.
//
// Fixes vs the transcribed original: b was a local in main() while SUM() read
// it (did not compile) — it is now a global; SUM recursed on "z << 1" and
// returned "a[z]", using the xor offset z (a multiple of the node length)
// where the tree node index d is required — both now use d.
#include<bits/stdc++.h>
#define endl '\n'
using namespace std;
typedef long long LL;
LL p, a[528000], ll[528000], rr[528000];
LL b; // xor mask accumulated from Reverse/Swap queries; read by SUM()

// Sum of the CURRENT array over [l, r], starting at tree node d.
// Node d stores initial indices [ll[d], rr[d]] of length 2^k; under mask b
// those elements occupy the contiguous current range [ll[d]^z, rr[d]^z]
// with z = b & ~(2^k - 1) (b with its low k bits cleared).
LL SUM(LL l, LL r, LL d)
{
	LL x = ll[d], y = rr[d];
	LL z = b & ~(y - x);
	x ^= z;
	y ^= z;
	if(x > r || y < l)
		return 0;                                  // maps fully outside the query
	if(x < l || y > r)                             // partial overlap: recurse
		return SUM(l, r, d << 1) + SUM(l, r, (d << 1) + 1);
	return a[d];                                   // maps fully inside the query
}

// Point assignment: set the i-th INITIAL element to k and refresh ancestors.
void REP(LL i, LL k)
{
	i += p;
	for(a[i] = k; i >>= 1;)
		a[i] = a[i << 1] + a[(i << 1) + 1];
}

int main()
{
	ios_base::sync_with_stdio(0);
	cin.tie(0);
	cout.tie(0);
	int i, n, q;
	cin >> n >> q;
	p = 1 << n;
	b = 0;
	// Leaves: values and index ranges.
	for(i = 0; i < p; i++)
	{
		cin >> a[p + i];
		ll[p + i] = rr[p + i] = i;
	}
	// Internal nodes, bottom-up (i runs p-1 .. 1).
	for(; --i;)
	{
		a[i] = a[i << 1] + a[(i << 1) + 1];
		ll[i] = ll[i << 1];
		rr[i] = rr[(i << 1) + 1];
	}
	for(; q--;)
	{
		int m;
		cin >> m;
		if(m == 1) // Replace(x, k): current position x-1 is initial index (x-1)^b
		{
			int k, x;
			cin >> x >> k;
			REP((x - 1) ^ b, k);
		}
		else if(m == 2) // Reverse(k): a_i <-> a_{i ^ (2^k - 1)}
		{
			int k;
			cin >> k;
			b ^= (1 << k) - 1;
		}
		else if(m == 3) // Swap(k): a_i <-> a_{i ^ 2^k}
		{
			int k;
			cin >> k;
			b ^= 1 << k;
		}
		else // Sum(l, r), 1-based inclusive
		{
			int l, r;
			cin >> l >> r;
			cout << SUM(l - 1, r - 1, 1) << endl;
		}
	}
}
|
1402
|
A
|
Fancy Fence
|
Everybody knows that Balázs has the fanciest fence in the whole town. It's built up from $N$ fancy sections. The sections are rectangles standing closely next to each other on the ground. The $i$th section has integer height $h_i$ and integer width $w_i$. We are looking for fancy rectangles on this fancy fence. A rectangle is fancy if:
- its sides are either horizontal or vertical and have integer lengths
- the distance between the rectangle and the ground is integer
- the distance between the rectangle and the left side of the first section is integer
- it's lying completely on sections
What is the number of fancy rectangles? This number can be very big, so we are interested in it modulo $10^9+7$.
|
Subtask 2 $N, h_i \leq 50, w_i = 1$ There are at most $50^4$ different rectangles. For all of them, we can check if they are fancy or not. This can be done in constant time with some precomputation. Subtask 3 $h_i = 1$ or $h_i = 2$ for all $i$. Consider a rectangle with height $1$ and width $K$. Lemma: There are $\binom{K+1}{2}$ fancy rectangles in it. Proof: There are $K-p+1$ fancy rectangles with width $p$: $\sum_{p=1}^{K} (K-p+1) = \binom{K+1}{2}$ Now we can solve subtask 3: There are 2 types of fancy rectangles, the ones with height $1$, and the ones with height $2$. We can easily calculate the answer, applying the previous lemma in $O(N)$ time. A helpful observation Consider a rectangle with height $A$ and width $B$. Let's denote the number of fancy rectangles contained within this big rectangle by $T_{A,B}$. Now we have $T_{A,B} = \binom{A+1}{2} \cdot \binom{B+1}{2}$ Note that $\binom{X}{2} = \dfrac{X(X-1)}{2}$, where $X(X-1)$ is always divisible by 2. Subtask 4 The solution follows easily from the previous lemma. This subtask can be solved in $O(N)$ time. Subtask 5 The heights are in increasing order. Let $W_i$ be the sum of section widths from the $i$th to the $N$th section. The answer is given by the formula: $\sum_{i=1}^{N} T_{h_i,W_i} - T_{h_{i-1},W_i},$ This way, the subtask can be solved in $O(N)$ time. Subtask 6 $N \leq 1000$ For all $1 \leq i \leq j \leq N$, we calculate the number of fancy rectangles whose left side is part of the $i$th section and right side is part of the $j$th section. Let $H$ be the minimum of section heights from the $i$th to the $j$th section. Let $W$ be the sum of section widths from the $i$th to the $j$th section. The number of fancy rectangles is (if $i \ne j$): $T_{H,W} - T_{H,W-w_i} - T_{H,W-w_j} + T_{H,W-w_i-w_j},$ This subtask can be solved in $O(N^2)$. Subtask 7 Original constraints. Sorting Let's sort the sections in decreasing order according to their heights. 
Let us denote the original index of the $i$ section by $p_i$. In the $i$th step, we calculate the number of fancy rectangles lying exclusively on the first $i$ sections. Let $x$ be the smallest index for which the $x$th, $x+1$th ... $p_i-1$th sections preceed the $p_i$th section. Let $y$ be the biggest index for which the $p_i+1$th, $p_i+2$th ... $y$th sections succeed the $p_i$th section. Write $X_i = \sum_{j=x}^{p_i-1} w_j, \qquad Y_i = \sum_{j=p_i+1}^{y} w_j.$ This subtask can be solved in $O(NlogN)$ time. Linear Let's iterate through the sectionss from left to right maintaining a stack of sections with the following property: from bottom to top the height of sections are increasing and after the $i$th section is processed every fancy rectangle not present in the stack is already counted. When at the $i$th section three cases are possible, let the top of the stack contain a section with dimensions $H\times W$: if $h_i=H$ we can easily modify $W$ and increase it by $w_i$ not hurting the invariant described above if $h_i>H$ we can just push a $h_i \times w_i$ rectangle to the top of the stack if $h_i<H$ then we have to pop some elements from the stack until $h_i$ will be greater or equal to the height of the section on the top of the stack. While doing the popping we accumulate the width of the new top element (i.e. the sum of widths of all elements popped plus $w_i$) and also with a similar strategy to subtask 5 the number of fancy rectangles that will not be present in the stack should be calculated. Overall the time complexity of this solution is $O(N)$ since every rectangle is pushed and popped exactly once while doing a constant amount of operations.
|
[
"*special",
"data structures",
"dsu",
"implementation",
"math",
"sortings"
] | 1,800
| null |
1402
|
B
|
Roads
|
The government of Treeland wants to build a new road network. There are $2N$ cities in Treeland. The unfinished plan of the road network already contains $N$ road segments, each of which connects two cities with a straight line. No two road segments have a common point (including their endpoints). Your task is to determine $N-1$ additional road segments satisfying the following conditions:
- Every new road segment must connect two cities with a straight line.
- If two segments (new or old) have a common point, then this point must be an endpoint of both segments.
- The road network connects all cities: for each pair of cities there is a path consisting of segments that connects the two cities.
|
We assume that for segment $s=(p,q)$ the relation $p.x<q.x$ or $p.x=q.x$ and $p.y<q.y$ holds, therefore we can say that $p$ is the left endpoint of the segment. Consider the sequence of segment endpoints ordered by their x-coordinates. We apply the sweep-line method, the event points of the sweeping are the x-coordinates of the endpoints. For a given $sx$ coordinate let us denote by $Sl(sx)$ the set of segments intersecting the vertical line whose $x$ coordinate is $sx$. Elements of the set $Sl$ are ordered according to the $y$-coordinate of the intersection points. As the sweep progresses from left to right, if the point is a left endpoint, then it is inserted into $Sl$, and deleted if it is a right endpoint. We add a sentinel, as the figure shows. Each segment endpoint which is on the left of the sweep-line is already a node of the tree, the partial output. For each segment $u \in Sl$ we compute a segment endpoint $Rmost(u)$, with the following property. Let $u \in Sl$ and $v \in Sl$ be segments such that $v$ is directly next to $u$ according to the ordering. Then for every point $q$ on the sweep-line located between the intersection points of $u$ and $v$, if the $\overline{Rmost(u), q}$ intersects any old or newly added segment then the intersection point must be an endpoint. Therefore if we insert a left endpoint $q$ into $Sl$ then the segment $(Rmost(u), q)$ will be added to the solution. Moreover, during both insertion and deletion we can update $Rmost(u)$ value in $O(log\, N)$ time if we represent the set $Sl$ by STL ordered set. The running time of the whole algorithm is $O(N \log N)$. Subtask 1 Constraint: All segments are vertical. This subtask can be solved by sorting the segments and connecting consecutive segments' left and right endpoints. The sorting relation is easy to compute: $s_1=(p_1,q_1) < s_2=(p_2, q_2)$ iff $p_1.x<p_2.x$ or $p_1.x=p_2.x$ and $p_1.y< p_2.y$. Subtask 2 Constraint: Each pair of segments are parallel. 
This subtask also admits a solution by sorting, but the computation of the sorting relation is not so easy. Namely, $s_1=(p_1,q_1) < s_2=(p_2, q_2)$ iff both $p_1$ and $q_1$ are located on the left of the line determined by $s_2$, or $s_1$ and $s_2$ are collinear and $q_1$ is in between $p_2$ and $q_2$. Subtask 3 Constraint: Each segment is either horizontal or vertical. This subtask can be solved by a simplified implementation of the model solution algorithm, because it is easy to compute the sorting relation of the set $Sl$. Subtask 4 Constraint: $N \le 10000$ There is an $O(N^2)$ running time algorithm that can solve this subtask. For example, if we represent the set $Sl(sx)$ by a sorted array.
|
[
"*special",
"geometry",
"sortings"
] | 2,900
| null |
1402
|
C
|
Star Trek
|
The United Federation of Planets is an alliance of $N$ planets, they are indexed from $1$ to $N$. Some planets are connected by space tunnels. In a space tunnel, a starship can fly both ways really fast. There are exactly $N-1$ space tunnels, and we can travel from any planet to any other planet in the Federation using these tunnels.
It's well known that there are $D$ additional parallel universes. These are exact copies of our universe, they have the same planets and space tunnels. They are indexed from $1$ to $D$ (our universe has index $0$). We denote the planet $x$ in universe $i$ by $P_x^i$. We can travel from one universe to another using dimension portals. For every $i$ ($0\leq i \leq D-1$), we will place exactly one portal that allows us to fly from $P_{A_i}^i$ to $P_{B_i}^{i+1}$, for some planet indices $A_i$ and $B_i$ (i.e. $1 \leq A_i, B_i \leq N$).
Once all the portals are placed, Starship Batthyány will embark on its maiden voyage. It is currently orbiting around $P_1^0$. Captain Ágnes and Lieutenant Gábor have decided to play the following game: they choose alternately a destination (a planet) to fly to. This planet can be in the same universe, if a space tunnel goes there, or it can be in another universe, if a portal goes there. Their aim is to visit places \underline{where no one has gone before}. That's why, once they have visited a planet $P_x^i$, they never go back there (but they can visit the planet $x$ in another universe). Captain Ágnes chooses the first destination (then Gábor, then Ágnes etc.). If somebody can't choose a planet where they have not been before in his/her turn, he/she loses.
Captain Ágnes and Lieutenant Gábor are both very clever: they know the locations of all tunnels and portals, and they both play optimally. For how many different placements of portals does Captain Ágnes win the game? Two placements are different if there is an index $i$ ($0\leq i \leq D-1$), where the $i$th portal connects different pairs of planets in the two placements (i.e $A_i$ or $B_i$ differs).
This number can be very big, so we are interested in it modulo $10^9+7$.
|
Subtask 2 $N = 2$ The Captain always wins. A possible winning strategy: she uses only tunnels. In this way, Gábor is forced to use only portals. After using a portal they will be in a new universe where the Captain can use the tunnel. So Captain can always move after Gábor, but there is a point where Gábor can't move. That's why Gábor can't win. The answer is the total number of possible placements: $4^D$. it can be computed in $O(logD)$ operations via fast exponentiation. This subtask can be solved in $O(logD)$ Winning-Losing states Lets play this game in a rooted tree where the first player moves from the root $r$. Let's call a node L(osing)-state if the player moving from there can't win. Call that node W(inning)-state otherwise. The players can only increase the distance from $r$; that's why every leaf is an L-state. A node is W-state iff it has an L-state child. The root's state can be calculated in $O(N)$ operation with dfs if the size of the tree is $N$. $\mathbf{L}$ : set of nodes that are L-states as the root $\mathbf{W}$ : set of nodes that are W-states as the root Subtask 3 $N \leq 100, D = 1$ We test all possible placements. A placement will give us a tree of size $2N$ rooted at $P_1^0$. For all possibility we check the root's state. There are $O(N^2)$ different placements and it takes $O(N)$ operation to check a single one. This subtask can be solved in $O(N^3)$ Critical L-state It's clear that we have $D+1$ trees of the same structure and they are connected into a bigger tree. This big tree is rooted at $P_1^0$, but all small trees have a root-like node in the big tree ($P_1^0$ in the first universe and $P_{B_{i-1}}^i$ in the $i$th parallel universe). We will work now with the small tree of size $N$. Let's root this tree in an arbitrary node $r$. Let's call this tree $tree_r$. Let's denote the parent of node $c$ by $P(c)$. Connecting a new node $y$ with a given state to a node $x$ (of the original tree) may change some states. 
It can be proved that the state of $x$ changes iff $x$ and $y$ are both L-states. In this case, $x$ will become W-state. This may cause further changes in the tree: If $P(x)$ has only one L-state child ($x$), then its state will also change from W to L. If $P(P(x))$ is L-state, then its state will also change from L to W. $P(P(P(x)))$ will act like $P(x)$. This wave of change will stop at some node $z$, where $z$ will be the uppermost node whose state changed. We call $x$ a critical L-state if $z = r$. $\mathbf{C_r}$ : set of nodes that are critical L-states when $r$ is the root. $\mathbf{C_r}$ can be computed in $O(N)$ time for a given $r$ using dfs. Subtask 4 $N \leq 1000$ and $D = 1$ We should connect 2 uniform trees of size N. The first tree is rooted at index 1 (the starting node). If the root of $tree_1$ is W-state it will only change it's state if we connect a L-state to one of its critical node. That's why; the answer is $N*|W| + (N-|C_1|)*|L|$. If the root of $tree_1$ is L-state it will only change it's state if we connect a L-state to one of its critical node. That's why; the answer is $|C_1|*|L|$. Calculating $|C_1|$ requires $O(N)$ time while calculating $|L|$ and $|W|$ requires $O(N^2)$. This subtask can be solved in $O(N^2)$ Subtask 5 $D = 1$ We must calculate $|L|$ and $|W|$ faster than in subtask 4. Let's say the original tree is rooted in $v$ and we want to reroot this tree in one of its neighbors, $u$. We can see that only $v$'s and $u$'s state may change while doing this. The new states can be computed in constant time if we know the number of L-state children for every node (which can be computed by a single dfs). We can reroot the tree easily in all nodes with one dfs. This subtask can be solved in $O(N)$ Subtask 6 $N \leq 1000, D \leq 10^5$ For all possible roots $r$ we calculate $|C_r|$. This takes $O(N^2)$ time. 
Let us define $L_D$ as the number of ways to choose a starting node, and install portals, By definition, the W/L status of the starting node $v$ will change if and only if we add an edge from a node in $\mathbf{C_v}$ leading to an L state. Therefore, the number of ways to add all $D$ portals in a way that changes the status of the starting node is $\left|\mathbf{C_v}\right| L_{D-1}.$ We can now calculate $(L_D)_v$, the number of ways to make $v$ a losing root with respect to the remaining $D$ (plus one) universes: if $v$ is W, then we have to add the remaining portals in a way that changes the status of $v$; if $v$ is L, then we have to add them in any other way. Hence, $(L_D)_v = \begin{cases} \left|\mathbf{C_v}\right| L_{D-1}& \textit{if } v\in W\\ N^{2D} - \left|\mathbf{C_v}\right| L_{D-1} & \textit{if }v\in L. \end{cases}$ $L_D = \sum_{v} (L_D)_v\\ = \sum_{v\in W}\left|\mathbf{C_v}\right| L_{D-1} + \sum_{v\in L}(N^{2D} - \left|\mathbf{C_v}\right| L_{D-1})\\ = \left|\mathbf{L}\right|N^{2D} + \left(\sum_{v\in W}\left|\mathbf{C_v}\right| - \sum_{v\in L}\left|\mathbf{C_v}\right|\right) L_{D-1}\\ = \left|\mathbf{L}\right|N^{2D} + E \cdot L_{D-1} \textit{where }E\triangleq \left(\sum_{v\in W}\left|\mathbf{C_v}\right| - \sum_{v\in L}\left|\mathbf{C_v}\right|\right).$ In the last universe, we have $L_0 = |\mathbf{L}|$, by definition. The answer to the original question is the number of ways to make the starting node $v_1$ into W, which is given by $\textit{Solution} = N^{2D} - (L_D)_{v_1}.$ We can calculate this value in $O(D)$ time using dynamic programming. This subtask can be solved in $O(N^2+D)$ Subtask 7 $D \leq 10^5$ We must calculate $|C_r|$ for all $r$ faster than in subtask 6. We can use the idea described in subtask 5 (when calculating $|L|$ and $|W|$ fast). This subtask can be solved in $O(N+D)$ Subtask 8 Original constraints. Solution 1: This subtask can be solved like subtask 6 but we calculate $L_1$, $L_2$, $L_4$, $L_8$ ... (i.e. 
$L_{2^i}$), where we can compute $L_{2^i}$ from $L_{2^{i-1}}$. With the bit-representation of $D-1$ we can calculate $L_{D-1}$ in $O(logD)$ operations. This subtask can be solved in $O(N+logD)$ Solution 2: Closed Form To solve the last subtask, we need to calculate $L_{D-1}$ in sub-linear time. To do this, we can solve the recurrence relation, which yields the closed form $L_{D-1} = |L| \frac{N^{2D} - E^{D}}{N^2 - E}.$ This could be calculated via $O(\log D)$ exponentiation and modular inverse, but this is not necessary: we can easily eliminate the division by writing $a \triangleq N^2$ and $b \triangleq E$, and using the well-known identity for $(a^D - b^D)$ to get $L_X = |L| \sum_{k=0}^{X} a^{k} b^{X-k}.$ $L_{2X+1} = (b^{X+1} + a^{X+1}) L_{X}\\ L_{2X} = b^{X+1} L_{X-1} + |L| a^X b^X + a^{X+1} L_{X-1}.$
|
[
"*special",
"combinatorics",
"dfs and similar",
"dp",
"games",
"graphs",
"matrices",
"trees"
] | 2,600
| null |
1403
|
A
|
The Potion of Great Power
|
Once upon a time, in the Land of the Shamans, everyone lived on the Sky-High Beanstalk. Each shaman had a unique identifying number $i$ between $0$ and $N-1$, and an altitude value $H_i$, representing how high he lived above ground level. The distance between two altitudes is the absolute value of their difference.
All shamans lived together in peace, until one of them stole the formula of the world-famous Potion of Great Power. To cover his/her tracks, the Thief has put a Curse on the land: most inhabitants could no longer trust each other...
Despite the very difficult circumstances, the Order of Good Investigators have gained the following information about the Curse:
- When the Curse first takes effect, everyone stops trusting each other.
- The Curse is unstable: at the end of each day (exactly at midnight), one pair of shamans will start or stop trusting each other.
- Unfortunately, each shaman will only ever trust at most $D$ others at any given time.
They have also reconstructed a log of who trusted whom: for each night they know which pair of shamans started/stopped trusting each other.They believe the Thief has whispered the formula to an Evil Shaman. To avoid detection, both of them visited the home of one of their (respective) trusted friends. During the visit, the Thief whispered the formula to the Evil Shaman through the window. (Note: this trusted friend did not have to be home at the time. In fact, it's even possible that they visited each other's houses – shamans are weird.)
Fortunately, whispers only travel short distances, so the Order knows the two trusted friends visited (by the Thief and the Evil Shaman) must live very close to each other.
They ask you to help with their investigation. They would like to test their suspicions: what if the Thief was $x$, the Evil Shaman was $y$, and the formula was whispered on day $v$? What is the smallest distance the whispered formula had to travel? That is, what is the minimum distance between the apartments of some shamans $x'$ and $y'$ (i.e. $\min\left(\left|H_{x'} - H_{y'}\right|\right)$), such that $x'$ was a trusted friend of $x$ and $y'$ was a trusted friend of $y$ on day $v$?
They will share all their information with you, then ask you a number of questions. You need to answer each question immediately, before receiving the next one.
|
We denote the maximum number of edges in the graph by $M$. You can see that $U$ is an upper bound for it. Subtask 2 $Q, U \leq 1000$ We can store each operation, and replay them for each question. We can store all edges in a data structure, or keep neighbours in separate data structures for each node. When answering a question, find the neighbours of both nodes, and consider the distance of each pair of neighbours, computing the minimum. This yields to a simple solution in $O(QUD + QD^2)$ time, or $O(QU\log(D) + QD^2)$ when using an associative container, like std::set. Ordering Trick Although not required for this subtask, we can improve the second part by ordering the neighbour lists by $H$ value, and stepping through them simultaneously, using two pointers. We always step in the list in which the pointer points to the entry with the smaller $H$ value out of the two, and consider the current pair for the minimum computation: The correctness of this is easy to prove. Subtask 3 $V=U$ for all queries In this case, each question will refer to the same version (the final one). Hence, we can just apply all updates at the start once (applying updates is done in the same way as in Subtask 2), and then answer questions on this single version (like before). Using an efficient data structure and the ordering trick (we can actually pre-sort, before answering any questions), we can solve this subtask in $O(U\log(D) + M\log(D) + Q D \log(D))$. Subtask 4 $H[i] \in \{0,1\}$ for all nodes $i$ For a node $u$ and version $V$, we need to be able to tell whether $u$ had a neighbour $u'$ with $H[u'] = f$ for both possible values of $f$ ($0$ or $1$). Once we have obtained this information for both $X$ and $Y$, we can easily work out the answer. For each node $u$ and possible $f$ value, let us build an ordered list of 'events' of the following types: Node $u$ stopped having any neighbours $u'$ with $H[u'] = f$ in version $V$. 
Node $u$ started having neighbours with $H[u'] = f$ in version $V$. This yields a solution in $O(U\log(M) + Q\log(U))$ time and $O(U)$ space for this special case. Subtask 5 $U,N \leq 10000$ $\sqrt{U}$ checkpoints We first apply all updates in order, producing $\sqrt{U}$ checkpoints, evenly spaced, then - for each question - we simulate updates from the closest checkpoint (in the same way as we did for Subtask 1). An efficient implementation of this can achieve $O(U\log(M) + \sqrt{U} M + Q \sqrt{U} \log(M) + Q D \log(D))$ time and $O(\sqrt{U}M)$ memory. Save neighbour list by node, binary search by version Another solution is to separate updates by node, and save the neighbour list of the updated node for each update (in a vector of neighbour lists for that node). Then, we can binary search for the neighbour list at version $V$. This can be implemented in $O(UD + Q \log(U) + QD)$ time and $O(UD)$ space. Self-copying data structures We can use a self-copying (also known as persistent or copy-on-write) data structure. These data structures are constructed as a (directed) tree, where each node holds some information. When updates are applied, we copy every node that was modified, including nodes whose children are modified, thus each version will have its own root node, from which queries can be performed. There are multiple possibilities here from static binary trees (holding neighbour lists, or neighbours directly) to balanced binary search trees (e.g. treap). These will solve this subtask, but will struggle to gain full marks for the problem due to exceeding memory constraints: the static versions have a high complexity ($O(ND + U\log(ND))$ space, or worse), and the BST version has very high constants (both in space and time). One segment tree We need to find when each edge is present in the graph. For each edge, this is the union of contiguous intervals. In total, we have at most $U$ intervals. Take a segment tree of length $U$, with one leaf per version. 
Each node will contain a vector of edges, ordered by starting node (edge $e$ is stored in the tree node for $[a,b]$, if edge is present in each version during the interval $[a,b]$ - and not entirely present in the parent's interval). This has a combined space requirement of $O(U\log(U))$. For each question, we look up edges starting from $X$ and $Y$ using binary search in the vector of each of the $\log U$ relevant tree nodes. This can be implemented in $O(U\log^2(U) + Q\log^2(U) + QD\log(D))$ time and $O(U\log U)$ space. With a few tricks and an efficient implementation, this solution could possibly pass subtask 6. Subtask 6 No additional constraints Save neighbour list by node, reduce storage space by constant factor Clearly, saving each version of the neighbour lists of each node will not fit in memory for these limits. However, since memory limits are very generous, we can cut this down by a reasonably small constant $C$: we only save the neighbour list of node $u$ after every $C$'th update that affects $u$. We also save an ordered list of updates affecting each node. For each question, we only need to replay at most $2C$ updates ($C$ for $X$ and $C$ for $Y$). Choosing $C\approx 50$ will suffice the pass every subtask. The time complexity is $O(U\log D + \frac{UD}{C} + Q(D+C\log C))$, using $O(\frac{UD}{C})$ memory. We think this solution is interesting in the sense that it demonstrates how big-$O$ complexity can often be misleading for real-world problems: this solution is far simpler than other solutions, some of which do not even gain $100$ marks, and it passes the limits because the constants involved (normally hidden by big-$O$ complexity classes) are far smaller. Multiple segment trees We can eliminate the binary search from the previous segment tree solution, by keeping a separate segment tree for each node. Na"ively, this will not fit in the memory limits. 
However, we can save space by only keeping a leaf for each update where the given node was affected. This yields a solution in $O(U\log(U) + Q\log(U) + QD\log(D))$ time and $O(U\log(U))$ space.
|
[
"*special",
"2-sat",
"binary search",
"data structures",
"graphs",
"interactive",
"sortings",
"two pointers"
] | 2,400
|
p1 = 0, p2 = 0
while (p1 < l1.length) and (p2 < l2.length)
consider(H[l1[p1]], H[l2[p2]])
if (H[l1[p1]] <= H[l2[p2]])
then p1++
else p2++
|
1403
|
B
|
Spring cleaning
|
Spring cleanings are probably the most boring parts of our lives, except this year, when Flóra and her mother found a dusty old tree graph under the carpet.
This tree has $N$ nodes (numbered from $1$ to $N$), connected by $N-1$ edges. The edges gathered too much dust, so Flóra's mom decided to clean them.
Cleaning the edges of an arbitrary tree is done by repeating the following process: She chooses 2 different leaves (a node is a leaf if it is connected to exactly one other node by an edge), and cleans every edge lying on the shortest path between them. If this path has $d$ edges, then the cost of cleaning this path is $d$.
She doesn't want to harm the leaves of the tree, so she chooses every one of them \textbf{at most once}. A tree is cleaned when all of its edges are cleaned. The cost of this is the sum of costs for all cleaned paths.
Flóra thinks the tree they found is too small and simple, so she imagines $Q$ variations of it. In the $i$-th variation, she adds a total of $D_i$ extra leaves to the \textbf{original} tree: for each new leaf, she chooses a node from the \textbf{original} tree, and connects that node with the new leaf by an edge. Note that some nodes may stop being leaves during this step.
For all these $Q$ variations, we are interested in the minimum cost that is required to clean the tree.
|
Cleanable tree It is obvious that a tree is not cleanable if it has an odd number of leaves. Also, every tree with an even number of leaves is cleanable. Subtask 2 $Q = 1$, there is an edge between node $1$ and $i$ for every $i$ $(2 \leq i \leq N)$ Flóra can't add extra leaf to node $1$ If we add 2 extra leaves to an inner node (not a leaf), it's a good strategy to clean the path between these extra leaves. So if we add an even number of extra leaves to an inner node, we can pair all these extra leaves. Otherwise there is an extra leaf without a pair. This leaf will be added to the tree. A similar thing happens if we add leaves to an original leaf. In this subtask we know that every path cleaning will go through node $1$ (except those that we already paired). So we just have to add the distances from node $1$. This subtask can be solved in $O(N + D_1)$ Subtask 3 $Q$ = 1, there is an edge between nodes $i$ and $i+1$ for all $i$ $(1 \leq i < N)$ Flóra can't add extra leaf to node $1$ nor node $N$ It's a good idea to clean the path between node 1 and node N. After this we should just simply pair the extra leaves in an optimal way. This subtask can be solved in $O(N+sum(D_i))$ Even-odd nodes It can be proved that in the optimal solution, all edges are cleaned at most twice. So our task is to minimize the number of edges cleaned twice. Let's root the tree in an inner node $r$ (It's possible since $N>2$). We denote the parent of a node $u$ by $P(u)$ in this rooted tree. Let's call a node $u$ even if in its subtree there is an even number of leaves. Call it odd otherwise (all leaves are odd nodes). It can be proved that we clean the edge from $u$ to $P(u)$ twice iff $u$ is an even node (where $u \ne r$). Let $E$ be the set of even nodes. In this case, the minimum required cost for the original tree is $N + |E| - 2$ if $r$ is an even node. We can compute which nodes are even in $O(N)$ time with a single dfs. 
Subtask 4 $N \leq 20000$, $Q \leq 300$ We can build up every tree variation and calculate the number of even nodes for it. This subtask can be solved in $O(N\cdot Q + sum(D_i))$ Path to the root For every node $u$ let's denote its distance from the root $r$ by $D(u)$. Let $S(u)$ denote the number of even nodes on the path from $u$ to $r$. This means that the number of odd nodes on the path from $u$ to $r$ is $D(u)+1-S(u)$. The previous values can be computed in $O(N)$ time for the original tree using a single dfs. Subtask 5 The original tree is a perfect binary tree rooted at node $1$ In this case $D(u) <= logN$ for every $u$. When adding a leaf to node $u$, we change all parities from $u$ to $r$, which takes at most $O(logN)$ time. This subtask can be solved in $O(N + sum(D_I) \cdot logN)$. Subtask 6 $D_i = 1$ We calculate the minimum required cost for the original tree in $O(N)$. If we add an extra leaf to an original leaf the cleaning cost increases by 1, but nothing else happens. If we add an extra leaf to an inner node $u$, then every node on the path from $u$ to $r$ will change parity. So the answer is $N + |E| - S(u) + (D(u) - S(u))$ if $r$ was and odd node in the original tree. This subtask can be solved in $O(N + Q)$ Subtask 7 Original constraints. Virtual tree approach Adding an extra leaf to node $u$ may cause parity change on the path from $u$ to $r$. If we add an extra leaf to node $v$ too then the parities from node $LCA(u,v)$ (LCA=lowest common ancestor) to $r$ will change twice, i.e. it doesn't change at all. If we add more leaves then there will be paths where the parity has changed odd times and where it has changed even times. If we added a new leaf to a node $u$ let's call it a critical node. Let's define $L$ as a subset of original nodes where: If we add a node to node $u$, then $u \in L$. If $u,v \in L$ then $LCA(u,v) \in L$ too. It can be proved that $|L| \leq min(N,2\cdot D_i - 1)$ for the $i$th variation. 
We can form a new tree from the nodes of $L$ in the following way: In this tree, the parent of node $u \in L$ is node $v \in L$ if $v$ is an ancestor of $u$ in the original tree and $D(v)$ is maximal. Let's denote the parent of node $u$ in the new tree by $P_{L}(u)$. In the new tree, for all nodes $u$ we calculate the number of critical nodes in the subtree rooted in $u$. (This can be computed in $O(|L|)$ using a single dfs.) If this number is even then nothing happens. If it's odd, the parity from node $u$ to $P_{L}(u)$ will change in the original tree (but will not change in $P_{L}(u)$. We can say that the parity has changed from $u$ to $r$, and then from $P_{L}(u)$ to $r$. These can be handled by using $S(u)$ and $S(P_{L}(u))$ the same way we described in subtask 5. Note that we don't have to know the $i+1$th variation before answering the $i$th one. So this solution can solve this problem "online". This subtask can be solved in $O((N + sum(D_i))logN)$. We remark that the problem can also be solved by utilizing the Heavy-Light Decomposition (HLD) of the original tree, this solution was also passing if the implementation was not too messy. Dynamic offline solution Instead of computing the value changes for every variation online, we can preread and store for each individual node which variations we add leaves to them. Then, using a dfs in a bottom-up DP manner, starting from the leaves for each node we pair the unpaired leaves in its subtree. We can store the unpaired leaves (only the variation's and parent node's identifiers are interesting for us) in some collections (e.g. set/map) and at each node, we merge the collections of all of its children. If we encounter two children having an unpaired leaf for the same variation, it means the current node is the LCA for those two additions, and we can compute the change in the cost of cleaning for that variation and store it. In order to maintain low complexity (i.e. 
logarithmic in terms of sums of $D_i$, we must make sure to not copy any collections needlessly, and always insert the elements of the smaller into the bigger one. After computing the cost changes for all variations simultaneously by a single dfs, we can construct the answer for each variation by checking if it has any unpaired leaves in the final collection, and adding the cost change to the basic cleaning cost of the original tree. HLD approach The main idea is that we can calculate $|E|$ directly with heavy-light decomposition. First root the tree arbitrarily. Then, we need a segment tree that counts the amount of even numbers in a range with lazy range increases. After this, do hld on the input tree with the inner data structure being the aforementioned segment tree. Then, for every leaf $l$ we increase every edge by $1$ on the path from $l$ up to the root. Now, without any variations we know $|E|$, but it's also clear that for the $i$th variation we will only do $O(D_i)$ queries on the hld, if we attach the nodes one by one there are two cases: currently we attach a node to a leaf, then that leaf will no longer be a leaf, but attaching a new leaf to it basically cancels this effect so we don't need to do anything. we are not attaching to a leaf, then we can simply do the same increasing stuff we did.
|
[
"*special",
"data structures",
"dfs and similar",
"graphs",
"trees"
] | 2,300
| null |
1403
|
C
|
Chess Rush
|
The mythic world of Chess Land is a rectangular grid of squares with $R$ rows and $C$ columns, $R$ being greater than or equal to $C$. Its rows and columns are numbered from $1$ to $R$ and $1$ to $C$, respectively.
The inhabitants of Chess Land are usually mentioned as \underline{pieces} in everyday language, and there are $5$ specific types of them roaming the land: pawns, rooks, bishops, queens and kings. Contrary to popular belief, chivalry is long dead in Chess Land, so there are no knights to be found.
Each piece is unique in the way it moves around from square to square: in one step,
- a pawn can move one row forward (i.e. from row $r$ to $r+1$), without changing columns;
- a rook can move any number of columns left/right without changing rows OR move any number of rows forward/backward without changing columns;
- a bishop can move to any square of the two diagonals intersecting at its currently occupied square;
- a queen can move to any square where a rook or a bishop could move to from her position;
- and a king can move to any of the $8$ adjacent squares.
In the following figure, we marked by X the squares each piece can move to in a single step (here, the rows are numbered from bottom to top, and the columns from left to right).Recently, Chess Land has become a dangerous place: pieces that are passing through the land can get captured unexpectedly by unknown forces and simply disappear. As a consequence, they would like to reach their destinations as fast (i.e. in as few moves) as possible, and they are also interested in the number of different ways it is possible for them to reach it, using the minimal number of steps – because more paths being available could mean lower chances of getting captured. Two paths are considered different if they differ in at least one visited square.
For this problem, let us assume that pieces are entering Chess Land in a given column of row $1$, and exit the land in a given column of row $R$. Your task is to answer $Q$ questions: given the type of a piece, the column it enters row $1$ and the column it must reach in row $R$ in order to exit, compute the minimal number of moves it has to make in Chess Land, and the number of different ways it is able to do so.
|
Pawns, Rooks and Queens If $c_1=c_R$ then there is a unique shortest path, which takes $1$ step for the rook and the queen, and $R-1$ steps for the pawn. Otherwise, it is impossible getting to the exit square for the pawn, and it takes exactly $2$ steps for both the queen and the rook. It is also obvious that the latter can always do it in two different ways. Therefore, we only have to be careful about enumerating the number of ways to do the required $2$ steps for the queen. She always has the same two paths as the rook available, plus we need to take into account all the options involving one or two diagonal moves. For these, we have to check if any path is blocked by the edges Chess Land, and also note that two diagonal moves are only possible if $1+c_1$ and $R+c_R$ have the same parity. Bishops First note that the bishop can reach its destination iff $1+c_1$ and $R+c_R$ have the same parity, otherwise the answer is $0$. For a small number of rows, a carefully implemented bruteforce evaluation can also solve the problem, but if $R$ is large, we need to be more thorough. It is useful to note that we can count the number of steps and paths separately for the cases when the bishops leaves the first row using the left diagonal and the right diagonal, and in the end, if one is shorter than the other then choose that one, and if they take the same number of steps, just sum them to get the answer. Now, for a given starting direction, we can use a combinatorial argument to find the answer. First, imagine that we move forward in a greedy manner, bouncing between the left and right edges of Chess Land until we reach the last row in some impact column $c_I$. This way, we can jump $C-1$ rows forward before hitting a wall and having to move again, except the first and last moves, which are easy to handle. This gives us an initial guess for the number of steps. 
It is relatively easy to see that if $c_I=c_R$ then this shortest path is unique and the previously computed length is correct. Otherwise, there are two cases depending on how we arrive at the last row: for example, if we reach the last row while moving from left to right, and the target square is further to the right of $c_I$ (i.e. $c_I<c_R$), then the previous step length is once again correct, as we could have chosen to not go all the way to the edges in one or more previous step, which would increase the vaule of $c_I$ so we ensure $c_I=c_R$. However, if we reach the last row while moving from left to right, and the target square is to the left of $c_I$ (i.e. $c_R<c_I$ and we effectively "jump over" the requred destination), then we need to include one additional step into our path somewhere along the way in order to ensure $c_I=c_R$. This way, we obtain the number $n$ of required steps, and the number $f$ of diagonal movements we can spare by stopping before hitting an edge, and we need to distribute them arbitrarily over $n-1$ steps, as we cannot stop during the last step. This is equivalent to the well-known combinatorial problem of distributing $f$ balls into $n-1$ boxes, but is also relatively easy to figure out that it is the combination $\binom{f+n-2}{f}.$ Regarding the implementation of the solution, one has to be very careful about handling corner cases such as starting from the first or last column or arriving there, the direction of arrival and so. Note that in the formula above, $n-2$ can be $O(R/C)$ large which makes the evaluation a bit tricky when $C$ is small: notice that $\binom{f+n-2}{f} = \frac{(f+n-2)!}{f! 
(n-2)!} = \frac{(f+n-2)(f+n-3)\cdots (n-1)}{f!},$ We remark that alternately, the answers to all possible queries could be precomputed in $O(C^2)$ time, using the fact that while cycling through all the possible values of $c_R$ for a fixed $c_1$, the values $n$ and $f$ can change by at most $2$, making it possible to adjust the answer in constant time, but this is much more difficult to implement and was not required to pass. Kings It is easy to see that the king has to make exactly $R-1$ bottom-up moves, and if $|c_R-c_1|\leq R-1$, we need to advance one row in each step in order to have a shortest path. The other case, when $|c_R-c_1| > R-1$ means we need to advance one column towards the destination each step and have some free maneuverability between rows, can be solved seperately in a similar manner as the easiest case of the bottom-up problem, since this can only occur when $R<C$. From now on, we assume $R\geq C$, so the king moves one row forward in each step and takes $R-1$ steps total, so we just need to count the number of paths. First, we observe that the number of ways we can reach the $j$-th column of a row is initially ways[c1]=1 and otherwise ways[j]=0 for the first row, and for any further row can be computed dynamically as next_ways[j] = ways[j - 1]+ ways[j] + ways[j + 1], where $j=1,\ldots,C$. Next, we have to notice that with the same technique, we can precompute and store the answer for every $(c_1,c_R)$ pair, and answer each query in $O(1)$ time after, by adding another dimension to our dynamic programming: let DP[i][j] denote the number of ways to go from the $i$-th column of the first row to the $j$-th column of some current row, so initially we have DP[i][i] = 1 and every other entry is 0. Now we just repeat next_DP[i][j] = DP[i][j - 1]+ DP[i][j] + DP[i][j + 1] Our next observation is to notice that at each iteration, instead of advancing a sinle row $r\to r+1$, we can choose to compute the answer for $r\to 2r-1$ instead. 
Indeed, our current DP array stores all the numbers of ways to get from the columns of the first row to the columns of the $r$-th row, or in other words, to advance $r$ rows forward. So if we want to count the number of ways to get from the $i$-th column of the first row to the $j$-th column of the $2r-1$-st row, we can enumerate all the possible paths going through the intermediate $k$-th column of the $r$-th row ($k=1,\ldots,C$) by the formula double_DP[i][j] = $\displaystyle\sum_{k=1}^C$ DP[i][k] $\cdot$ DP[k][j]. Notice that the cost of advancing a single row was $O(C^2)$, so if we could somehow speed up the computation of $r\to 2r-1$ advancements too, then generating all answers for the king could be done in $O(C^2 \log R)$ time. There are multiple ways to do this step, but they all can be a bit tricky to find, so implementing a previous, less efficient approach and studying some of its outputs for small $C$ and different $R$ values could be very useful to figure out the main ideas and guess how a solution works. First, the entries of double_DP[i][1], i.e. going from any column of the first row to a fixed column $j=0$, can be computed in $O(C^2)$ time, and since reversing the paths from the $i$-th to the $j$-th column gives exactly the paths from the $j$-th to the $i$-th column, double_DP[i][j] is symmetric and we obtain every double_DP[1][j] entry immediately. Now suppose $1<i<j<C/2$ holds. The key observation is that we don't have to use the values of DP anymore, as the paths going from the $i$-th to the $j$-th column are almost the same as the paths going from the $i-1$-st column to the $j-1$-st column, shifted by a single column to the right. In fact, the relation double_DP[i][j] = double_DP[i-1][j-1] + double_DP[1][1+i+j] double_DP[i][j] = double_DP[i+1][j-1] + double_DP[1][1+i-j],
|
[
"*special",
"combinatorics",
"dp",
"implementation",
"math"
] | 3,200
| null |
1404
|
A
|
Balanced Bitstring
|
A bitstring is a string consisting only of the characters 0 and 1. A bitstring is called $k$-\textbf{balanced} if every substring of size $k$ of this bitstring has an equal amount of 0 and 1 characters ($\frac{k}{2}$ of each).
You are given an integer $k$ and a string $s$ which is composed only of characters 0, 1, and ?. You need to determine whether you can make a $k$-balanced bitstring by replacing every ? characters in $s$ with either 0 or 1.
A string $a$ is a substring of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters from the beginning and several (possibly, zero or all) characters from the end.
|
Let's denote the balanced bitstring (if any) deriving from $s$ to be $t$. Also, for the ease of the tutorial, let the strings be $0$-indexed (so the first character has index $0$ and the last character has index $n - 1$). First of all, let's prove a very important observation: for every $i$ such that $0 \leq i < n - k$, $t_i = t_{i + k}$. This is because the length $k$ substrings starting at $i$ and $i + 1$ share the $k - 1$ characters $t_{i + 1} \dots t_{i + k - 1}$, so in order for both strings to have the same number of 1 characters, their remaining characters $t_i$ and $t_{i + k}$ must both be 1, or both be 0. Extending this fact, we can easily prove that $t_i = t_j$ if $i \equiv j \pmod{k}$. So first of all, for each $0 \leq i < k$, we need to find out if all $s_j$ such that $j\text{ mod }k = i$ can be converted to the same character (i.e. there can't exist both 0 and 1 among these characters). Furthermore, we can deduce some information for $t_i$: it must be 0 if at least one character among $s_j$ is 0, must be 1 if at least one character among $s_j$ is 1, or it can be undecided and can be freely assigned to 0 or 1 if all $s_j$ are ?. An illustration for $n = 9$, $k = 4$. The positions highlighted with the same colors must have the same characters. By using the information from the known characters we can fill some of the unknown positions. An illustration for $n = 9$, $k = 4$. The positions highlighted with the same colors must have the same characters. By using the information from the known characters we can fill some of the unknown positions. Lastly, we need to check if we can make the substring $t_0t_1 \dots t_{k - 1}$ have exactly half of the characters are equal to 1 (we don't need to check for any other substring, because the condition $t_i = t_{i + k}$ implies that all the substrings of size $k$ will have the same number of 1 characters). 
We simply need to check if the number of decided 1 characters and the number of decided 0 characters do not exceed $\frac{k}{2}$. It can easily be shown that if these numbers don't exceed this value then we can assign the undecided characters so that half of the characters are 1, and if one exceeds then it is impossible to do so.
|
[
"implementation",
"strings"
] | 1,500
|
#include <bits/stdc++.h>
using namespace std;

// 1404A Balanced Bitstring.
// Because every window of length k must contain exactly k/2 ones,
// t[i] must equal t[i + k]; hence all positions in the same residue
// class modulo k carry the same character. A class is "forced" to a
// bit if any non-'?' character appears in it, and forced bits must
// not conflict. Finally, at most k/2 classes may be forced to '0'
// and at most k/2 to '1', otherwise the first window cannot balance.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int tests;
    cin >> tests;
    while (tests--) {
        int n, k;
        string s;
        cin >> n >> k >> s;
        bool ok = true;
        int forced[2] = {0, 0};  // how many classes are forced to '0' / '1'
        for (int cls = 0; cls < k; ++cls) {
            int seen = -1;  // -1 = still undecided, otherwise the forced bit
            for (int pos = cls; pos < n; pos += k) {
                if (s[pos] == '?') continue;
                int bit = s[pos] - '0';
                if (seen >= 0 && seen != bit) {
                    ok = false;  // two different fixed bits in one class
                    break;
                }
                seen = bit;
            }
            if (seen >= 0) ++forced[seen];
        }
        // Undecided classes can be assigned freely, so feasibility only
        // requires neither forced count to exceed k/2.
        ok = ok && forced[0] <= k / 2 && forced[1] <= k / 2;
        cout << (ok ? "YES\n" : "NO\n");
    }
}
|
1404
|
B
|
Tree Tag
|
Alice and Bob are playing a fun game of tree tag.
The game is played on a tree of $n$ vertices numbered from $1$ to $n$. Recall that a tree on $n$ vertices is an undirected, connected graph with $n-1$ edges.
Initially, Alice is located at vertex $a$, and Bob at vertex $b$. They take turns alternately, and Alice makes the first move. In a move, Alice can jump to a vertex with distance \textbf{at most} $da$ from the current vertex. And in a move, Bob can jump to a vertex with distance \textbf{at most} $db$ from the current vertex. The distance between two vertices is defined as the number of edges on the unique simple path between them. In particular, either player is allowed to stay at the same vertex in a move. Note that when performing a move, a player only occupies the starting and ending vertices of their move, not the vertices between them.
If after at most $10^{100}$ moves, Alice and Bob occupy the same vertex, then Alice is declared the winner. Otherwise, Bob wins.
Determine the winner if both players play optimally.
|
Let's consider several cases independently. Case 1: $\mathrm{dist}(a, b)\le da$ Unsurprisingly, Alice wins in this case by tagging Bob on the first move. Case 2: $2da\ge \mathrm{tree\ diameter}$ Here, the diameter of a tree is defined as the length of the longest simple path. In this case, Alice can move to a center of the tree. Once Alice is there, it doesn't matter where Bob is, since Alice can reach any vertex in the tree in just one move, winning the game. Case 3: $db > 2da$ In this case, let's describe a strategy for Bob to win. Because we are not in case 1, Bob will not lose before his first move. Then it is sufficient to show that Bob can always end his turn with distance greater than $da$ from Alice. Since we are not in case 2, there is at least one vertex with distance at least $da$ from Alice. If Bob is at such a vertex at the start of his turn, he should simply stay there. Otherwise, there is some vertex $v$ with $\mathrm{dist}(a,v)=da+1$. Then $\mathrm{dist}(b,v)\le \mathrm{dist}(b,a)+\mathrm{dist}(a,v)\le da+(da+1)=2da+1\le db$, so Bob can jump to $v$ on his turn. Case 4: $db \le 2da$ In this case, Alice's strategy will be to capture Bob whenever possible or move one vertex closer to Bob otherwise. Let's prove that Alice will win in a finite number of moves with this strategy. Let's root the tree at $a$. Bob is located in some subtree of $a$, say with $k$ vertices. Alice moves one vertex deeper, decreasing Bob's subtree size by at least one vertex. Since $db\le 2da$, Bob cannot move to another subtree without being immediately captured, so Bob must stay in this shrinking subtree until he meets his inevitable defeat. Solution The only non-trivial part in the implementation is checking for cases $1$ and $2$. Case $1$ is simply checked with DFS. Case 2 only requires computing the diameter of the tree, which is a standard problem. Complexity is $O(n)$.
|
[
"dfs and similar",
"dp",
"games",
"trees"
] | 1,900
|
#include <bits/stdc++.h>
using namespace std;
const int N = 1e5 + 5;
// n vertices; Alice starts at a, Bob at b; per-move jump ranges da, db.
int n, a, b, da, db, depth[N];
vector<int> adj[N];
int diam = 0; // diameter of the current tree, in edges
// Returns the length (in edges) of the longest downward path starting at x,
// where p is x's DFS parent. Side effects: fills depth[] with distances from
// the DFS root and updates diam with the best sum of the two longest child
// chains meeting at any vertex — which equals the tree diameter.
int dfs(int x, int p) {
int len = 0;
for(int y : adj[x]) {
if(y != p) {
depth[y] = depth[x] + 1;
int cur = 1 + dfs(y, x);
diam = max(diam, cur + len); // path through x using the two deepest chains
len = max(len, cur);
}
}
return len;
}
int main() {
ios_base::sync_with_stdio(false);
cin.tie(nullptr);
int te;
cin >> te;
while(te--) {
cin >> n >> a >> b >> da >> db;
for(int i = 1; i <= n; i++) adj[i].clear();
for(int i = 0; i < n - 1; i++) {
int u, v;
cin >> u >> v;
adj[u].push_back(v);
adj[v].push_back(u);
}
diam = 0;
depth[a] = 0;
// Root the DFS at Alice's vertex, so depth[b] == dist(a, b).
dfs(a, -1);
// Alice wins iff: she tags Bob immediately (dist(a,b) <= da), or she can
// move to a center and cover the whole tree (2*da >= diameter), or Bob can
// never jump past her (db <= 2*da). The min() folds the last two cases
// into the single comparison 2*da >= min(diam, db).
cout << (2 * da >= min(diam, db) || depth[b] <= da ? "Alice" : "Bob") << '\n';
}
}
|
1404
|
C
|
Fixed Point Removal
|
Let $a_1, \ldots, a_n$ be an array of $n$ positive integers. In one operation, you can choose an index $i$ such that $a_i = i$, and remove $a_i$ from the array (after the removal, the remaining parts are concatenated).
The weight of $a$ is defined as the maximum number of elements you can remove.
You must answer $q$ independent queries $(x, y)$: after replacing the $x$ first elements of $a$ and the $y$ last elements of $a$ by $n+1$ (making them impossible to remove), what would be the weight of $a$?
|
Convenient transformation Replace $a_i$ by $i - a_i$. The new operation becomes: remove a zero, and decrement all elements after by one. For each query, note $l = 1+x$ and $r = n-y$ the endpoints of the non-protected subarray. The main idea of the solution is iterating over $r$, maintaining answers for each $l$ in a BIT (increment on prefix) and answer queries offline (when we meet a right endpoint). What follows is a detailed explanation of this idea. Simplified version Let's suppose that $l = 1$ holds for all queries. We can intuitively see that $a_i$ is removable iff $a_i \ge 0$ and we can remove at least $a_i$ elements before. We're going to rewrite this more formally. Let $f(r)$ be the maximum number of elements we can remove in the subarray $a[1 \ldots r]$. If $a_r < 0$ or $a_r > f(r-1)$, then it's obviously impossible to remove $a_r$ and in that case, $f(r) := f(r-1)$. Otherwise, if $0 \le a_r \le f(r-1)$, then $f(r) := f(r-1)+1$. We can reach this with the following strategy: Perform the $a_r$ first steps in the prefix $[1, r-1]$ Remove $a_r$ (which is equal to $0$ at that moment) Perform the remaining $f(r-1) - a_r$ steps in the prefix $[1, r-1]$. Hence, we can compute successively $f(1), f(2), \ldots, f(n)$ with a single loop: maintain current $f(r)$ in a variable $s$, and at each iteration increment $s$ if and only if $0 \le a_r \le s$. Complete version Note $f(l, r)$ the maximum number of elements we can remove in the subarray $a[l \ldots r]$ (zero if $l > r$). During our iteration over $r$, we're going to maintain the answers for each $l$: $s = [f(1, r), f(2, r), \ldots, f(n, r)]$ When the iteration continues, discovering a new element $a_r$, what happens? If $a_r < 0$, nothing happens. Otherwise, $s_l$ is incremented by one if and only if $s_l \ge a_r$. Let $l_\max$ be the greatest $l$ such that $l \le r$ and $s_{l_\max} \ge a_r$. 
We should increment the prefix ending here by one: $\boxed{s_1 \ge \ldots \ge s_{l_\max}} \ge a_r > s_{l_\max + 1} \ge \ldots \ge s_n$ A binary indexed tree (aka Fenwick tree) is obviously the structure we need in order to maintain $s$, since it allows to add on segment and get one element in $\mathcal{O}(\log n)$ (segment tree could work, but is slower in practice). In order to find $l_\max$, the easiest solution is to binary search, it takes $\mathcal{O}(\log^2 n)$ time which is fast enough to get AC. We can also use binary lifting in order to optimize the search in $\mathcal{O}(\log n)$. This technique is explained in this blog. We have to read all queries in advance (offline algorithm). When the iteration over $r$ meets the right endpoint of a query, we set its answer to the current weight of $s_l$. In order to get an online algorithm (answer the query before reading the next one), we would have to use a persistent data structure. Final complexity: $\mathcal{O}((n + q) \log n)$ with low constant factor. Under given time limit, $\log^2$ solutions with reasonable constant factor could also pass.
|
[
"binary search",
"constructive algorithms",
"data structures",
"greedy",
"two pointers"
] | 2,300
|
#include <bits/stdc++.h>
using namespace std;
// 1404C Fixed Point Removal, offline.
// After the transform a[i] := i - a[i], element i is removable iff its value
// is 0 at removal time, and each removal decrements all later values.
// Sweep r left to right while maintaining s[l] = f(l, r) — the max number of
// removals inside a[l..r] — for every l. s is non-increasing in l, so a new
// element a[r] increments exactly a prefix of s (those l with s[l] >= a[r]).
int main() {
ios::sync_with_stdio(false), cin.tie(0);
int n, q; cin >> n >> q;
vector<int> a(n+1), ans(q), leftBound(q);
vector<vector<int>> endHere(n+1); // query indices grouped by right endpoint r
for (int i = 1; i <= n; ++i) {
cin >> a[i];
a[i] = i - a[i]; // a[i] >= 0  <=>  element i is potentially removable
}
for (int i = 0; i < q; ++i) {
int x, y; cin >> x >> y;
int l = 1+x, r = n-y; // unprotected subarray is a[l..r]
leftBound[i] = l;
endHere[r].push_back(i);
}
// s[l] is stored implicitly as: s[l] = global - prefix_sum(BIT, l).
// "Increment a prefix [1..pos]" becomes: ++global plus a point update
// at pos+1 that cancels the increment for the suffix.
vector<int> BIT(n+1);
int global = 0;
for (int r = 1; r <= n; ++r) {
int target = a[r];
if (target >= 0) {
// Find rightmost pos such that s[pos] >= target
// (binary-lifting descent over the Fenwick tree, O(log n); note the
// comma operator: BIT[pos] is read AFTER pos += jump).
int pos = 0, cur = global;
for (int jump = 1 << __lg(n); jump >= 1; jump /= 2)
if (pos+jump <= r && cur - BIT[pos+jump] >= target)
pos += jump, cur -= BIT[pos];
// Increment prefix (+1 on whole array, -1 on suffix)
++global;
for (int i = pos+1; i <= n; i += i & -i)
++BIT[i];
}
// Answer all queries ending at r: the answer is the current s[l].
for (int iQuery : endHere[r]) {
ans[iQuery] = global;
for (int i = leftBound[iQuery]; i > 0; i -= i & -i)
ans[iQuery] -= BIT[i];
}
}
for (int i = 0; i < q; ++i)
cout << ans[i] << "\n";
}
|
1404
|
D
|
Game of Pairs
|
\textbf{This is an interactive problem.}
Consider a fixed positive integer $n$. Two players, First and Second play a game as follows:
- First considers the $2n$ numbers $1, 2, \dots, 2n$, and partitions them as he wants into $n$ disjoint pairs.
- Then, Second chooses exactly one element from each of the pairs that First created (he chooses elements he wants).
To determine the winner of the game, we compute the sum of the numbers chosen by Second. If the sum of all these numbers is a multiple of $2n$, then Second wins. Otherwise, First wins.
You are given the integer $n$. Your task is to decide which player you wish to play as and win the game.
|
We split the problem into two cases: $n$ is even We claim that First can guarantee a win by forming the pairs $(1, n + 1), \dots, (n, 2n)$. Note that no matter which elements Second chooses, he will always take one element having each remainder modulo $n$. Thus the total sum is $0 + 1 + 2 + \dots + n - 1 \equiv \frac{n(n - 1)}{2} \pmod{n}$ Say $n = 2m$, then this reduces to $m(2m - 1) \pmod{2m}$. Since $2m - 1$ is an odd integer this is nonzero, and the sum isn't even divisible by $n$, let alone $2n$. $n$ is odd This is the more difficult part of the problem. We claim that now Second is able to win, and present a strategy. We have two important claims: Claim 1. It is enough for Second to find a choice of elements from each pair such that the sum of the chosen numbers is divisible by $n$ (instead of $2n$). Proof. Notice that the sum of all the numbers is $1 + 2 + \dots + 2n = n(2n + 1)$, which is congruent to $n \pmod{2n}$. If the sum of some numbers, one from each pair, is divisible by $n$, then it is either $0 \pmod{2n}$ or $n \pmod{2n}$. In the first case, we have already won. Otherwise, if we take every other number instead, the sum of those numbers will be $0 \pmod{2n}$, and we will also win. Claim 2. It is always possible to take one element from each pair such that each of the remainders modulo $n$ appears exactly once. Proof. Consider a graph with $2n$ vertices $1, 2, \dots, 2n$ and regard the pairs chosen by First as red edges in this graph. We will additionally create $n$ edges connecting the vertices $i$ and $i + n$ for each $i \le n$, and paint them blue. Then every vertex is adjacent to one red edge and one blue edge. In particular, all vertices have degree $2$, so the graph splits into disjoint cycles. An illustration for the case $n = 5$. The pairs are $(1, 6)$, $(2, 7)$, $(3, 5)$, $(4, 8)$ and $(9, 10)$. The numbers $1, 7, 3, 4, 10$ on the white vertices cover all residues $\bmod n$. An illustration for the case $n = 5$. 
The pairs are $(1, 6)$, $(2, 7)$, $(3, 5)$, $(4, 8)$ and $(9, 10)$. The numbers $1, 7, 3, 4, 10$ on the white vertices cover all residues $\bmod n$. Since the edges in each cycle alternate between being red and blue, they all have even lengths, so it's possible to color their vertices alternately black and white, and we can construct such a coloring by a simple DFS. Finally, after doing this for all cycles, take the numbers corresponding to all the white vertices. Since no two of them are joined by a red edge, they are all in different pairs, and since no two of them are joined by a blue edge, their residues modulo $n$ are all different, and thus each one appears exactly once. Finally, by combining the two previous claims the problem is solved, since $0 + 1 + \dots + n - 1 \equiv 0 \pmod{n}$. Complexity: $O(n)$.
|
[
"constructive algorithms",
"dfs and similar",
"interactive",
"math",
"number theory"
] | 2,800
|
#include <bits/stdc++.h>
using namespace std;
const int MAXN = 1e6 + 10;
vector<int> adj[MAXN];
// vi[v]: DFS visited flag; pv[x]: first position seen carrying pair label x.
int vi[MAXN], pv[MAXN];
// Alternating 2-coloring of the cycles: choices[0] / choices[1] hold the
// numbers on the two color classes.
vector<int> choices[2];
// Colors the cycle containing s alternately (x flips 0 <-> 1 along edges).
// Every vertex has degree exactly 2 — one "pair" edge from First plus one
// "i <-> i+n" edge we add ourselves — so the graph is a disjoint union of
// cycles; the edges alternate between the two kinds, making every cycle
// even, so the alternating coloring is consistent.
void dfs(int s, int x) {
vi[s] = 1;
choices[x].push_back(s);
for(auto v : adj[s])
if(!vi[v])
dfs(v, x ^ 1);
}
int main() {
ios_base::sync_with_stdio(false);
cin.tie(0);
int n;
cin >> n;
if(n % 2) {
// n odd: play as Second. Read First's pairing as edges (two positions
// with the same label are paired), add the edges i <-> i+n, 2-color,
// and output one color class.
cout << "Second" << endl; // endl flushes — required by the interactor
for(int i = 1; i <= 2 * n; i++) {
int x;
cin >> x;
if(pv[x]) {
adj[pv[x]].push_back(i);
adj[i].push_back(pv[x]);
}
else {
pv[x] = i;
}
}
for(int i = 1; i <= n; i++) {
adj[i].push_back(n + i);
adj[n + i].push_back(i);
}
for(int i = 1; i <= 2 * n; i++) {
if(!vi[i])
dfs(i, 0);
}
// Each color class hits every residue mod n exactly once, so its sum is
// 0 (mod n), i.e. 0 or n (mod 2n). If it is n, the complementary class
// (total sum is n*(2n+1) ≡ n mod 2n) sums to 0 (mod 2n) — swap to it.
long long sum = 0;
for(auto x : choices[0])
sum += x;
if(sum % (2 * n) != 0)
swap(choices[0], choices[1]);
for(auto x : choices[0])
cout << x << " ";
cout << endl;
int res;
cin >> res;
//assert(res == 0);
return 0;
}
else {
// n even: play as First with pairs (i, i+n). Any selection takes one
// element of each residue mod n, summing to n/2*(n-1), which is nonzero
// mod n (n-1 is odd), so Second can never hit a multiple of 2n.
cout << "First" << endl;
for(int i = 0; i < 2 * n; i++)
cout << (i % n) + 1 << " ";
cout << endl;
int res;
cin >> res;
//assert(res == 0);
return 0;
}
}
|
1404
|
E
|
Bricks
|
A brick is defined as a rectangle with integer side lengths with either width $1$ or height $1$ (or both).
There is an $n\times m$ grid, and each cell is colored either black or white. A tiling is a way to place bricks onto the grid such that each black cell is covered by exactly one brick, and each white cell is not covered by any brick. In other words, bricks are placed on black cells only, cover all black cells, and \textbf{no two bricks overlap}.
\begin{center}
An example tiling of the first test case using $5$ bricks. It is possible to do better, using only $4$ bricks.
\end{center}
What is the minimum number of bricks required to make a valid tiling?
|
Instead of placing a minimum number of bricks into the cells, let's imagine that we start out with all $1\times 1$ bricks and delete the maximum number of borders. Of course, we need to make sure that when we delete borders, all the regions are in fact bricks. A region is a brick if and only if it contains no "L" shape. Let's construct a graph where each vertex is a border between two black cells, and we connect two vertices if deleting both would create an "L" shape. Then the tilings correspond exactly with the independent vertex sets in this graph, and the optimal tiling corresponds to the maximum independent set. The number of bricks is simply the total number of black cells minus the size of our independent set. Here is the graph and independent vertex set corresponding to a tiling: In general, computing the maximum independent vertex set of a graph is NP-complete. But in our special case, this graph is bipartite (the bipartition being horizontal borders and vertical borders). And Kőnig's Theorem states that for bipartite graphs, the size of the maximum matching is equal to the size of the minimum vertex cover. Recall that the complement of a minimum vertex cover is a maximum independent set. The maximum matching can be computed using maximum flow. In particular, Dinic's algorithm runs in $O(\sqrt{V}E)$ time. For our graph, $V$ and $E$ are both $O(nm)$. Overall complexity is therefore $O(nm\sqrt{nm})$.
|
[
"flows",
"graph matchings",
"graphs"
] | 2,800
|
#include <bits/stdc++.h>
using namespace std;
// Flow-network vertex ids. The borders between adjacent black cells are the
// vertices of the conflict graph: HORZ(i, j) is the horizontal border between
// cells (i, j) and (i+1, j); VERT(i, j) is the vertical border between cells
// (i, j) and (i, j+1). There are (n-1)*m horizontal and n*(m-1) vertical
// border slots; SOURCE and SINK bracket the id range.
#define SOURCE 0
#define HORZ(i, j) (m * (i) + (j) + 1)
#define VERT(i, j) ((n - 1) * m + n * (j) + (i) + 1)
#define SINK ((n - 1) * m + n * (m - 1) + 1)
const int N = 305, V = 2e5 + 5; // N: max grid side; V appears unused below
// One directed edge of the residual network: u -> v with capacity cap and
// current flow. Edges are stored in pairs: e[k] and e[k ^ 1] are reverses.
struct edge {
int u, v, cap, flow;
};
// Dinic's maximum-flow algorithm (level graph + augmentation with the
// current-arc optimization). On the unit-capacity bipartite network built in
// main() it runs in O(sqrt(V) * E), and the max flow equals the maximum
// matching size.
struct Dinic {
vector<edge> e; // all edges; e[k] and e[k ^ 1] are each other's reverse
vector<vector<int>> adj; // adj[v] = indices into e of edges leaving v
vector<int> ptr; // per-vertex "current arc" pointer for dfs
vector<int> level; // BFS levels of the layered residual graph
int n, source, sink;
Dinic(int n, int s, int t): n(n), source(s), sink(t) {
level.assign(n, -1);
adj.assign(n, vector<int>());
ptr.assign(n, 0);
}
// Adds a directed edge a -> b with capacity c. The paired reverse edge is
// created with flow == cap, i.e. zero residual capacity initially.
void add_edge(int a, int b, int c) {
int k = e.size();
e.push_back({a, b, c, 0});
e.push_back({b, a, c, c});
adj[a].push_back(k);
adj[b].push_back(k + 1);
}
// Rebuilds the level graph over edges with residual capacity; returns true
// while the sink is still reachable (i.e. more flow can be pushed).
bool bfs() {
fill(level.begin(), level.end(), -1);
level[source] = 0;
queue<int> Q;
Q.push(source);
while(!Q.empty()) {
int x = Q.front(); Q.pop();
for(int i : adj[x]) {
if(level[e[i].v] == -1 && e[i].flow < e[i].cap) {
level[e[i].v] = level[x] + 1;
Q.push(e[i].v);
}
}
}
return level[sink] != -1;
}
// Pushes up to `pushed` units along one source->sink path that only uses
// level-increasing residual edges; returns the amount pushed (0 if none).
int dfs(int x, int pushed) {
if(x == sink) return pushed;
for(int &id = ptr[x]; id < (int) adj[x].size(); id++) {
int i = adj[x][id];
if(level[e[i].v] == level[x] + 1 && e[i].flow < e[i].cap) {
int f = dfs(e[i].v, min(pushed, e[i].cap - e[i].flow));
if(f > 0) {
e[i].flow += f;
e[i ^ 1].flow -= f; // open residual capacity on the reverse edge
return f;
}
}
}
return 0;
}
// Returns the maximum flow from source to sink.
int calc() {
int ans = 0;
while(bfs()) {
fill(ptr.begin(), ptr.end(), 0);
while(true) {
int f = dfs(source, INT_MAX);
if(f == 0) break;
ans += f;
}
}
return ans;
}
};
int n, m;
string s[N]; // the grid: '#' = black cell
// 1404E Bricks. By König's theorem:
//   bricks = (#black cells) - max independent set of borders
//          = (#black cells) - (#borders) + min vertex cover
//          = (#black cells) - (#borders) + max matching.
// The running total `ans` accumulates the first two terms while reading the
// grid, and F.calc() adds the matching size at the end.
int main() {
ios::sync_with_stdio(false);
cin.tie(0);
cin >> n >> m;
int ans = 0;
Dinic F(SINK + 1, SOURCE, SINK);
for(int i = 0; i < n; i++) {
cin >> s[i];
for(int j = 0; j < m; j++) {
if(s[i][j] == '#') {
ans++; // one cell = one 1x1 brick before any merging
// Each border between two black cells is a graph vertex; horizontal
// borders attach to SOURCE, vertical borders to SINK (bipartition).
// Each such border could merge two bricks into one, hence ans--.
if(i > 0 && s[i - 1][j] == '#') {
F.add_edge(SOURCE, HORZ(i - 1, j), 1);
ans--;
}
if(j > 0 && s[i][j - 1] == '#') {
F.add_edge(VERT(i, j - 1), SINK, 1);
ans--;
}
}
}
}
// Connect every pair of borders whose simultaneous deletion would create
// an "L" shape (one horizontal + one vertical border sharing cell (i, j)).
for(int i = 0; i < n; i++) {
for(int j = 0; j < m; j++) {
if(s[i][j] == '#') {
// right, down
if(i < n - 1 && j < m - 1 && s[i + 1][j] == '#' && s[i][j + 1] == '#') {
F.add_edge(HORZ(i, j), VERT(i, j), 1);
}
// right, up
if(i > 0 && j < m - 1 && s[i - 1][j] == '#' && s[i][j + 1] == '#') {
F.add_edge(HORZ(i - 1, j), VERT(i, j), 1);
}
// left, down
if(i < n - 1 && j > 0 && s[i + 1][j] == '#' && s[i][j - 1] == '#') {
F.add_edge(HORZ(i, j), VERT(i, j - 1), 1);
}
// left, up
if(i > 0 && j > 0 && s[i - 1][j] == '#' && s[i][j - 1] == '#') {
F.add_edge(HORZ(i - 1, j), VERT(i, j - 1), 1);
}
}
}
}
ans += F.calc(); // + maximum matching (min vertex cover, by König)
cout << ans << '\n';
}
|
1405
|
A
|
Permutation Forgery
|
A permutation of length $n$ is an array consisting of $n$ distinct integers from $1$ to $n$ in arbitrary order. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array) and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array).
Let $p$ be any permutation of length $n$. We define the \textbf{fingerprint} $F(p)$ of $p$ as the sorted array of sums of adjacent elements in $p$. More formally,
$$F(p)=\mathrm{sort}([p_1+p_2,p_2+p_3,\ldots,p_{n-1}+p_n]).$$
For example, if $n=4$ and $p=[1,4,2,3],$ then the fingerprint is given by $F(p)=\mathrm{sort}([1+4,4+2,2+3])=\mathrm{sort}([5,6,5])=[5,5,6]$.
You are given a permutation $p$ of length $n$. Your task is to find a \textbf{different} permutation $p'$ with the same fingerprint. Two permutations $p$ and $p'$ are considered different if there is some index $i$ such that $p_i \ne p'_i$.
|
Let $p'=\mathrm{reverse}(p).$ Then $p'$ is a permutation, since every value from $1$ to $n$ appears exactly once. $p'\ne p$ since $p'_1=p_n\ne p_1$. (Here, we use $n\ge 2$.) $F(p')=F(p)$ since any two adjacent values in $p$ remain adjacent in $p'$.
|
[
"constructive algorithms"
] | 800
|
#include <bits/stdc++.h>
using namespace std;
int main() {
    // For each test case: read the permutation and print it reversed.
    // Reversal keeps every adjacent pair adjacent, so the fingerprint F(p)
    // is unchanged, while p' != p whenever n >= 2.
    ios::sync_with_stdio(false);
    cin.tie(0);
    int tests;
    cin >> tests;
    for (int tc = 0; tc < tests; tc++) {
        int n;
        cin >> n;
        vector<int> p(n);
        for (int i = 0; i < n; i++) cin >> p[i];
        // Emit in reverse order directly instead of materializing the reversal.
        for (int i = n - 1; i >= 0; i--) cout << p[i] << ' ';
        cout << '\n';
    }
}
|
1405
|
B
|
Array Cancellation
|
You're given an array $a$ of $n$ integers, such that $a_1 + a_2 + \cdots + a_n = 0$.
In one operation, you can choose two \textbf{different} indices $i$ and $j$ ($1 \le i, j \le n$), decrement $a_i$ by one and increment $a_j$ by one. If $i < j$ this operation is free, otherwise it costs one coin.
How many coins do you have to spend in order to make all elements equal to $0$?
|
The answer is the maximum suffix sum, which can be computed in $\mathcal{O}(n)$. Formal proof. Define $c_i = a_i + a_{i+1} + \cdots + a_n$ (partial suffix sum). Note $M = \max(c)$. We can observe that $a_1 = \cdots = a_n = 0$ if and only if $c_1 = \cdots = c_n = 0$. (If $c$ is null, $a_i = c_i - c_{i+1} = 0 - 0 = 0$.) A free operation on $i < j$ is equivalent to incrementing $c_{i+1}, \ldots, c_j$. Free operations can only increment elements of $c$, so we obviously need at least $M$ coins. Let's do $M$ times the operation $(i = n, j = 1)$, which decrement every element $M$ times. Now, for every $i$, $c_i \le 0$ and we can make it equal to $0$ by performing $-c_i$ times the free operation $(i-1, i)$.
|
[
"constructive algorithms",
"implementation"
] | 1,000
|
#include <bits/stdc++.h>
using namespace std;
int main() {
    // The minimum number of coins equals the maximum suffix sum of a.
    // Scanning left to right while clamping the running sum below at zero
    // computes exactly that value (the total sum is guaranteed to be 0).
    ios::sync_with_stdio(false), cin.tie(0);
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        long long run = 0;
        while (n--) {
            long long v;
            cin >> v;
            run += v;
            if (run < 0) run = 0;  // same as run = max(0LL, run + v)
        }
        cout << run << "\n";
    }
}
|
1406
|
A
|
Subset Mex
|
Given a set of integers (it can contain equal elements).
You have to split it into two subsets $A$ and $B$ (both of them can contain equal elements or be empty). You have to maximize the value of $mex(A)+mex(B)$.
Here $mex$ of a set denotes the smallest non-negative integer that doesn't exist in the set. For example:
- $mex(\{1,4,0,2,2,1\})=3$
- $mex(\{3,3,2,1,3,0,0\})=4$
- $mex(\varnothing)=0$ ($mex$ for empty set)
The set is split into two subsets $A$ and $B$ if for any integer number $x$ the number of occurrences of $x$ in this set is equal to the sum of the number of occurrences of $x$ in $A$ and the number of occurrences of $x$ in $B$.
|
Let us store the count of each number from $0$ to $100$ in array $cnt$. Now $mex(A)$ would be the smallest $i$ for which $cnt_i=0$.Let this $i$ be $x$. $mex(B)$ would be smallest $i$ for which $cnt_i\leq 1$. This is because one count of each number less than $x$ would go to $A$ therefore the element which was present initially once would now not be available for $B$. Overall Complexity: $O(n)$.
|
[
"greedy",
"implementation",
"math"
] | 900
|
#include<bits/stdc++.h>
#define re register
using namespace std;
// Fast reader for non-negative integers via getchar():
// skips every character below '0' (whitespace, newlines, '-' included --
// so this reader does NOT handle negative numbers), then accumulates
// digits until a character below '0' is met.
inline int read(){
    re int t=0;re char v=getchar();
    while(v<'0')v=getchar();
    // (t<<3)+(t<<1) == t*10; v-48 converts the ASCII digit.
    while(v>='0')t=(t<<3)+(t<<1)+v-48,v=getchar();
    return t;
}
// vis[0][v]: value v occurs at least once; vis[1][v]: at least twice.
int n,a[102],vis[2][102],ansa,ansb;
int main(){
    re int t=read();
    while(t--){
        n=read();
        for(re int i=1;i<=n;++i)a[i]=read();memset(vis,0,sizeof(vis));
        // Greedy split: the first occurrence of a value goes to multiset A,
        // any further occurrence to multiset B.
        for(re int i=1;i<=n;++i){
            if(!vis[0][a[i]])vis[0][a[i]]=1;
            else vis[1][a[i]]=1;
        }ansa=ansb=0;
        // mex(A) = first value missing entirely;
        // mex(B) = first value occurring fewer than twice.
        while(vis[0][ansa])++ansa;
        while(vis[1][ansb])++ansb;
        printf("%d\n",ansa+ansb);
    }
}
|
1406
|
B
|
Maximum Product
|
You are given an array of integers $a_1,a_2,\ldots,a_n$. Find the maximum possible value of $a_ia_ja_ka_la_t$ among all five indices $(i, j, k, l, t)$ ($i<j<k<l<t$).
|
First, if all numbers are less than $0$, then you should print the product of the five biggest numbers of them. Otherwise, the maximum product must be non-negative. Sort the numbers by their absolute value from big to small. If the first five numbers' product is positive then print it. Then we can always change one of the five to one of the $n-5$ other numbers to make this product positive. Enumerate which one to replace, and you can solve this problem in $O(n)$ time.
|
[
"brute force",
"dp",
"greedy",
"implementation",
"sortings"
] | 1,200
|
#include<bits/stdc++.h>
using namespace std;
long long ans,a[100005];
int main() {
    int t;
    scanf("%d",&t);
    while(t--){
        int n;
        // Sentinel below any input value; assumes |a_i| < 1e9 (the problem
        // bounds |a_i| far lower, so products of five fit in long long).
        long long mx=-1e9;
        scanf("%d",&n);
        for(int i=1;i<=n;i++)scanf("%lld",&a[i]),mx=max(mx,a[i]);
        // Sort by absolute value, descending: the tail holds the values
        // closest to zero.
        sort(a+1,a+n+1,[](long long x,long long y){return abs(x)>abs(y);});
        if(mx<0){
            // All numbers negative: the best (least negative) product uses
            // the five numbers of smallest absolute value, i.e. the tail.
            cout<<a[n]*a[n-1]*a[n-2]*a[n-3]*a[n-4]<<'\n';
            continue;
        }
        // Otherwise the optimum is non-negative.  Start from the top five by
        // absolute value, then try replacing each of them with every later
        // element; one such swap always fixes the sign if needed.
        ans=a[1]*a[2]*a[3]*a[4]*a[5];
        for(int i=6;i<=n;i++){
            for(int j=1;j<=5;j++){
                long long tmp=a[i];
                for(int k=1;k<=5;k++){
                    if(k!=j)tmp*=a[k];
                }
                ans=max(ans,tmp);
            }
        }
        printf("%lld\n",ans);
    }
    return 0;
}
|
1406
|
C
|
Link Cut Centroids
|
Fishing Prince loves trees, and he especially loves trees with only one centroid. The tree is a connected graph without cycles.
A vertex is a \textbf{centroid} of a tree only when you cut this vertex (remove it and remove all edges from this vertex), the size of the largest connected component of the remaining graph is the smallest possible.
For example, the centroid of the following tree is $2$, because when you cut it, the size of the largest connected component of the remaining graph is $2$ and it can't be smaller.
However, in some trees, there might be more than one centroid, for example:
Both vertex $1$ and vertex $2$ are centroids because the size of the largest connected component is $3$ after cutting each of them.
Now Fishing Prince has a tree. He should cut one edge of the tree (it means to remove the edge). After that, he should add one edge. The resulting graph after these two operations should be a tree. He can add the edge that he cut.
He wants the centroid of the resulting tree to be unique. Help him and find any possible way to make the operations. It can be proved, that at least one such way always exists.
|
Let vertex $1$ be the root of the tree. If there is only one centroid, just cut any edge and link it back. Otherwise there are two centroids. Let them be $x$ and $y$, then there must be an edge connecting $x$ and $y$. (If not, choose any other vertex on the path from $x$ to $y$ and the size of the largest connected component after cutting it will be smaller than for $x$ and $y$). Let $x$ be $y$'s father. (If not, swap $x$ and $y$) Then just cut a leaf from $y$'s subtree and link it with $x$. After that, $x$ becomes the only centroid. Proof: It's easy to see that the size of $y$'s subtree must be exactly $\dfrac{n}{2}$. After cutting and linking, the maximum component size of $y$ becomes $\dfrac{n}{2}+1$ while the maximum component size of $x$ is still $\dfrac{n}{2}$.
|
[
"constructive algorithms",
"dfs and similar",
"graphs",
"trees"
] | 1,700
|
#include<iostream>
#include<cstdio>
#include<algorithm>
#include<vector>
using namespace std;
// size[x]: subtree size rooted at x; fa[x]: parent of x in the DFS tree;
// minn: best (smallest) largest-component size found so far;
// cent1/cent2: the up-to-two centroids achieving minn (cent2 == 0 if unique).
int n,size[100005],fa[100005],minn=1e9,cent1,cent2;
vector<int> g[100005];
// Rooted DFS from x (parent f): computes sizes and, for every vertex, the
// size of the largest component left after removing it (max child subtree
// vs. the "upward" part n - size[x]); tracks the centroid(s).
void dfs(int x,int f){
    fa[x]=f,size[x]=1;
    int mx=0;
    for(int y:g[x]){
        if(y==f)continue;
        dfs(y,x);
        size[x]+=size[y];
        mx=max(mx,size[y]);
    }
    mx=max(mx,n-size[x]);
    if(mx<minn)minn=mx,cent1=x,cent2=0;
    else if(mx==minn)cent2=x;
}
int S;  // a leaf found inside the searched subtree (last one visited wins)
// Walks the subtree of x (entered from parent f) and records in S any vertex
// of degree 1, i.e. a leaf of the whole tree.  Does not stop early; S ends
// up holding the last leaf encountered, which is all the caller needs.
void dfs2(int x,int f){
    if(g[x].size()==1){
        S=x;
        return ;
    }
    for(int y:g[x]){
        if(y==f)continue;
        dfs2(y,x);
    }
}
int main(){
    int t;
    cin>>t;
    while(t--){
        cin>>n,cent1=cent2=0,minn=1e9;
        for(int i=1;i<=n;i++)g[i].clear(),fa[i]=0;
        for(int i=1;i<n;i++){
            int x,y;
            cin>>x>>y;
            g[x].push_back(y),g[y].push_back(x);
        }
        dfs(1,0);
        // Unique centroid: removing and re-adding any edge keeps it unique.
        if(!cent2){
            printf("1 %d\n1 %d\n",g[1][0],g[1][0]);
            continue;
        }
        // Two centroids are always adjacent.  Make cent1 the child (its
        // parent is cent2), find a leaf S in cent1's subtree, then move the
        // edge (S, fa[S]) to (S, cent2): cent2 becomes the only centroid.
        if(fa[cent1]!=cent2)swap(cent1,cent2);
        dfs2(cent1,cent2);
        printf("%d %d\n%d %d\n",S,fa[S],S,cent2);}
    return 0;
}
|
1406
|
D
|
Three Sequences
|
You are given a sequence of $n$ integers $a_1, a_2, \ldots, a_n$.
You have to construct two sequences of integers $b$ and $c$ with length $n$ that satisfy:
- for every $i$ ($1\leq i\leq n$) $b_i+c_i=a_i$
- $b$ is non-decreasing, which means that for every $1<i\leq n$, $b_i\geq b_{i-1}$ must hold
- $c$ is non-increasing, which means that for every $1<i\leq n$, $c_i\leq c_{i-1}$ must hold
You have to minimize $\max(b_i,c_i)$. In other words, you have to minimize the maximum number in sequences $b$ and $c$.
Also there will be $q$ changes, the $i$-th change is described by three integers $l,r,x$. You should add $x$ to $a_l,a_{l+1}, \ldots, a_r$.
You have to find the minimum possible value of $\max(b_i,c_i)$ for the initial sequence and for sequence after each change.
|
Since sequence $b$ is non-decreasing and sequence $c$ is non-increasing, we need to mimimize $\max(c_1,b_n)$. Now observe that if $a_i>a_{i-1}$ then $b_i=b_{i-1}+a_i-a_{i-1}$ and $c_i=c_{i-1}$.Else if $a_i<a_{i-1}$ then $b_i=b_{i-1}$ but $c_i=c_{i-1}+a_i-a_{i-1}$. Now we calculate $\sum\limits_{i=2}^{n}\max(0,a_i-a_{i-1})$.Let this sum be $K$. Now lets assume $c_1$ is $x$. So then $b_1$ is $a_1-x$.And as observed before $b_n = a_1-x+K$. Now we just need to minimize $\max(x,a_1-x+K)$. Now it is easily observable that $x$ should be $\dfrac{a_1+K}{2}$. For the changes, since we only need to know $\sum\max(0,a_i-a_{i-1})$, so only $a_l-a_{l-1}$ and $a_r-a_{r-1}$ will change. Total time complexity: $O(n+q)$.
|
[
"constructive algorithms",
"data structures",
"greedy",
"math"
] | 2,200
|
#include<cstdio>
#include<cmath>
#define re register
#define int long long
using namespace std;
// Fast signed-integer reader via getchar(): skips characters below '0'
// while remembering whether a '-' was seen (f), then accumulates digits
// ((t<<3)+(t<<1) == t*10).  Note: `int` is `long long` here because of the
// `#define int long long` above, so large values are safe.
inline int read(){
    re int t=0,f=0;re char v=getchar();
    while(v<'0')f|=(v=='-'),v=getchar();
    while(v>='0')t=(t<<3)+(t<<1)+v-48,v=getchar();
    return f?-t:t;
}
// a[] holds adjacent differences after main()'s transformation; a1 = a_1;
// sumg = sum of positive differences, suml = |sum of negative differences|.
// (`int` is `long long` in this file via the #define above; spelled out
// explicitly here for clarity.)
long long n,a[100002],sumg,suml,a1,A,B,C,q;
// Answer for the current state: ceil((a1 + sumg) / 2), computed with exact
// integer arithmetic.  The previous ceil((double)(a1+sumg)/2.0) loses
// precision once a1 + sumg exceeds 2^53, which is reachable under the
// constraints (values up to ~1e14 accumulate over 1e5 updates).
inline long long check(){
    long long v = a1 + sumg;
    // ceil(v/2) for either sign without floating point.
    return v >= 0 ? (v + 1) / 2 : -((-v) / 2);
}
// Applies delta y to difference a[x], keeping sumg/suml consistent:
// remove a[x]'s old contribution, update it, add the new contribution.
inline void cg(long long x, long long y){
    if(x > n) return;                       // difference past the end: no-op
    if(a[x] > 0) sumg -= a[x]; else suml += a[x];
    a[x] += y;
    if(a[x] > 0) sumg += a[x]; else suml -= a[x];
}
signed main(){
    n=read();
    for(re int i=1;i<=n;++i)a[i]=read();
    // sumg = sum of positive adjacent differences (the K of the editorial),
    // suml = sum of negative ones; only sumg enters the answer.
    for(re int i=2;i<=n;++i){
        if(a[i]>a[i-1])sumg+=a[i]-a[i-1];else suml+=a[i-1]-a[i];
    }
    // Turn a[] into a difference array so each range update touches only
    // the two boundary differences a[l] and a[r+1].
    for(re int i=n;i;--i)a[i]=a[i]-a[i-1];
    a1=a[1];
    printf("%lld\n",check());
    q=read();
    while(q--){
        re int l=read(),r=read(),x=read();
        // a_1 is tracked separately (it is not a difference).
        if(l==1)a1+=x;
        else cg(l,x);
        cg(r+1,-x);
        printf("%lld\n",check());
    }
}
|
1406
|
E
|
Deleting Numbers
|
\textbf{This is an interactive problem.}
There is an unknown integer $x$ ($1\le x\le n$). You want to find $x$.
At first, you have a set of integers $\{1, 2, \ldots, n\}$. You can perform the following operations no more than $10000$ times:
- A $a$: find how many numbers are multiples of $a$ in the current set.
- B $a$: find how many numbers are multiples of $a$ in this set, and then delete all multiples of $a$, but $x$ will never be deleted (even if it is a multiple of $a$). In this operation, $a$ must be greater than $1$.
- C $a$: it means that you know that $x=a$. This operation can be only performed once.
Remember that in the operation of type B $a>1$ must hold.
Write a program, that will find the value of $x$.
|
If we know what prime factors x has, we can find $x$ just using bruteforce. To find the prime factors, we can just do $B~p$ for every prime $p$ in ascending order, meanwhile calculate the numbers there supposed to be without $x$, if it differs with the number the interactor gives, then $x$ contains the prime factor $p$. This way, we can find every prime factor except for the smallest one. Let $m$ be the number of primes no greater than $n$. Then we can split the prime numbers into $\sqrt m$ groups. After finishing asking a group, ask $A~1$ and check if the return value same as it supposed to be without $x$. If it's the first time finding it different, it means the smallest prime number is in the range, then just check every prime numbers in the range by asking $A~p$. After finding the prime factors, for each factor, ask $A~p^k$, it can be proved this step will be done around $\log(n)$ times. The total number of operations if around $m+2\sqrt m+\log(n)$, the total time complexity is $O(n\log n)$
|
[
"interactive",
"math",
"number theory"
] | 2,600
|
#include<bits/stdc++.h>
#define re register
#define int long long
using namespace std;
bool vis[100002];                       // sieve marks, then "deleted" marks
// pri[1..tot]: primes up to n; sum: how many numbers should remain in the
// judge's set if x were not protected; ans: product of x's found prime
// powers; ia: set once the smallest prime factor has been located.
int pri[100002],tot,n,x,sum,ans,ia;
signed main(){
    srand(19260817);                    // NOTE(review): rand() is never used
    scanf("%lld",&n);
    // Sieve of Eratosthenes to enumerate all primes up to n.
    for(re int i=2; i<=n; ++i) {
        if(!vis[i]) {
            pri[++tot]=i;
            if(i<=sqrt(n))
                for(re int j=i*i; j<=n; j+=i)vis[j]=1;
        }
    }
    memset(vis,0,sizeof(vis));
    // Process primes in blocks of k = sqrt(tot); after each block a single
    // "A 1" query reveals whether the hidden x resisted deletion, i.e.
    // whether its smallest prime factor lies in that block.
    re int k=sqrt(tot);sum=n;ans=1;
    for(re int i=1;i<=tot;++i){
        // Once ans * (smallest prime of the current block) would exceed n,
        // no further factor can fit -- stop early.
        if(i>=k&&ans*pri[i-k+1]>n)break;
        printf("B %lld",pri[i]);cout<<endl;
        // num = how many multiples of pri[i] we expect to be deleted,
        // simulated locally with vis[]; sum tracks the expected set size.
        re int num=0;
        for(re int j=pri[i];j<=n;j+=pri[i]){
            if(!vis[j]){
                ++num;--sum;
                vis[j]=1;
            }
        }
        scanf("%lld",&x);
        // Discrepancy means x is a multiple of pri[i] (it survived the
        // deletion): binary-search its exact power via "A p^k" queries.
        if(x!=num){
            for(re int kk=pri[i];kk<=n;kk*=pri[i]){
                printf("A %lld",kk);cout<<endl;
                scanf("%lld",&x);
                if(x)ans*=pri[i];
                else break;
            }
        }
        // End of a block (or of all primes): check whether x still hides a
        // prime factor among the block's primes -- this catches the smallest
        // prime factor, which the per-prime discrepancy test above misses.
        if((i==tot||i%k==0)&&!ia){
            printf("A 1");cout<<endl;
            scanf("%lld",&x);
            if(x!=sum){
                for(re int j=i-k+1;j<=i;++j){
                    for(re int kk=pri[j];kk<=n;kk*=pri[j]){
                        printf("A %lld",kk);cout<<endl;
                        scanf("%lld",&x);
                        if(x)ans*=pri[j],ia=1;
                        else break;
                    }
                    if(ia)break;
                }
            }
        }
    }
    printf("C %lld",ans);cout<<endl;
}
|
1407
|
A
|
Ahahahahahahahaha
|
Alexandra has an even-length array $a$, consisting of $0$s and $1$s. The elements of the array are enumerated from $1$ to $n$. She wants to remove \textbf{at most} $\frac{n}{2}$ elements (where $n$ — length of array) in the way that alternating sum of the array will be equal $0$ (i.e. $a_1 - a_2 + a_3 - a_4 + \dotsc = 0$). In other words, Alexandra wants sum of all elements at the odd positions and sum of all elements at the even positions to become equal. The elements that you remove don't have to be consecutive.
For example, if she has $a = [1, 0, 1, 0, 0, 0]$ and she removes $2$nd and $4$th elements, $a$ will become equal $[1, 1, 0, 0]$ and its alternating sum is $1 - 1 + 0 - 0 = 0$.
Help her!
|
Let $cnt_0$ be the count of zeroes in the array, $cnt_1$ — the count of ones. Then if $cnt_1 \leq \frac{n}{2}$, we remove all ones and the alternating sum, obviously, equals $0$. Otherwise, $cnt_0 < \frac{n}{2}$, so we remove all zeroes and, if $cnt_1$ is odd, one extra $1$. In this case, the alternating sum equals $1 - 1 + 1 - \ldots - 1 = 0$ (because the count of remaining ones is even) and we'll remove not more than $cnt_0 + 1 \leq \frac{n}{2}$ elements.
|
[
"constructive algorithms",
"math"
] | 1,100
|
#include <bits/stdc++.h>
using namespace std;
int main() {
    // If at least half the elements are 0, keep exactly the zeroes: the
    // alternating sum is trivially 0.  Otherwise more than half are 1, so
    // keeping an even number of ones makes the sum telescope to 0.  Either
    // way at most n/2 elements are removed.
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        int zeroes = 0;
        for (int i = 0; i < n; i++) {
            int v;
            cin >> v;
            if (v == 0) zeroes++;
        }
        int ones = n - zeroes;
        if (zeroes >= n / 2) {
            cout << zeroes << '\n';
            for (int i = 0; i < zeroes; i++) cout << 0 << ' ';
        } else {
            int keep = ones - ones % 2;  // largest even count of ones
            cout << keep << '\n';
            for (int i = 0; i < keep; i++) cout << 1 << ' ';
        }
        cout << '\n';
    }
    return 0;
}
|
1407
|
B
|
Big Vova
|
Alexander is a well-known programmer. Today he decided to finally go out and play football, but with the first hit he left a dent on the new Rolls-Royce of the wealthy businessman Big Vova. Vladimir has recently opened a store on the popular online marketplace "Zmey-Gorynych", and offers Alex a job: if he shows his programming skills by solving a task, he'll work as a cybersecurity specialist. Otherwise, he'll be delivering some doubtful products for the next two years.
You're given $n$ positive integers $a_1, a_2, \dots, a_n$. Using each of them \textbf{exactly at once}, you're to make such sequence $b_1, b_2, \dots, b_n$ that sequence $c_1, c_2, \dots, c_n$ is lexicographically maximal, where $c_i=GCD(b_1,\dots,b_i)$ - the greatest common divisor of the first $i$ elements of $b$.
Alexander is really afraid of the conditions of this simple task, so he asks you to solve it.
A sequence $a$ is lexicographically smaller than a sequence $b$ if and only if one of the following holds:
- $a$ is a prefix of $b$, but $a \ne b$;
- in the first position where $a$ and $b$ differ, the sequence $a$ has a smaller element than the corresponding element in $b$.
|
We'll describe several constructive solutions for this task, differing by the time complexity: 1. $O(n^2log A)$ Let $b$ be an empty sequence in the beginning. We'll consequently transfer elements from $a$ to $b$ in a certain order. Let's notice that if we've already transfered $(k-1)$ elements then we can always choose to the place of $b_k$ any element $a_j$ left in $a$ such that $c_k=gcd(b_1,\dots,b_{k-1},a_j)$ is maximal. A-priory, if we've fixed the first $(k-1)$ elements of the sequence, lexicographically greater would be the one in which $c_k$ is maximal. The particular value of $b_k$ here doesn't matter: each element $c_i$ divides all the previous ones, so $gcd(b_k, c_j)=gcd(c_k,c_j)$ for any $j>=k$. So the algorithm is following: let's say that we have auxiliary element of the sequence $c_0=0$, and $gcd(0, k)=k$ for any integer $k\geq1$. Then we make $n$ iterations: during the $i$-th one we choose such $a_j$ (overall elements left in $a$) that the value of $gcd(a_j, c_{i-1})$ is maximal, and make $b_i=a_j$, removing $a_j$ from the sequence $a$. The $i$-th iteration will be passed in $O((n-i)log A)$, where $A$ is the greatest possible value in the original $a$ sequence, $log A$ is the time complexity of the Euclidean algorithm for searching $GCD$. Via summing time complexities overall iterations we get summary $O(n^2log A)$ time complexity of the algorithm. 2. $O(Anlog A)$ The main idea is the same as in the first solution, but the realisation is different: the main array $a$ is contained as an array $cnt$ of size $A$, where $cnt_x$ is the amount of elements in $a$ that are equal to $x$. Searching for the optimal element $b_k$ is $O(Alog A)$ for each of $n$ iterations, so the summary is $O(Anlog A)$. 3. $O(nlog^2 A)$ This solution is based on the following idea: for each $i>1$ either $c_i=c_{i-1}$ or $2c_i\leq c_{i-1}$. That means any possible sequence $c$ contains $O(log A)$ different values. 
So we do $O(log A)$ iterations, on each we find the value $x$ among the elements left in $a$ that maximizes $O(c_k, x)$ (where $k$ is the amount of elements already transfered to $b$, and $c_k=gcd(b_1,\dots,b_k)$) and transfer all the elements left in $a$ of the value equal to $x$ to the end of $b$. Each iteration is done in $O(n log A)$ so the total time complexity is $O(n log^2 A)$.
|
[
"brute force",
"greedy",
"math",
"number theory"
] | 1,300
|
#include <bits/stdc++.h>
using namespace std;
// One test case of "Big Vova": greedily builds b by repeatedly appending the
// unused element maximizing gcd with the running prefix gcd (the first such
// element on a scan wins ties).  Used elements are marked by zeroing them,
// which is safe since all inputs are positive.
// Fixed: replaced the non-standard variable-length array `int a[n]` (a GCC
// extension, not ISO C++) with std::vector.
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    int mi = 0;  // index of the maximum element -- it starts the sequence
    for (int i = 0; i < n; i++) {
        cin >> a[i];
        mi = (a[i] > a[mi] ? i : mi);
    }
    vector<int> b(n);
    b[0] = a[mi]; a[mi] = 0;
    int cg = b[0];  // gcd of the chosen prefix
    for (int i = 1; i < n; i++) {
        int ci = 0, cans = 0;
        for (int j = 0; j < n; j++)
            if (a[j] && __gcd(a[j], cg) > cans) {
                cans = __gcd(a[j], cg);
                ci = j;
            }
        b[i] = a[ci];
        cg = cans;
        a[ci] = 0;
    }
    for (int i = 0; i < n; i++)
        cout << b[i] << ' ';
    cout << '\n';
}
int main() {
    // Dispatch: run solve() once per test case.
    int tests;
    cin >> tests;
    for (; tests > 0; tests--)
        solve();
    return 0;
}
|
1407
|
C
|
Chocolate Bunny
|
\textbf{This is an interactive problem.}
We hid from you a permutation $p$ of length $n$, consisting of the elements from $1$ to $n$. You want to guess it. To do that, you can give us 2 different indices $i$ and $j$, and we will reply with $p_{i} \bmod p_{j}$ (remainder of division $p_{i}$ by $p_{j}$).
We have enough patience to answer at most $2 \cdot n$ queries, so you should fit in this constraint. Can you do it?
As a reminder, a permutation of length $n$ is an array consisting of $n$ distinct integers from $1$ to $n$ in arbitrary order. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array) and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array).
|
Observation: $(a \bmod b > b \bmod a) \Leftrightarrow (a < b)$. Proof: if $a > b$, then $(a \bmod b) < b = (b \bmod a$). If $a = b$, then $(a \bmod b) = (b \bmod a) = 0$. Let's maintain index $mx$ of maximal number on the reviewed prefix (initially $mx = 1$). Let's consider index $i$. Ask two queries: ? i mx and ? mx i. We'll know the less from both of them and either guess $p_{mx}$ and update $mx = i$ or guess $p_i$. In the end, all numbers will be guessed except $p_{mx}$, that, obviously, equals $n$. In total, we'll make $2 \cdot n - 2$ queries.
|
[
"constructive algorithms",
"interactive",
"math",
"two pointers"
] | 1,600
|
#include <bits/stdc++.h>
using namespace std;
// Sends the query "? i j" with 1-based indices and returns the judge's
// reply p[x] mod p[y].  endl flushes the stream, as required in
// interactive problems.
int ask(int x, int y) {
    cout << "? " << (x + 1) << ' ' << (y + 1) << endl;
    int reply;
    cin >> reply;
    return reply;
}
int main() {
    int n;
    cin >> n;
    vector<int>ans(n, -1);
    // mx = index of the maximum value among positions examined so far.
    int mx = 0;
    for (int i = 1; i < n; i++) {
        // Key fact: a mod b > b mod a  <=>  a < b.  So the larger of the
        // two replies equals the smaller value itself.
        int a = ask(mx, i);   // p[mx] mod p[i]
        int b = ask(i, mx);   // p[i] mod p[mx]
        if (a > b) {
            // p[mx] < p[i]: the reply a IS p[mx]; i becomes the new maximum.
            ans[mx] = a;
            mx = i;
        } else {
            // p[i] < p[mx]: the reply b IS p[i].
            ans[i] = b;
        }
    }
    // The overall maximum is never revealed by a query; it must be n.
    ans[mx] = n;
    cout << "! ";
    for (int i = 0; i < n; i++) cout << ans[i] << ' ';
    cout << endl;
    return 0;
}
|
1407
|
D
|
Discrete Centrifugal Jumps
|
There are $n$ beautiful skyscrapers in New York, the height of the $i$-th one is $h_i$. Today some villains have set on fire first $n - 1$ of them, and now the only safety building is $n$-th skyscraper.
Let's call a jump from $i$-th skyscraper to $j$-th ($i < j$) \textbf{discrete}, if all skyscrapers between are strictly lower or higher than both of them. Formally, jump is discrete, if $i < j$ and one of the following conditions satisfied:
- $i + 1 = j$
- $\max(h_{i + 1}, \ldots, h_{j - 1}) < \min(h_i, h_j)$
- $\max(h_i, h_j) < \min(h_{i + 1}, \ldots, h_{j - 1})$.
At the moment, Vasya is staying on the first skyscraper and wants to live a little longer, so his goal is to reach the $n$-th skyscraper with the minimal count of discrete jumps. Help him with calculating this number.
|
Consider such a jump, when all of the skyscrapers between are smaller than initial and final (another case is similar). Let's stand on the skyscraper with index $x$. We want to find out whether $y$-th skyscraper satisfies our conditions. We have two cases: $h_x \leq h_y$. Then, obviously, $y$ is the first skyscraper that not lower than $x$ (otherwise we have a building that higher than starter, it's contradiction). $h_x > h_y$. Then, it's easy to see, that $x$ is the first skyscraper to the left of $y$, that higher than $y$ for the same reason. For another case, reasoning is similar, but skyscaper should be lower, not higher. We can see, that amount of pairs $i, j : i < j$ such that we can jump from $i$ to $j$, we can estimate as $O(n)$. So, we can find for each skyscraper the nearest bigger (and smaller) one using stack and simply count $dp_i$ - minimal count of jumps that we need to reach $i$-th skyscraper. Check the solution for a better understanding.
|
[
"data structures",
"dp",
"graphs"
] | 2,200
|
#include <bits/stdc++.h>
using namespace std;
const int INF = 1e9 + 1;
const int maxn = 3e5 + 1;
// lge/lle: nearest index to the left with height >= / <= h[i];
// rge/rle: same to the right; -1 when none exists.
int h[maxn], dp[maxn], lge[maxn], lle[maxn], rge[maxn], rle[maxn];
// jumps[i]: indices j > i reachable from i by one discrete jump.
vector<int>jumps[maxn];
int main() {
    int n;
    cin >> n;
    for (int i = 0; i < n; i++) {
        cin >> h[i];
        dp[i] = INF;
    }
    dp[0] = 0;
    // Monotonic stacks.  Each loop pops strictly smaller (resp. strictly
    // greater) heights, so the surviving top is the nearest >= (resp. <=).
    vector<pair<int, int> >st;
    for (int i = 0; i < n; i++) { // nearest height >= h[i] on the left
        while (!st.empty() && st.back().first < h[i]) {
            st.pop_back();
        }
        if (st.empty()) lge[i] = -1;
        else lge[i] = st.back().second;
        st.push_back({h[i], i});
    }
    st.clear();
    for (int i = 0; i < n; i++) { // nearest height <= h[i] on the left
        while (!st.empty() && st.back().first > h[i]) {
            st.pop_back();
        }
        if (st.empty()) lle[i] = -1;
        else lle[i] = st.back().second;
        st.push_back({h[i], i});
    }
    st.clear();
    for (int i = n - 1; i >= 0; i--) { // nearest height >= h[i] on the right
        while (!st.empty() && st.back().first < h[i]) {
            st.pop_back();
        }
        if (st.empty()) rge[i] = -1;
        else rge[i] = st.back().second;
        st.push_back({h[i], i});
    }
    st.clear();
    for (int i = n - 1; i >= 0; i--) { // nearest height <= h[i] on the right
        while (!st.empty() && st.back().first > h[i]) {
            st.pop_back();
        }
        if (st.empty()) rle[i] = -1;
        else rle[i] = st.back().second;
        st.push_back({h[i], i});
    }
    st.clear();
    // Every useful discrete jump is between an index and its nearest >=/<=
    // neighbour (O(n) edges total); edges always point rightward.
    for (int i = 0; i < n; i++) {
        if (rle[i] != -1) jumps[i].push_back(rle[i]);
        if (rge[i] != -1) jumps[i].push_back(rge[i]);
        if (lle[i] != -1) jumps[lle[i]].push_back(i);
        if (lge[i] != -1) jumps[lge[i]].push_back(i);
    }
    // All edges go from smaller to larger index, so a single ascending
    // relaxation pass computes shortest paths on this DAG.
    for (int i = 0; i < n; i++) {
        for (int to : jumps[i]) {
            dp[to] = min(dp[to], dp[i] + 1);
        }
    }
    cout << dp[n - 1];
    return 0;
}
|
1407
|
E
|
Egor in the Republic of Dagestan
|
Egor is a famous Russian singer, rapper, actor and blogger, and finally he decided to give a concert in the sunny Republic of Dagestan.
There are $n$ cities in the republic, some of them are connected by $m$ directed roads without any additional conditions. In other words, road system of Dagestan represents an arbitrary directed graph. Egor will arrive to the city $1$, travel to the city $n$ by roads along some path, give a concert and fly away.
As any famous artist, Egor has lots of haters and too annoying fans, so he can travel only by safe roads. There are two types of the roads in Dagestan, black and white: black roads are safe at night only, and white roads — in the morning. Before the trip Egor's manager's going to make a schedule: for each city he'll specify it's color, black or white, and then if during the trip they visit some city, the only time they can leave it is determined by the city's color: night, if it's black, and morning, if it's white. After creating the schedule Egor chooses an available path from $1$ to $n$, and for security reasons it has to be the shortest possible.
Egor's manager likes Dagestan very much and wants to stay here as long as possible, so he asks you to make such schedule that there would be no path from $1$ to $n$ or the shortest path's length would be greatest possible.
A path is one city or a sequence of roads such that for every road (excluding the first one) the city this road goes from is equal to the city previous road goes into. Egor can move only along paths consisting of safe roads only.
The path length is equal to the number of roads in it. The shortest path in a graph is a path with smallest length.
|
**This task has a simple intuitive proof, but I wanted to describe it formally so it's pretty complicated.** We'll show a constructive algorithm for this task and proof it's correctness. We also provide a realisation with $O(n+m)$ time complexity. Let's change each edge's direction to the opposite. Then for vertex of color $c$ all incoming edges of color $c$ and only they are safe. We call a schedule optimal for $x$ if the shortest path (from $n$) to $x$ along the safe edges is the longest possible. We call a path (from $n$) to $x$ optimal if it's the shortest path for any optimal schedule for $x$. So we have to find an optimal schedule for $1$ and the length of optimal path for $1$. Let's make four parameters for each vertex $x$: $b[x]$ - the length of optimal path to $x$, if $x$ is black, $w[x]$ - the length of optimal path to $x$, if $x$ is white, $dp[x]$ - the length of optimal path to $x$ among all possible schedules, $col[x]$ - the color of $x$. The algorithm is following: Initially $b[n]=w[n]=dp[n]=0$, $b[x]=w[x]=dp[x]=+\infty$ for all $x\neq n$. All vertices are unpainted. If there is no unpainted vertex $x$ such that $dp[x]<+\infty$ (including the case when all vertices are painted) - go to point $6$ Among all unpainted vertices choose vertex $u$ with the smallest possible value of $dp[u]$. If $b[u] > w[u]$ set $col[u]=0$. If $w[u] > b[u]$ set $col[u]=1$. If $w[u] = b[u]$ then $col[u]$ can be either $0$ or $1$. If $b[u] > w[u]$ set $col[u]=0$. If $w[u] > b[u]$ set $col[u]=1$. If $w[u] = b[u]$ then $col[u]$ can be either $0$ or $1$. Watch each edge $(u, v)$ outgoing from $u$. Let an edge's color be $t$.If $t=0$ (black edge) - set $b[v]=min(b[v], dp[u]+1)$. If $t=1$ (white edge) - set $w[v]=min(w[v], dp[u]+1)$. Then set $dp[v]=max(b[v], w[v])$. If $t=0$ (black edge) - set $b[v]=min(b[v], dp[u]+1)$. If $t=1$ (white edge) - set $w[v]=min(w[v], dp[u]+1)$. Then set $dp[v]=max(b[v], w[v])$. Go to point $2$. 
For each unpainted vertex set the color just as in point $3$. The value of $dp[1]$ is equal to the desired answer (excluding $dp[1]=\infty$ case: that means there's no way from $n$ to $1$ for the constructed schedule, and the answer is $-1$), and the values of $col$ form the optimal schedule. Correctness proof Let $lb[x]$, $lw[x]$ and $l[x]$ be the real optimal values of $b[x]$, $w[x]$ and $dp[x]$. We'll show that the parameters found by the algorithm are optimal. Let's proof some statements: 1. Any optimal path is simple. This statement is obvious, because the shortest path in any graph doesn't contain repeating vertices. 2. For any black edge $(u, v$) $l[u]+1\geq lb[v]$. For any schedule the length of the shortest path from $n$ to $u$ is not greater than $l[u]$, and the color of $v$ is fixed, so the length of the shortest path from $n$ to $v$ is not greater than $l[u]+1$. For white edges and correspondingly $lw[u]$ the analogous statement is correct. 3. $b[x]\geq lb[x]$, $w[x]\geq lw[x]$ for any vertex $x$ at every moment. Before the first iteration the statement $3$ is correct. Let $u$ be selected in the beginning of some iteration, and we update the parameters of $v$ for a black edge $(u, v)$. Let the statement $3$ be correct before the update. Then due to the statement $2$ $l[u]+1\geq lb[v]$. $b[u]\geq lb[u]$ and $w[u]\geq lw[u]$, so $dp[u]=max(b[u],w[u])\geq l[u]=max(lb[u],lw[u])$, and $dp[u]+1\geq l[u]+1\geq lb[v]$. After the update the value of $b[v]$ can stay the same or be changed to $dp[u]+1$, but since as $b[v]\geq l[v]$, the final value of $b[v]$ is not less than $l[v]$, so the statement $3$ remains correct after the update. Analogously we can show that for white edges it's correct, too. By induction we have that the statement $3$ is invariant, i.e. it's always correct. We can notice that for each painted vertex $x$ it's value of $dp[x]$ is not smaller than the value of $dp[y]$ for any vertex $y$ painted before the $x$. 
Also, the values of $dp[x]$, $b[x]$ and $w[x]$ remain constant after the iteration when $x$ is painted. These facts are easy-to-proof, but for shortness we won't do it here. Lemma: in the end of the $k$-th iteration for each vertex $x$ the values of $b[x]$, $w[x]$ and $dp[x]$ correspond to the shortest (for the current schedule) paths passing through the painted vertices only (excluding $x$). Proof: we'll show it by induction on the number of iterations. It's easy to see that after the first iteration the Lemma remains correct. Let the Lemma be correct after the first $k$ iterations. Let $x$ be painted during the $(k+1)$-th iteration. Then $b[x]$ and $w[x]$ are already equal to the lengths of shortest paths to $x$, passing through painted vertices only and ending by black and white edges, correspondingly (it's obvious, proof it yourself if you don't believe). The length of the shortest path for already painted vertices won't change, because $dp[x]$ is not smaller than the values of $dp$ of previously painted vertices. Thus, at the end of the algorithm we get the desired schedule, where $dp[x]$ for each $x$ is equal to the length of the shortest path to $x$, and $dp[x]\geq l[x]$; then from these two statements follows the fact that $dp[x]=l[x]$ for all vertices $x$, what means that the constructed schedule is optimal. Realisation This algorithm can be realised as a modified BFS, where the vertex is added to the queue just as it's value becomes smaller than infinity (in the code the value of <<infinity>> can be just $n$). It's easy to proof that such realisation is equivalent to the algorithm.
|
[
"constructive algorithms",
"dfs and similar",
"dp",
"graphs",
"greedy",
"shortest paths"
] | 2,500
|
#include <bits/stdc++.h>
using namespace std;

// Modified BFS from vertex n-1 over reversed edges.  A vertex's dp value is
// max(best black arrival, best white arrival); it enters the queue exactly
// when both arrival distances become finite.  n serves as "infinity".
const int MAXN = 1e6 + 1;
vector<int> gb[MAXN], gw[MAXN];              // reversed edges: t==0 (black) / t==1 (white)
int db_[MAXN], dw_[MAXN], dist_[MAXN], colr[MAXN];
int n, m;

int main() {
    ios_base::sync_with_stdio(0);
    cin.tie(0); cout.tie(0);

    cin >> n >> m;
    for (int e = 0; e < m; e++) {
        int u, v, t;
        cin >> u >> v >> t;
        --u; --v;
        // store the edge reversed: the search walks backwards from n-1
        (t == 0 ? gb[v] : gw[v]).push_back(u);
    }

    fill(db_, db_ + n, n);
    fill(dw_, dw_ + n, n);
    fill(dist_, dist_ + n, n);

    queue<int> bfs;
    bfs.push(n - 1);
    dist_[n - 1] = db_[n - 1] = dw_[n - 1] = 0;

    while (!bfs.empty()) {
        int v = bfs.front();
        bfs.pop();
        // relax black predecessors; a vertex is enqueued only once both
        // its black and white arrival values drop below n
        for (int u : gb[v]) {
            if (db_[u] >= n) {
                db_[u] = dist_[v] + 1;
                int cand = max(db_[u], dw_[u]);
                if (cand < n) {
                    dist_[u] = cand;
                    bfs.push(u);
                }
            }
        }
        // symmetric relaxation for white predecessors
        for (int u : gw[v]) {
            if (dw_[u] >= n) {
                dw_[u] = dist_[v] + 1;
                int cand = max(db_[u], dw_[u]);
                if (cand < n) {
                    dist_[u] = cand;
                    bfs.push(u);
                }
            }
        }
    }

    if (dist_[0] == n) cout << "-1\n";       // vertex 0 never became reachable
    else cout << dist_[0] << '\n';

    // paint each vertex with the color of its worse (larger) arrival value
    for (int v = 0; v < n; v++) {
        colr[v] = (db_[v] > dw_[v]) ? 0 : 1;
        cout << colr[v];
    }
    return 0;
}
|
1408
|
A
|
Circle Coloring
|
You are given three sequences: $a_1, a_2, \ldots, a_n$; $b_1, b_2, \ldots, b_n$; $c_1, c_2, \ldots, c_n$.
For each $i$, $a_i \neq b_i$, $a_i \neq c_i$, $b_i \neq c_i$.
Find a sequence $p_1, p_2, \ldots, p_n$, that satisfy the following conditions:
- $p_i \in \{a_i, b_i, c_i\}$
- $p_i \neq p_{(i \mod n) + 1}$.
In other words, for each element, you need to choose one of the three possible values, such that no two adjacent elements (where we consider elements $i,i+1$ adjacent for $i<n$ and also elements $1$ and $n$) will have equal value.
It can be proved that in the given constraints solution always exists. You don't need to minimize/maximize anything, you need to find any proper sequence.
|
At first, set $p_1 = a_1$. Then for $i \in \{2, \ldots, n-1\}$ if $a_i = p_{i-1}$, then set $p_i = b_i$. Otherwise, set $p_i = a_i$. In the end, set $p_n$ to one of $\{a_n, b_n, c_n\}$, which is not equal to $p_1$ or $p_{n-1}$.
|
[
"constructive algorithms"
] | 800
| null |
1408
|
B
|
Arrays Sum
|
You are given a \textbf{non-decreasing} array of \textbf{non-negative} integers $a_1, a_2, \ldots, a_n$. Also you are given a positive integer $k$.
You want to find $m$ \textbf{non-decreasing} arrays of \textbf{non-negative} integers $b_1, b_2, \ldots, b_m$, such that:
- The size of $b_i$ is equal to $n$ for all $1 \leq i \leq m$.
- For all $1 \leq j \leq n$, $a_j = b_{1, j} + b_{2, j} + \ldots + b_{m, j}$. In the other word, array $a$ is the sum of arrays $b_i$.
- The number of different elements in the array $b_i$ is at most $k$ for all $1 \leq i \leq m$.
Find the minimum possible value of $m$, or report that there is no possible $m$.
|
Case $k = 1$: If all $a_i$ are equal the answer is $1$. Otherwise the answer is $-1$. Case $k > 1$: Let's consider an array $a' = (a_2 - a_1, a_3 - a_2, \ldots, a_n - a_{n-1})$ and arrays $b_i' = (b_{i, 2} - b_{i, 1}, b_{i, 3} - b_{i, 2}, \ldots, b_{i, n} - b_{i, n-1})$. The number of non-zero elements in $b_i'$ is at most $k-1$. Let's define $c$ as the number of non-zero elements in $a'$ or in the other words the number of indices $i$, such that $a_i \neq a_{i+1}$. The answer is at least $\lceil \frac{c}{k-1} \rceil$. It's easy to prove that there exists arrays $b_i$ with such number $m$.
|
[
"constructive algorithms",
"greedy",
"math"
] | 1,400
| null |
1408
|
C
|
Discrete Acceleration
|
There is a road with length $l$ meters. The start of the road has coordinate $0$, the end of the road has coordinate $l$.
There are two cars, the first standing at the start of the road and the second standing at the end of the road. They will start driving simultaneously. The first car will drive from the start to the end and the second car will drive from the end to the start.
Initially, they will drive with a speed of $1$ meter per second. There are $n$ flags at \textbf{different} coordinates $a_1, a_2, \ldots, a_n$. Each time when any of two cars drives through a flag, the speed of that car increases by $1$ meter per second.
Find how long will it take for cars to meet (to reach the same coordinate).
|
Solution $1$: Let's make a binary search on the answer. If we have some time $t$ we can calculate the coordinate of each car after $t$ seconds. Let's define them as $x_1$ and $x_2$. If $x_1 \leq x_2$ let's move the left bound of the binary search, otherwise, let's move the right bound. Time complexity: $O(n \log{\frac{1}{\epsilon}})$. Solution $2$: Let's calculate the time for each car and each flag, after which the car will reach this flag. We can find the first flag from left to right, on which the second car was before the first car. Using it we can find the segment between flags, on which the cars will meet. After that the answer can be found by some simple formula using their speed and times, on which they will reach the left and the right bound of this segment. Time complexity: $O(n)$.
|
[
"binary search",
"dp",
"implementation",
"math",
"two pointers"
] | 1,500
| null |
1408
|
D
|
Searchlights
|
There are $n$ robbers at coordinates $(a_1, b_1)$, $(a_2, b_2)$, ..., $(a_n, b_n)$ and $m$ searchlight at coordinates $(c_1, d_1)$, $(c_2, d_2)$, ..., $(c_m, d_m)$.
In one move you can move each robber to the right (increase $a_i$ of each robber by one) or move each robber up (increase $b_i$ of each robber by one). Note that you should either increase \textbf{all} $a_i$ or \textbf{all} $b_i$, you \textbf{can't} increase $a_i$ for some points and $b_i$ for some other points.
Searchlight $j$ can see a robber $i$ if $a_i \leq c_j$ and $b_i \leq d_j$.
A configuration of robbers is safe if no searchlight can see a robber (i.e. if there is no pair $i,j$ such that searchlight $j$ can see a robber $i$).
What is the minimum number of moves you need to perform to reach a safe configuration?
|
Let's define as $x$ our move to the right and as $y$ our move up. For all pairs $(i, j)$ of (robber, searchlight) at least one of these should be true: $x + a_i > c_j$, $y + b_i > d_j$. So if $x \leq c_j - a_i$ then $y \geq d_j - b_i + 1$. Let's make an array $r$ of length $C = 10^6$ and write on each position $x$ the minimum value of $y$. For each $(i, j)$ we should make $r_x = max(r_x, d_j - b_i + 1)$ for all $x \leq c_j - a_i$. So we have $nm$ queries of $max=$ on prefix. We can do it using suffix maximums. After we calculate all $r_x$, the answer is $\min\limits_{x}{(x + r_x)}$. Time complexity: $O(nm + C)$.
|
[
"binary search",
"brute force",
"data structures",
"dp",
"implementation",
"sortings",
"two pointers"
] | 2,000
| null |
1408
|
E
|
Avoid Rainbow Cycles
|
You are given $m$ sets of integers $A_1, A_2, \ldots, A_m$; elements of these sets are integers between $1$ and $n$, inclusive.
There are two arrays of positive integers $a_1, a_2, \ldots, a_m$ and $b_1, b_2, \ldots, b_n$.
In one operation you can delete an element $j$ from the set $A_i$ and pay $a_i + b_j$ coins for that.
You can make several (maybe none) operations (some sets can become empty).
After that, you will make an edge-colored undirected graph consisting of $n$ vertices. For each set $A_i$ you will add an edge $(x, y)$ with color $i$ for all $x, y \in A_i$ and $x < y$. Some pairs of vertices can be connected with more than one edge, but such edges have different colors.
You call a cycle $i_1 \to e_1 \to i_2 \to e_2 \to \ldots \to i_k \to e_k \to i_1$ ($e_j$ is some edge connecting vertices $i_j$ and $i_{j+1}$ in this graph) rainbow if all edges on it have different colors.
Find the minimum number of coins you should pay to get a graph without rainbow cycles.
|
Let's make a bipartite graph with $m$ vertices on the left side and $n$ vertices on the right side. We will connect vertex $i$ from the left side with all elements of $A_i$. It can be proven, that the graph, which we create using our sets don't have rainbow cycles if and only if our bipartite graph don't have cycles. So, our task is equivalent of finding the Maximum Spanning Tree of this bipartite graph, where edge between $i$ (from left side) and $j$ (from right side) has weight equal to $a_i + b_j$.
|
[
"data structures",
"dsu",
"graphs",
"greedy",
"sortings",
"trees"
] | 2,400
| null |
1408
|
F
|
Two Different
|
You are given an integer $n$.
You should find a list of pairs $(x_1, y_1)$, $(x_2, y_2)$, ..., $(x_q, y_q)$ ($1 \leq x_i, y_i \leq n$) satisfying the following condition.
Let's consider some function $f: \mathbb{N} \times \mathbb{N} \to \mathbb{N}$ (we define $\mathbb{N}$ as the set of positive integers). In other words, $f$ is a function that returns a positive integer for a pair of positive integers.
Let's make an array $a_1, a_2, \ldots, a_n$, where $a_i = i$ initially.
You will perform $q$ operations, in $i$-th of them you will:
- assign $t = f(a_{x_i}, a_{y_i})$ ($t$ is a temporary variable, it is used \textbf{only} for the next two assignments);
- assign $a_{x_i} = t$;
- assign $a_{y_i} = t$.
In other words, you need to \textbf{simultaneously} change $a_{x_i}$ and $a_{y_i}$ to $f(a_{x_i}, a_{y_i})$. Note that during this process $f(p, q)$ is always the same for a fixed pair of $p$ and $q$.
In the end, there should be at most two different numbers in the array $a$.
It should be true for any function $f$.
Find any possible list of pairs. The number of pairs should not exceed $5 \cdot 10^5$.
|
Claim: for each $k$, we can perform operations on $2^k$ elements to make all numbers the same. You can prove this fact with induction by $k$. For $k=0$ this fact is obvious, For $k>0$, at first change first $2^{k-1}$ and last $2^{k-1}$ points to the same value (assume that those values are $x$ and $y$, respectively). And then, perform operations on points $i$ and $i + 2^{k-1}$, to simultaneously change them to $f(x,y)$. After that, all values will be equal $\blacksquare$. Using this observation, it is easy to leave only two different values in the array. Find the maximum $2^k \leq n$, and then change first $2^k$ numbers to the same number, and then change last $2^k$ elements to the same number (note that $2^k + 2^k > n$, so in the end there will be only two different elements).
|
[
"constructive algorithms",
"divide and conquer"
] | 2,300
| null |
1408
|
G
|
Clusterization Counting
|
There are $n$ computers in the company network. They are numbered from $1$ to $n$.
For each pair of two computers $1 \leq i < j \leq n$ you know the value $a_{i,j}$: the difficulty of sending data between computers $i$ and $j$. All values $a_{i,j}$ for $i<j$ are different.
You want to separate all computers into $k$ sets $A_1, A_2, \ldots, A_k$, such that the following conditions are satisfied:
- for each computer $1 \leq i \leq n$ there is \textbf{exactly} one set $A_j$, such that $i \in A_j$;
- for each two pairs of computers $(s, f)$ and $(x, y)$ ($s \neq f$, $x \neq y$), such that $s$, $f$, $x$ are from the same set but $x$ and $y$ are from different sets, $a_{s,f} < a_{x,y}$.
For each $1 \leq k \leq n$ find the number of ways to divide computers into $k$ groups, such that all required conditions are satisfied. These values can be large, so you need to find them by modulo $998\,244\,353$.
|
Sort all edges by their weights. In this order, maintain the DSU. For each connected component, let's maintain $dp_k$: the number of ways to divide this component into $k$ groups. When you merge two connected clusters, you have to recalculate the DP as in the multiplication of two polynomials (of course, if you bound $k$ by the size of the connected component, similarly to the similar tree DP's, it works in $O(n^2)$ total). Once some connected component becomes a clique, then we obtain a new cluster, and you should increase $dp_1$ for this connected component by $1$.
|
[
"combinatorics",
"dp",
"dsu",
"fft",
"graphs",
"trees"
] | 2,700
| null |
1408
|
H
|
Rainbow Triples
|
You are given a sequence $a_1, a_2, \ldots, a_n$ of non-negative integers.
You need to find the largest number $m$ of triples $(i_1, j_1, k_1)$, $(i_2, j_2, k_2)$, ..., $(i_m, j_m, k_m)$ such that:
- $1 \leq i_p < j_p < k_p \leq n$ for each $p$ in $1, 2, \ldots, m$;
- $a_{i_p} = a_{k_p} = 0$, $a_{j_p} \neq 0$;
- all $a_{j_1}, a_{j_2}, \ldots, a_{j_m}$ are different;
- all $i_1, j_1, k_1, i_2, j_2, k_2, \ldots, i_m, j_m, k_m$ are different.
|
Let's reformulate this problem: you should choose different $i$ with different $a_i > 0$, and for each of them choose one zero to the left and one zero to the right (i.e. for each chosen guy we can assume that we have two vertices in the bipartite graph, and we want to match the left of them with some zero to the left, and the right of them with some zero to the right). Denote the number of zeroes as $z$. Observation 1. The answer does not exceed $\frac{z}{2}$. Observation 2. If the number of zeroes to the right of number is $\geq \frac{z}{2}$, then we will always be able to match it with some zeroes to the right (from Observation 2). You can also make a similar symmetrical observation. Observation 3. Using observation 2, we can separate non-zero numbers into two groups, s.t. numbers on prefix can always be matched with zero to the right, and numbers on suffix can always be matched with zero to the left. Let's denote those sets as $L$ (prefix) and $R$ suffix. Observation 4. For each colour only two numbers are interesting: the rightmost in $L$ and the leftmost in $R$. It is easy to show, for example, if you decided to take some colour but not in the rightmost in $L$ position, you can replace it by the rightmost position in $L$ (of course, corresponding zero to the left can be matched with the new position, as you moved it to the right, and you can still match it with some zero to the right). Symmetrical proof holds for $R$. Observation 5. We derive the following problem: for each colour you want to either choose it in the $L$, choose it in the $R$, or not choose it all. And if you choose it in $L$, then you want to match it with some zeroes on the prefix. If you choose it in $R$, then you want to match it with some zeroes on the suffix. So we can replace each colour to the tuple $(l,r)$ which means that this colour can be matched with either some zero on the prefix of length $l$ of some zero on the suffix of length $r$. And our goal is to find the largest matching. 
Observation 6. The maximum matching for this type of graph can be found easily: move $x$ backwards, and each time match $x$ with some unmatched pair $(l, r)$ such that $l \geq x$, and $r$ is minimum among them. Observation 7. After you saw observation 6, you can optimize the solution with a priority queue. And don't forget that the real answer is the smaller value of $\frac{z}{2}$ and your matching size. P.S. You can also formulate this problem as a matroid intersection, and find the answer using the minimax formula for the matroid intersection + segment tree.
|
[
"binary search",
"data structures",
"flows",
"greedy"
] | 3,300
| null |
1408
|
I
|
Bitwise Magic
|
You are given a positive integer $k$ and an array $a_1, a_2, \ldots, a_n$ of non-negative distinct integers not smaller than $k$ and not greater than $2^c-1$.
In each of the next $k$ seconds, one element is chosen randomly equiprobably out of all $n$ elements and decreased by $1$.
For each integer $x$, $0 \leq x \leq 2^c - 1$, you need to find the probability that in the end the bitwise XOR of all elements of the array is equal to $x$.
Each of these values can be represented as an irreducible fraction $\frac{p}{q}$, and you need to find the value of $p \cdot q^{-1}$ modulo $998\,244\,353$.
|
There are several solutions to this problem, with various complexities. Some of our solutions work in less than half a second. The constraints were set to allow most non-naive solutions. Solution 1, DP on Trie Let's look at the bitwise trie. Each node corresponds to some segment $[l, r]$ of possible numbers. Let's note that if you perform operations on numbers in $[l + k, \ldots, r]$, then the resulting numbers for them won't leave the trie node. So we can come up with the following solution: let's maintain some kind of a DP on this trie. When we took into account numbers from $a$ among $[l + k, \ldots, r]$, and we should store the FWHT for each number of operations (we can assume that we store some kind of an exponential generating functions for each value in FHWT and maintain the product of those genfuncs for numbers with the corresponding signs). Note that in this FWHT you are interested only in $O(r - l)$ values (because all given numbers have equal prefixes, so if you know the parity of the number, you know the prefix of the total XOR). To recalculate this DP, when we calculated it for children, at first in $O((r - l)k^2)$ you should multiply DP's for the children, and then you should add first $k$ numbers of the right subtree (that you didn't consider yet) to this DP, you can do it for each of them in $O((r - l)k^2)$. If some details are not clear for you here, probably you are not familiar with FWHT enough. Deriving the total complexity of $2^c \cdot c \cdot k^3$. Of course, a lot of different further optimizations possible for this solution. Solution 2, Observations The main observation: the number of different tuples $x \oplus x, x \oplus (x-1), x \oplus (x-2), \ldots, x \oplus (x - k)$ is around $O(kc)$. Why? Because consider how $x$ is changed during the process, usually only $O(\log k)$ bits are changed, but once one block of zeroes of the length $O(\log n)$ is changed to ones. 
So if we know the last $O(\log k)$ bits of the number and the length of the block of zeroes after them, we can identify s $x \oplus x, x \oplus (x-1), x \oplus (x-2), \ldots, x \oplus (x - k)$. If you notice this, you can note a lot of other similar observations, and derive another solution with better complexity than naive. And then using DP/FWHT, you can upgrade those ideas to the solution. For example, QAQautomaton wrote the solution in $2^c \cdot c^2 + c^6$ using this idea. I will leave all remaining details as an exercise for the reader.
|
[
"dp",
"math"
] | 3,200
| null |
1409
|
A
|
Yet Another Two Integers Problem
|
You are given two integers $a$ and $b$.
In one move, you can choose some \textbf{integer} $k$ from $1$ to $10$ and add it to $a$ or subtract it from $a$. In other words, you choose an integer $k \in [1; 10]$ and perform $a := a + k$ or $a := a - k$. You may use \textbf{different} values of $k$ in different moves.
Your task is to find the \textbf{minimum} number of moves required to obtain $b$ from $a$.
You have to answer $t$ independent test cases.
|
We can add or subtract $10$ until the difference between $a$ and $b$ becomes less than $10$. And if it is not $0$ after all such moves, we need one additional move. Let $d = |a - b|$ is the absolute difference between $a$ and $b$. The final answer is $\left\lfloor\frac{d}{10}\right\rfloor$ plus one if $d \mod 10 > 0$. This formula can be represented as $d$ divided by $10$ rounded up, in other words $\left\lfloor\frac{d+9}{10}\right\rfloor$.
|
[
"greedy",
"math"
] | 800
|
#include <bits/stdc++.h>
using namespace std;

// 1409A: minimum number of moves (each move adds or subtracts k, 1 <= k <= 10)
// to turn a into b.  Greedy: step by 10 while possible, then at most one more
// move covers the remainder, i.e. the answer is ceil(|a - b| / 10).
int main() {
#ifdef _DEBUG
	freopen("input.txt", "r", stdin);
//	freopen("output.txt", "w", stdout);
#endif
	ios_base::sync_with_stdio(false);   // fast I/O, consistent with the other solutions
	cin.tie(nullptr);
	int t;
	cin >> t;
	while (t--) {
		int a, b;
		cin >> a >> b;
		// integer ceiling division; '\n' instead of endl avoids a flush per test case
		cout << (abs(a - b) + 9) / 10 << '\n';
	}
	return 0;
}
|
1409
|
B
|
Minimum Product
|
You are given four integers $a$, $b$, $x$ and $y$. Initially, $a \ge x$ and $b \ge y$. You can do the following operation \textbf{no more than} $n$ times:
- Choose either $a$ or $b$ and decrease it by one. However, as a result of this operation, value of $a$ cannot become less than $x$, and value of $b$ cannot become less than $y$.
Your task is to find the \textbf{minimum} possible product of $a$ and $b$ ($a \cdot b$) you can achieve by applying the given operation no more than $n$ times.
You have to answer $t$ independent test cases.
|
The only fact required to solve the problem: if we start decreasing the number, we are better to end decreasing it and only then decrease the other number. So, we can just consider two cases: when we decrease $a$ first, and $b$ after that and vice versa, and just take the minimum product of these two results. The rest is just implementation.
|
[
"brute force",
"greedy",
"math"
] | 1,100
|
#include <bits/stdc++.h>
using namespace std;

// 1409B: minimize a*b after at most n unit decreases, keeping a >= x, b >= y.
// It is always optimal to push one of the numbers down as far as possible
// first; try both orders and keep the smaller product.
int main() {
#ifdef _DEBUG
	freopen("input.txt", "r", stdin);
//	freopen("output.txt", "w", stdout);
#endif
	ios_base::sync_with_stdio(false);   // fast I/O, consistent with the other solutions
	cin.tie(nullptr);
	int t;
	cin >> t;
	while (t--) {
		int a, b, x, y, n;
		cin >> a >> b >> x >> y >> n;
		long long ans = 1e18;
		for (int i = 0; i < 2; ++i) {
			int da = min(n, a - x);        // decrease a as much as the budget/floor allow
			int db = min(n - da, b - y);   // spend the remaining moves on b
			ans = min(ans, (a - da) * 1ll * (b - db));
			swap(a, b);                    // second iteration: decrease b first
			swap(x, y);
		}
		// '\n' instead of endl avoids a flush per test case
		cout << ans << '\n';
	}
	return 0;
}
|
1409
|
C
|
Yet Another Array Restoration
|
We have a secret array. You don't know this array and you have to restore it. However, you know some facts about this array:
- The array consists of $n$ \textbf{distinct positive} (greater than $0$) integers.
- The array contains two elements $x$ and $y$ (these elements are \textbf{known} for you) such that $x < y$.
- If you sort the array in increasing order (such that $a_1 < a_2 < \ldots < a_n$), differences between all adjacent (consecutive) elements are equal (i.e. $a_2 - a_1 = a_3 - a_2 = \ldots = a_n - a_{n-1})$.
It can be proven that such an array always exists under the constraints given below.
Among all possible arrays that satisfy the given conditions, we ask you to restore one which has the \textbf{minimum possible} maximum element. In other words, you have to minimize $\max(a_1, a_2, \dots, a_n)$.
You have to answer $t$ independent test cases.
|
The only fact required to solve this problem is just to notice that the answer array is just an arithmetic progression. After that, we can fix the first element $start$, fix the difference $d$, construct the array $[start, start + d, start + 2d, \dots, start + d \cdot (n-1)]$, check if $x$ and $y$ are in this array and, if yes, update the answer with $start + d \cdot (n-1)$. This is $O(n^3)$ solution. There are faster solutions, though. Other author's solution is $O(n \sqrt{y})$ but I didn't want to make this problem harder, so I allowed $O(n^3)$ solutions. It is obvious that the difference of the progression is some divisor of $y-x$. Let it be $d$. Let's add some elements starting from $y$ "to the left" ($y, y-d, y-2d$ and so on) and stop if we reach $n$ elements or the next element is less than $1$. If we didn't find $x$ among these elements, just skip this difference, it is useless for us. Otherwise, if we have less than $n$ elements, let's add $y+d, y+2d, y+3d$ and so on until we get $n$ elements. And then update the answer with the maximum element of the array. There is also a solution in $O(n + \sqrt{y})$ with some greedy observations :)
|
[
"brute force",
"math",
"number theory"
] | 1,200
|
#include <bits/stdc++.h>
using namespace std;

// 1409C: restore an arithmetic progression of n distinct positive integers
// containing both x and y (x < y), minimizing the largest element.
int main() {
	int tests;
	cin >> tests;
	for (; tests > 0; --tests) {
		int n, x, y;
		cin >> n >> x >> y;
		const int gap = y - x;
		// Try common differences in increasing order; the first feasible one
		// (a divisor of y-x that needs at most n terms) is taken.
		for (int step = 1; step <= gap; ++step) {
			bool divides = (gap % step == 0);
			if (!divides || gap / step + 1 > n) continue;
			// Place as many terms as possible at or below y while keeping
			// every term positive.
			int below = min((y - 1) / step, n - 1);
			int first = y - below * step;
			for (int i = 0; i < n; ++i)
				cout << (first + i * step) << ' ';
			cout << endl;
			break;
		}
	}
}
|
1409
|
D
|
Decrease the Sum of Digits
|
You are given a positive integer $n$. In one move, you can increase $n$ by one (i.e. make $n := n + 1$). Your task is to find the minimum number of moves you need to perform in order to make the sum of digits of $n$ be less than or equal to $s$.
You have to answer $t$ independent test cases.
|
Firstly, let's check if the initial $n$ fits the conditions. If it does, print $0$ and continue. Otherwise, let's solve the problem greedily. At first, let's try to set the last digit to zero. Let $dig = n \mod 10$. We need exactly $(10 - dig) \mod 10$ moves to do that. Let's add this number to $n$ and to the answer and check if the current $n$ fits the conditions. If it doesn't, let's try to set the previous last digit to zero. Let $dig = \left\lfloor\frac{n}{10}\right\rfloor \mod 10$. Then we need $((10 - dig) \mod 10) \cdot 10$ moves to do that. Let's add this number to $n$ and to the answer and check if the current $n$ fits the conditions. If it doesn't, repeat the same with the third digit and so on. This cycle can do no more than $18$ iterations. And we can find the sum of digits of $n$ in at most $18$ iterations too (decimal logarithm of $n$). So, the total time complexity is $O(\log_{10}^2{(n)})$.
|
[
"greedy",
"math"
] | 1,500
|
#include <bits/stdc++.h>
using namespace std;
// Decimal digit sum of a non-negative integer (0 for n == 0).
int sum(long long n) {
	int total = 0;
	for (; n > 0; n /= 10)
		total += n % 10;
	return total;
}
// 1409D: greedily zero out digits from the least significant end, rounding n
// up to the next multiple of 10, 100, ... until the digit sum is at most s;
// the answer is the total amount added to n.
int main() {
#ifdef _DEBUG
	freopen("input.txt", "r", stdin);
//	freopen("output.txt", "w", stdout);
#endif
	int t;
	cin >> t;
	while (t--) {
		long long n;
		int s;
		cin >> n >> s;
		if (sum(n) <= s) {
			// already fits, zero moves needed
			cout << 0 << endl;
			continue;
		}
		long long moves = 0;
		long long base = 1;
		for (int pos = 0; pos < 18; ++pos) {
			// round n up to the next multiple of base * 10
			int dig = int(n / base % 10);
			long long step = base * ((10 - dig) % 10);
			n += step;
			moves += step;
			if (sum(n) <= s) {
				break;
			}
			base *= 10;
		}
		cout << moves << endl;
	}
	return 0;
}
|
1409
|
E
|
Two Platforms
|
There are $n$ points on a plane. The $i$-th point has coordinates $(x_i, y_i)$. You have two horizontal platforms, both of length $k$. Each platform can be placed anywhere on a plane but it should be placed \textbf{horizontally} (on the same $y$-coordinate) and have \textbf{integer borders}. If the left border of the platform is $(x, y)$ then the right border is $(x + k, y)$ and all points between borders (including borders) belong to the platform.
Note that platforms can share common points (overlap) and it is not necessary to place both platforms on the same $y$-coordinate.
When you place both platforms on a plane, all points start falling down decreasing their $y$-coordinate. If a point collides with some platform at some moment, the point stops and is \textbf{saved}. Points which never collide with any platform are lost.
Your task is to find the maximum number of points you can \textbf{save} if you place both platforms optimally.
You have to answer $t$ independent test cases.
For better understanding, please read the \textbf{Note} section below to see a picture for the first test case.
|
Firstly, we obviously don't need $y$-coordinates at all because we can place both platforms at $y=-\infty$. Let's sort all $x$-coordinates in non-decreasing order. Calculate for each point $i$ two values $l_i$ and $r_i$, where $l_i$ is the number of points to the left from the point $i$ (including $i$) that are not further than $k$ from the $i$-th point (i.e. the number of such points $j$ that $|x_i - x_j| \le k$). And $r_i$ is the number of points to the right from the point $i$ (including $i$) that are not further than $k$ from the $i$-th point. Both these parts can be done in $O(n)$ using two pointers. Then let's build suffix maximum array on $r$ and prefix maximum array on $l$. For $l$, just iterate over all $i$ from $2$ to $n$ and do $l_i := max(l_i, l_{i-1})$. For $r$, just iterate over all $i$ from $n-1$ to $1$ and do $r_i := max(r_i, r_{i + 1})$. The question is: what? What did we do? We did the following thing: the answer always can be represented as two non-intersecting segments of length $k$ such that at least one endpoint of each segment is some input point (except the case $n=1$). Now, let's fix this border between segments. Iterate over all $i$ from $1$ to $n-1$ and update the answer with $max(l_{i}, r_{i + 1})$. So we took some segment that starts at some point to the left from $i$ (including $i$) and goes to the left and took some segment that starts further than $i+1$ (including $i+1$) and goes to the right. With this model, we considered all optimal answers that can exist. Time complexity: $O(n \log{n})$.
|
[
"binary search",
"dp",
"sortings",
"two pointers"
] | 1,800
|
#include <bits/stdc++.h>
using namespace std;

// 1409E: y-coordinates are irrelevant (platforms can sit arbitrarily low);
// choose two length-k segments on the x-axis covering the most points.
int main() {
#ifdef _DEBUG
	freopen("input.txt", "r", stdin);
//	freopen("output.txt", "w", stdout);
#endif
	int t;
	cin >> t;
	while (t--) {
		int n, k;
		cin >> n >> k;
		vector<int> xs(n), ys(n);
		for (auto &c : xs) cin >> c;
		for (auto &c : ys) cin >> c;   // read but unused
		sort(xs.begin(), xs.end());

		// right[i]: suffix maximum of "points within distance k to the right
		// of point i (inclusive)", computed with a two-pointer sweep.
		vector<int> left(n), right(n);
		for (int i = n - 1, p = n - 1; i >= 0; --i) {
			while (xs[p] - xs[i] > k) --p;
			right[i] = p - i + 1;
			if (i + 1 < n) right[i] = max(right[i], right[i + 1]);
		}
		// left[i]: symmetric prefix maximum looking to the left.
		for (int i = 0, p = 0; i < n; ++i) {
			while (xs[i] - xs[p] > k) ++p;
			left[i] = i - p + 1;
			if (i > 0) left[i] = max(left[i], left[i - 1]);
		}

		int best = 1;   // n == 1: one platform saves the single point
		// split point: one segment fully within [0..i], the other within [i+1..n-1]
		for (int i = 0; i + 1 < n; ++i)
			best = max(best, left[i] + right[i + 1]);
		cout << best << endl;
	}
	return 0;
}
|
1409
|
F
|
Subsequences of Length Two
|
You are given two strings $s$ and $t$ consisting of lowercase Latin letters. The length of $t$ is $2$ (i.e. this string consists only of two characters).
In one move, you can choose \textbf{any} character of $s$ and replace it with \textbf{any} lowercase Latin letter. More formally, you choose some $i$ and replace $s_i$ (the character at the position $i$) with some character from 'a' to 'z'.
You want to do \textbf{no more than} $k$ replacements in such a way that \textbf{maximizes} the number of occurrences of $t$ in $s$ as a \textbf{subsequence}.
Recall that a subsequence is a sequence that can be derived from the given sequence by deleting zero or more elements without changing the order of the remaining elements.
|
I'm almost sure this problem can be solved faster and with greater constraints but this version is fine for the last problem. Consider both strings $0$-indexed and let's do the dynamic programming $dp_{i, j, cnt_0}$. It means the maximum number of occurrences of $t$ if we considered first $i$ characters of $s$, did $j$ moves and the number of characters $t_0$ is $cnt_0$. The answer to the problem is $\max\limits_{ck=0}^{k} \max\limits_{cnt_0=0}^{n} dp_{n, ck, cnt_0}$. Initially all states are $-\infty$ and $dp_{0, 0, 0}$ is $0$. What about transitions? There are essentially three types of them: don't change the current character, change the current character to $t_0$ and change the current character to $t_1$. Let's create three additional variables to make our life easier (if that were true...). $e_0$ is $1$ if $s_i = t_0$ and $0$ otherwise, $e_1$ is $1$ if $s_i = t_1$ and $0$ otherwise and $e_{01}$ is $1$ if $t_0 = t_1$ and $0$ otherwise. Now let's make and describe our transitions: Don't change the $i$-th character:$dp_{i + 1, ck, cnt_0 + e_0} = max(dp_{i + 1, ck, cnt_0 + e_0}, dp_{i, ck, cnt_0} + (e_1~ ?~ cnt_0 : 0))$. The expression $x~ ?~ y : z$ is just ternary if statement: if $x$ is true, return $y$, otherwise return $z$. So, the number of characters $t_0$ increases if $s_i$ equals $t_0$ and the answer increases if the $i$-th character equals $t_1$ (because we added all occurrences that end in the $i$-th character). $dp_{i + 1, ck, cnt_0 + e_0} = max(dp_{i + 1, ck, cnt_0 + e_0}, dp_{i, ck, cnt_0} + (e_1~ ?~ cnt_0 : 0))$. The expression $x~ ?~ y : z$ is just ternary if statement: if $x$ is true, return $y$, otherwise return $z$. So, the number of characters $t_0$ increases if $s_i$ equals $t_0$ and the answer increases if the $i$-th character equals $t_1$ (because we added all occurrences that end in the $i$-th character). 
Change the $i$-th character to $t_0$ (possible only when $ck < k$):$dp_{i + 1, ck + 1, cnt_0 + 1} = max(dp_{i + 1, ck + 1, cnt_0 + 1}, dp_{i, ck, cnt_0} + (e_{01}~ ?~ cnt_0 : 0))$. The number of characters $t_0$ always increases and the answer increases if $t_0$ equals $t_1$ by the same reason as in the previous transition. $dp_{i + 1, ck + 1, cnt_0 + 1} = max(dp_{i + 1, ck + 1, cnt_0 + 1}, dp_{i, ck, cnt_0} + (e_{01}~ ?~ cnt_0 : 0))$. The number of characters $t_0$ always increases and the answer increases if $t_0$ equals $t_1$ by the same reason as in the previous transition. Change the $i$-th character to $t_1$ (possible only when $ck < k$):$dp_{i + 1, ck + 1, cnt_0 + e_{01}} = max(dp_{i + 1, ck + 1, cnt_0 + e_{01}}, dp_{i, ck, cnt_0} + cnt_0)$. The number of characters $t_0$ increases only if $t_0 = t_1$ and the answer always increases. $dp_{i + 1, ck + 1, cnt_0 + e_{01}} = max(dp_{i + 1, ck + 1, cnt_0 + e_{01}}, dp_{i, ck, cnt_0} + cnt_0)$. The number of characters $t_0$ increases only if $t_0 = t_1$ and the answer always increases. Note that we always increase the number of moves in the second and the third transitions even when $s_i$ equals $t_0$ or $t_1$ because this case is handled in the first transition, so we don't care. Time complexity: $O(n^3)$. There are also some greedy approaches which work in $O(n^4)$ with pretty small constant and can be optimized even further.
|
[
"dp",
"strings"
] | 2,100
|
// Author: Ivan Kazmenko (gassa@mail.ru)
module solution;
import std.algorithm;
import std.conv;
import std.range;
import std.stdio;
import std.string;
// Brute-force + greedy solver: given string s, a two-character pattern t and a
// budget of k single-character changes, maximize the number of occurrences of
// the subsequence t[0] t[1] in s.  (Tutorial above describes the O(n^3) DP;
// this is the alternative greedy approach it mentions.)
void main ()
{
// n — length of s, k — maximum number of characters we may change.
int n, k;
// Process test cases until EOF; readf returns the number of items read.
while (readf !(" %s %s") (n, k) > 0)
{
readln;
// s0 — the original string, kept unmodified; s — a working copy that is
// rebuilt from s0 for every candidate parameter combination.
auto s0 = readln.strip;
auto t = readln.strip;
auto s = s0.dup;
int res = 0;
// Split the budget k: b0 changes are spent by the left-to-right pass
// (writing t[0]), the remaining e0 = k - b0 by the right-to-left pass
// (writing t[1]).
foreach (b0; 0..k + 1)
{
auto e0 = k - b0;
// x0 — how many existing t[1] characters the left pass is allowed to
// overwrite with t[0]; y0 — symmetric allowance for the right pass
// (overwriting t[0] with t[1]).
loop_x0:
foreach (x0; 0..b0 + 1)
{
loop_y0:
foreach (y0; 0..e0 + 1)
{
// Working counters for the current attempt: remaining change
// budgets (b, e) and remaining overwrite allowances (x, y).
int b = b0;
int e = e0;
int x = x0;
int y = y0;
// Reset the working copy to the original string.
s[] = s0[];
// Left-to-right pass: greedily turn the leftmost non-t[0]
// characters into t[0] while budget b lasts.
for (int i = 0; i < n && b; i++)
{
if (s[i] == t[0])
{
// Already t[0] — nothing to do, no budget spent.
}
else if (s[i] != t[1])
{
// A "neutral" character (neither t[0] nor t[1]):
// always worth converting to t[0].
s[i] = t[0];
b -= 1;
}
else if (x > 0)
{
// Character equals t[1]: overwrite it only while the
// x allowance permits sacrificing a t[1].
s[i] = t[0];
x -= 1;
b -= 1;
}
}
// Right-to-left pass: symmetric greedy, turning the rightmost
// non-t[1] characters into t[1], overwriting t[0] only while
// the y allowance permits.
for (int j = n - 1; j >= 0 && e; j--)
{
if (s[j] == t[1])
{
// Already t[1] — nothing to do.
}
else if (s[j] != t[0])
{
s[j] = t[1];
e -= 1;
}
else if (y > 0)
{
s[j] = t[1];
y -= 1;
e -= 1;
}
}
// Count occurrences of the subsequence t[0] t[1] in the
// modified string: for every t[1] add the number of t[0]
// characters seen to its left.
int cur = 0;
int add = 0;
for (int i = 0; i < n; i++)
{
if (s[i] == t[1])
{
cur += add;
}
if (s[i] == t[0])
{
add += 1;
}
}
res = max (res, cur);
// Pruning.  If the left pass did not exhaust allowance x, a
// larger x0 cannot change the outcome, so the whole x0 loop
// is abandoned (note: this also skips the remaining y0
// values — presumably intentional since the left pass result
// is independent of y0; TODO confirm).
if (x > 0)
{
break loop_x0;
}
// Symmetric pruning for the right pass: leftover y means
// larger y0 is pointless, so stop the inner y0 loop only.
if (y > 0)
{
break loop_y0;
}
}
}
}
writeln (res);
}
}
|
1411
|
A
|
In-game Chat
|
You have been assigned to develop a filter for bad messages in the in-game chat. A message is a string $S$ of length $n$, consisting of lowercase English letters and characters ')'. The message is bad if the number of characters ')' at the end of the string is strictly greater than the number of remaining characters. For example, the string ")bc)))" has three parentheses at the end, three remaining characters, and is not considered bad.
|
You should count the number of parentheses at the end of the string, suppose there are $x$ such parentheses. Then if $x > \frac{n}{2}$, message is bad. Note that you should divide $n$ by $2$ without rounding. Or you can compare $2 \cdot x$ and $n$ instead. If $2 \cdot x > n$, the message is bad.
|
[
"implementation"
] | 800
| null |
1411
|
B
|
Fair Numbers
|
We call a positive integer number fair if it is divisible by each of its nonzero digits. For example, $102$ is fair (because it is divisible by $1$ and $2$), but $282$ is not, because it isn't divisible by $8$. Given a positive integer $n$. Find the minimum integer $x$, such that $n \leq x$ and $x$ is fair.
|
Let's call a number super-fair if it is divisible by each of the numbers $1..9$. It is fair to say that super-fair numbers are also divisible by $LCM(1..9)$ which is equal to $2520$. The answer isn't larger than the nearest super-fair number, which means that you can increase the original $n$ by one until it becomes fair. We will determine if the number is fair by checking each of its digits separately.
|
[
"brute force",
"number theory"
] | 1,000
| null |
1411
|
C
|
Peaceful Rooks
|
You are given a $n \times n$ chessboard. Rows and columns of the board are numbered from $1$ to $n$. Cell $(x, y)$ lies on the intersection of column number $x$ and row number $y$.
Rook is a chess piece, that can in one turn move any number of cells vertically or horizontally. There are $m$ rooks ($m < n$) placed on the chessboard in such a way that no pair of rooks attack each other. I.e. there are no pair of rooks that share a row or a column.
In one turn you can move one of the rooks any number of cells vertically or horizontally. Additionally, it shouldn't be attacked by any other rook after movement. What is the minimum number of moves required to place all the rooks on the main diagonal?
The main diagonal of the chessboard is all the cells $(i, i)$, where $1 \le i \le n$.
|
Consider rooks as edges in a graph. The position $(x, y)$ will correspond to an edge $(x \to y)$. From the condition that at most one edge leaves each vertex and at most one edge enters each vertex, it follows that the graph decomposes into cycles, paths, and loops (edges of type $v \to v$). What happens to the graph when we move the rook? The edge changes exactly one of its endpoints. By such operations, we must turn all edges into loops, and the constraint on the number of edges going in and out of a vertex must be satisfied. A path is quite easy to turn into loops, just start from one end. A cycle must first be turned into a path, which is always possible. We've only spent one extra move, and it's not hard to see that this is optimal. The answer is the number of rooks minus the number of loops plus the number of cycles.
|
[
"dfs and similar",
"dsu",
"graphs"
] | 1,700
| null |
1411
|
D
|
Grime Zoo
|
Currently, XXOC's rap is a string consisting of zeroes, ones, and question marks. Unfortunately, haters gonna hate. They will write $x$ angry comments for every occurrence of \textbf{subsequence} 01 and $y$ angry comments for every occurrence of \textbf{subsequence} 10. You should replace all the question marks with 0 or 1 in such a way that the number of angry comments would be as small as possible.
String $b$ is a subsequence of string $a$, if it can be obtained by removing some characters from $a$. Two occurrences of a subsequence are considered distinct if sets of positions of remaining characters are distinct.
|
Consider two adjacent question marks at positions $l$ and $r$ ($l < r$). Let $c_0$ zeros and $c_1$ ones be on the interval $(l, r)$. In case $s_l = 0$, $s_r = 1$ there will be written $(c_1 + 1) \cdot x + c_0 \cdot x + out = (c_0 + c_1 + 1) \cdot x + out = (r - l) \cdot x + out$ comments, where $out$ is the number of comments for subsequences, at least one element of which is outside $[l, r]$. In the case $s_l = 1$, $s_r = 0$ we get $(c_0 + 1) \cdot y + c_1 \cdot y + out = (c_0 + c_1 + 1) \cdot y + out = (r - l) \cdot y + out$ comments. Subtract the second from the first, we get $(r - l) \cdot (x - y)$. This means the following: if $x \geq y$, it is always better to change $01$ to $10$. That is, there is such an optimal substitution of $?$ by $0$ and $1$ that some prefix of $?$ are replaced by $1$, and the remaining by $0$. In the case of $x < y$, similarly, there will be some prefix of $0$, then suffix of $1$. For $\mathcal{O}(n)$ implementation you can count how many ones and zeros on each prefix and iterate over the separation boundary.
|
[
"brute force",
"greedy",
"implementation",
"strings"
] | 2,100
| null |
1411
|
E
|
Poman Numbers
|
You've got a string $S$ consisting of $n$ lowercase English letters from your friend. It turned out that this is a number written in poman numerals. The poman numeral system is long forgotten. All that's left is the algorithm to transform number from poman numerals to the numeral system familiar to us. Characters of $S$ are numbered from $1$ to $n$ from left to right. Let's denote the value of $S$ as $f(S)$, it is defined as follows:
- If $|S| > 1$, an arbitrary integer $m$ ($1 \le m < |S|$) is chosen, and it is defined that $f(S) = -f(S[1, m]) + f(S[m + 1, |S|])$, where $S[l, r]$ denotes the substring of $S$ from the $l$-th to the $r$-th position, inclusively.
- Otherwise $S = c$, where $c$ is some English letter. Then $f(S) = 2^{pos(c)}$, where $pos(c)$ is the position of letter $c$ in the alphabet ($pos($a$) = 0$, $pos($z$) = 25$).
Note that $m$ is chosen independently on each step.
Your friend thinks it is possible to get $f(S) = T$ by choosing the right $m$ on every step. Is he right?
|
First, note that the last digit will always be taken with a plus sign, and the one before the last - with a minus sign. It turns out that all other digits may be taken with any sign. Let's prove it. Suppose we want to get the mask $---++--++---+$. All minuses on the left can be obtained by simply splitting one character at a time. We are left with the $++--++---+$ mask, split it as follows: $(++--++-)(--+)$. That is, we left in the left part only one minus from the last segment of consecutive minuses. Change the signs in the left part: $(--++--+)(--+)$. We reduced it to a smaller problem. Doing this, we will end up with masks of the form $+$. Now the problem is reduced to whether we can get the number $X$ using the first $n - 2$ letters. Since the weights of the items are powers of two, we can choose them greedily. Bonus. Can you construct an answer in linear time (i.e. output a binary tree)?
|
[
"bitmasks",
"greedy",
"math",
"strings"
] | 2,300
| null |
1411
|
F
|
The Thorny Path
|
According to a legend the Hanoi Temple holds a permutation of integers from $1$ to $n$. There are $n$ stones of distinct colors lying in one line in front of the temple. Monks can perform the following operation on stones: choose a position $i$ ($1 \le i \le n$) and cyclically shift stones at positions $i$, $p[i]$, $p[p[i]]$, .... That is, a stone from position $i$ will move to position $p[i]$, a stone from position $p[i]$ will move to position $p[p[i]]$, and so on, a stone from position $j$, such that $p[j] = i$, will move to position $i$.
Each day the monks must obtain a new arrangement of stones using an arbitrary number of these operations. When all possible arrangements will have been obtained, the world will end. You are wondering, what if some elements of the permutation could be swapped just before the beginning? How many days would the world last?
You want to get a permutation that will allow the world to last as long as possible, using the minimum number of exchanges of two elements of the permutation.
Two arrangements of stones are considered different if there exists a position $i$ such that the colors of the stones on that position are different in these arrangements.
|
The problem boils down to getting an array consisting of threes and a remainder (2 or 2+2 or 4) using split and merge operations. It helps to think that all merge operations are done before split operations. To solve the problem, you can brute-force which elements of the array the remainder is subtracted from, then the rest of the operations are done greedily. Bonus. Given $k$. We need to get an array consisting of elements equal to $k$ using these operations. Assume that the sum of the array elements is divisible by $k$. This can be represented as minimum cover of the hypergraph by edges with weights = (number of vertices - 1) + (sum of elements / $k$ - 1). Is there a polynomial solution ($k$ is a parameter)?
|
[
"greedy",
"math"
] | 3,000
| null |
1411
|
G
|
No Game No Life
|
Let's consider the following game of Alice and Bob on a directed acyclic graph. Each vertex may contain an arbitrary number of chips. Alice and Bob make turns alternating. Alice goes first. In one turn player can move exactly one chip along any edge outgoing from the vertex that contains this chip to the end of this edge. The one who cannot make a turn loses. Both players play optimally.
Consider the following process that takes place every second on a given graph with $n$ vertices:
- An integer $v$ is chosen equiprobably from $[1, n + 1]$.
- If $v \leq n$, we add a chip to the $v$-th vertex and go back to step 1.
- If $v = n + 1$, Alice and Bob play the game with the current arrangement of chips and the winner is determined. After that, the process is terminated.
Find the probability that Alice will win the game. It can be shown that the answer can be represented as $\frac{P}{Q}$, where $P$ and $Q$ are coprime integers and $Q \not\equiv 0 \pmod{998\,244\,353}$. Print the value of $P \cdot Q^{-1} \bmod 998\,244\,353$.
|
The winner of the game is determined by xor of Grundy values for all chips' vertices. Notice that every Grundy value $\leq\sqrt m$ so xor doesn't exceed 512. Let $P_v$ be a probability of Alice's victory if the current xor is $v$. $P_v = \sum P_{to}\cdot prob(v \rightarrow to) + [v\neq 0] \cdot\frac{1}{n + 1}$ In the second term, we got $n + 1$ and the process ended. It is clear that $prob(v \rightarrow to) = \frac{cnt[v \oplus to]}{n + 1}$, where $cnt[x]$ is the number of vertices with the Grundy value equal to $x$. Now we have a system of 512 linear equations with variables $P_v$. We can solve it using the Gauss method. The answer is in $P_0$. The proof that Gauss won't break along the way is left to the reader as an exercise. There is also a solution using the Hadamard transform.
|
[
"bitmasks",
"games",
"math",
"matrices"
] | 2,700
| null |
1413
|
A
|
Finding Sasuke
|
Naruto has sneaked into the Orochimaru's lair and is now looking for Sasuke. There are $T$ rooms there. Every room has a door into it, each door can be described by the number $n$ of seals on it and their integer energies $a_1$, $a_2$, ..., $a_n$. All energies $a_i$ are \textbf{nonzero} and do not exceed $100$ by absolute value. Also, \textbf{$n$ is even}.
In order to open a door, Naruto must find such $n$ seals with integer energies $b_1$, $b_2$, ..., $b_n$ that the following equality holds: $a_{1} \cdot b_{1} + a_{2} \cdot b_{2} + ... + a_{n} \cdot b_{n} = 0$. All $b_i$ must \textbf{be nonzero} as well as $a_i$ are, and also \textbf{must not exceed $100$} by absolute value. Please find required seals for every room there.
|
The following is always a valid answer: $-a_2, a_1, -a_4, a_3, ..., -a_n, a_{n-1}$.
|
[
"constructive algorithms",
"math"
] | 800
| null |
1413
|
B
|
A New Technique
|
All techniques in the ninja world consist of hand seals. At the moment Naruto is learning a new technique, which consists of $n\cdot m$ different seals, denoted by distinct numbers. All of them were written in an $n\times m$ table.
The table is lost now. Naruto managed to remember elements of each row from left to right, and elements of each column from top to bottom, but he doesn't remember the order of rows and columns. Please restore the table consistent with this data so that Naruto will be able to learn the new technique.
|
To solve this problem it's sufficient to find the position of each row in the table. If we consider the first number of each row and find a column containing it, we will automatically obtain the position of the row. Since all numbers are distinct, the positions will be determined uniquely.
|
[
"implementation"
] | 1,100
| null |
1413
|
C
|
Perform Easily
|
After battling Shikamaru, Tayuya decided that her flute is too predictable, and replaced it with a guitar. The guitar has $6$ strings and an infinite number of frets numbered from $1$. Fretting the fret number $j$ on the $i$-th string produces the note $a_{i} + j$.
Tayuya wants to play a melody of $n$ notes. Each note can be played on different string-fret combination. The easiness of performance depends on the difference between the maximal and the minimal indices of used frets. The less this difference is, the easier it is to perform the technique. Please determine the minimal possible difference.
For example, if $a = [1, 1, 2, 2, 3, 3]$, and the sequence of notes is $4, 11, 11, 12, 12, 13, 13$ (corresponding to the second example), we can play the first note on the first string, and all the other notes on the sixth string. Then the maximal fret will be $10$, the minimal one will be $3$, and the answer is $10 - 3 = 7$, as shown on the picture.
|
Consider all possible frets we may need to use. To do this we sort all the pairs $(b_j - a_i, j)$ lexicographically. Now we need to find a subsegment with the minimal range containing the first fields and also so that all numbers from $1$ to $n$ occur among the second fields (so it will mean that for each note there is at least one string-fret combination). For each $l$, denote the minimal $right(l)$ so that $[l, right(l)]$ is a valid subsegment. It's easy to see that $right(l) \leq right(l + 1)$, because if $[l + 1, right(l + 1)]$ contains all numbers from $1$ to $n$ among the second fields, then so does $[l, right(l + 1)]$. So to find all $right(l)$ one can just use two pointers, maintaining the set of notes that occur on the segment. Once we calculated it, we just print the minimal difference between the first fields of the endpoints of all possible segments $[l, right(l)]$. The final complexity is $O(nm\log(nm))$.
|
[
"binary search",
"brute force",
"dp",
"implementation",
"sortings",
"two pointers"
] | 1,900
| null |
1413
|
D
|
Shurikens
|
Tenten runs a weapon shop for ninjas. Today she is willing to sell $n$ shurikens which cost $1$, $2$, ..., $n$ ryo (local currency). During a day, Tenten will place the shurikens onto the showcase, which is empty at the beginning of the day. Her job is fairly simple: sometimes Tenten places another shuriken (from the available shurikens) on the showcase, and sometimes a ninja comes in and buys a shuriken from the showcase. Since ninjas are thrifty, they always buy the \textbf{cheapest} shuriken from the showcase.
Tenten keeps a record for all events, and she ends up with a list of the following types of records:
- + means that she placed another shuriken on the showcase;
- - x means that the shuriken of price $x$ was bought.
Today was a lucky day, and all shurikens were bought. Now Tenten wonders if her list is consistent, and what could be a possible order of placing the shurikens on the showcase. Help her to find this out!
|
Let's note that if a shuriken of price $x$ is being bought right now, then the only information we obtain about all the remaining shurikens is that they are of prices $\geq x$. It's also clear that if we consider two shurikens on the showcase then the one which was placed earlier has the stronger constraints (as written above). Now consider all events that can happen when the shuriken of price $x$ is bought. If for all shurikens that are currently on the showcase we know that they must have prices $> x$, then the answer is negative. Otherwise, for all shurikens that had a lower bound of something less than $x$ we increase it to $x$, and remove any one of them, because we cannot remove any other shuriken, and these are indistinguishable. However, since we know that the last placed shuriken has the weakest constraint, we can just remove the last placed shuriken each time and check the consistency in the end. This verification can be done using any min-heap. The final time complexity is $O(n \cdot \log{n})$.
|
[
"data structures",
"greedy",
"implementation"
] | 1,700
| null |
1413
|
E
|
Solo mid Oracle
|
Meka-Naruto plays a computer game. His character has the following ability: given an enemy hero, deal $a$ instant damage to him, and then heal that enemy $b$ health points at the end of every second, for exactly $c$ seconds, starting one second after the ability is used. That means that if the ability is used at time $t$, the enemy's health decreases by $a$ at time $t$, and then increases by $b$ at time points $t + 1$, $t + 2$, ..., $t + c$ due to this ability.
The ability has a cooldown of $d$ seconds, i. e. if Meka-Naruto uses it at time moment $t$, next time he can use it is the time $t + d$. Please note that he can only use the ability at integer points in time, so all changes to the enemy's health also occur at integer times only.
The effects from different uses of the ability may stack with each other; that is, the enemy which is currently under $k$ spells gets $k\cdot b$ amount of heal this time. Also, if several health changes occur at the same moment, they are all counted at once.
Now Meka-Naruto wonders if he can kill the enemy by just using the ability each time he can (that is, every $d$ seconds). The enemy is killed if their health points become $0$ or less. Assume that the enemy's health is not affected in any way other than by Meka-Naruto's character ability. What is the maximal number of health points the enemy can have so that Meka-Naruto is able to kill them?
|
It will be easier to explain using illustrations. We will use timelines, where each cast spell instance will occupy a separate row; and each second will be represented as a column. First of all, if $a > b\cdot c$ then the answer is $-1$. Indeed, after time $t$ the total amount of damage dealt is $(a - bc)$ for each spell which has expired completely plus some damage from spells which have not expired. The first summand can be as great as we want it to, and the second one is bounded by, say, $-bc^2$ as there are at most $c$ spells which have not yet expired, and each of them healed the enemy by at most $b$ units each second, for at most $c$ seconds. Therefore, the damage may be arbitrarily huge. On the other hand, if $a\leq bc$, then the answer always exists, and here is why. First of all, let's only look at the moments divisible by $d$ - that is, the moments when damage was dealt. It is obvious that for every other moment $t$ the enemy had less (or the same amount of) health at time $t-1$. Second, if $t\geq c$, then the enemy had no more health than now at the moment $t-d$. Indeed, the difference between damages then and now is exactly one full-lasted spell, which is non-negative, as we know. For clarity take a look at the pictures below: So now we know that we may consider only $t < c$, and it follows in particular that the answer exists. Also, when in general should we subtract $d$ from $t$ to obtain a more damaged enemy? One can see that if $t < c$ then the damage we subtract is $a - tb$, and since $t = dk$ for some integer nonnegative $k$, then we subtract $a - bdk$ damage. It makes sense to do this while $a - bdk < 0$: In other words, we have reduced the task to the following: find the greatest $k$ so that $a \geq bdk$, and cast the spell $(k+1)$ time. The enemy will have the least amount of health just after we cast the spell for the $(k+1)$-st time. The answer is thus $a(k+1) - \dfrac{k(k+1)}{2}bd$. The time complexity of this solution is $O(1)$ per test. 
One could also find out that the enemy's health is convex over time and use ternary search to find the minimum. It requires $O(\log{maxanswer})$ per test, which is still ok.
|
[
"greedy",
"math",
"ternary search"
] | 2,100
| null |
1413
|
F
|
Roads and Ramen
|
In the Land of Fire there are $n$ villages and $n-1$ bidirectional road, and there is a path between any pair of villages by roads. There are only two types of roads: stone ones and sand ones. Since the Land of Fire is constantly renovating, every morning workers choose a single road and flip its type (so it becomes a stone road if it was a sand road and vice versa). Also everyone here loves ramen, that's why every morning a ramen pavilion is set in the middle of every \textbf{stone} road, and at the end of each day all the pavilions are removed.
For each of the following $m$ days, after another road is flipped, Naruto and Jiraiya choose a simple path — that is, a route which starts in a village and ends in a (possibly, the same) village, and doesn't contain any road twice. Since Naruto and Jiraiya also love ramen very much, they buy a single cup of ramen on each stone road and one of them eats it. Since they don't want to offend each other, they only choose routes where they can eat equal number of ramen cups. Since they both like traveling, they choose any longest possible path. After every renovation find the maximal possible length of a path (that is, the number of roads in it) they can follow.
|
Fix any diameter of the tree. One of the optimal paths always starts at one of the diameter's endpoints. Proof: Let $AB$ be a diameter of the tree, and the optimal answer be $EF$. Then the parity of the number of stone roads on $DE$ is the same as on $DF$, and also the same holds for $CE$ and $CF$. Since a diameter has the greatest length among all paths in the tree, the stone roads parity is different on $AC$ and on $BC$ (otherwise, the diameter would be an answer). Hence, the stone roads parity on $CE$ coincides with one of $AC$ and $BC$. Assume without loss of generality that the stone roads parities of $AC$ and $CE$ are the same. Then the path $AE$ contains an even number of stone roads. Note that since $AB$ is a diameter, $AC$ is no shorter than $CF$, hence $AD$ is no shorter than $DF$, which implies that $AE$ is not shorter than $EF$. This means that there is an optimal path starting at one of the diameter's endpoints. Now remaining is to solve the problem if one of the endpoints is fixed. If we root the tree from it, and write down for each vertex the stone roads parity between it and the root, then each query is basically changing the parity of a subtree. In an euler-tour traversal every such subtree is represented by a contiguous subsegment. Now the original problem can be reformulated in a following way: we have a binary array, there are queries of type "flip a subsegment", and after each query we need to find a zero with the greatest depth parameter. This can be done via a segment tree, where in each node we store the deepest zero and the deepest one on the subsegment corresponding to that node. The final time complexity is $O(n\log{n})$.
|
[
"data structures",
"trees"
] | 2,800
| null |
1415
|
A
|
Prison Break
|
There is a prison that can be represented as a rectangular matrix with $n$ rows and $m$ columns. Therefore, there are $n \cdot m$ prison cells. There are also $n \cdot m$ prisoners, one in each prison cell. Let's denote the cell in the $i$-th row and the $j$-th column as $(i, j)$.
There's a secret tunnel in the cell $(r, c)$, that the prisoners will use to escape! However, to avoid the risk of getting caught, they will escape at night.
Before the night, every prisoner is in his own cell. When night comes, they can start moving to adjacent cells. Formally, in one second, a prisoner located in cell $(i, j)$ can move to cells $( i - 1 , j )$ , $( i + 1 , j )$ , $( i , j - 1 )$ , or $( i , j + 1 )$, as long as the target cell is inside the prison. They can also choose to stay in cell $(i, j)$.
The prisoners want to know the minimum number of seconds needed so that every prisoner can arrive to cell $( r , c )$ if they move optimally. Note that there can be any number of prisoners in the same cell at the same time.
|
The problem is equivalent to finding the farthest cell from $( x , y )$. It is easy to see that, if they move optimally, $( i , j )$ can reach $( x , y )$ just by moving in an L shape, and this is equivalent to the Manhattan distance between the two points. The longest distance a prisoner will move on rows is $max( x - 1 , n - x )$, and for the columns it is $max( y - 1 , m - y )$. So the answer is just $max( x - 1 , n - x ) + max( y - 1 , m - y )$
|
[
"brute force",
"math"
] | 800
| null |
1415
|
B
|
Repainting Street
|
There is a street with $n$ houses in a line, numbered from $1$ to $n$. The house $i$ is initially painted in color $c_i$. The street is considered beautiful if all houses are painted in the same color. Tom, the painter, is in charge of making the street beautiful. Tom's painting capacity is defined by an integer, let's call it $k$.
On one day, Tom can do the following repainting process that consists of two steps:
- He chooses two integers $l$ and $r$ such that $ 1 \le l \le r \le n $ and $ r - l + 1 = k $.
- For each house $i$ such that $l \le i \le r$, he can either repaint it with any color he wants, or ignore it and let it keep its current color.
Note that in the same day Tom can use different colors to repaint different houses.
Tom wants to know the minimum number of days needed to repaint the street so that it becomes beautiful.
|
If we want to paint every house on the street with color $x$, it is easy to see that we need to change every house with color different from $x$, and not necessarily repaint houses already painted in color $x$. We can do the following greedy algorithm to minimize the number of days: Find leftmost house not painted in color $x$. Assume this is in position $i$. Then we will paint $[i,i+k-1]$ with color $x$. Repeat this until all houses are painted in color $x$. Why is this optimal? When we find the leftmost house not painted in $x$, we know we need to change it, and as it is the leftmost one, everything before it is painted in $x$. To maximize our chances of changing other houses that need repainting, we choose this as the leftmost position in our painting range. This can be implemented easily with a linear pass. However, we don't know the color $x$ that we will have at the end. Limit of colors are small enough, so we can try all of them and just keep the smallest answer. Time complexity: $O(n \cdot max(c))$ Space complexity: $O(n)$
|
[
"brute force",
"greedy"
] | 1,100
| null |
1415
|
C
|
Bouncing Ball
|
You're creating a game level for some mobile game. The level should contain some number of cells aligned in a row from left to right and numbered with consecutive integers starting from $1$, and in each cell you can either put a platform or leave it empty.
In order to pass a level, a player must throw a ball from the left so that it first lands on a platform in the cell $p$, then bounces off it, then bounces off a platform in the cell $(p + k)$, then a platform in the cell $(p + 2k)$, and so on every $k$-th platform until it goes farther than the last cell. If any of these cells has no platform, you can't pass the level with these $p$ and $k$.
You already have some level pattern $a_1$, $a_2$, $a_3$, ..., $a_n$, where $a_i = 0$ means there is no platform in the cell $i$, and $a_i = 1$ means there is one. You want to modify it so that the level can be passed with given $p$ and $k$. In $x$ seconds you can add a platform in some empty cell. In $y$ seconds you can remove the first cell completely, reducing the number of cells by one, and renumerating the other cells keeping their order. You can't do any other operation. You \textbf{can not} reduce the number of cells to less than $p$.
\begin{center}
{\small Illustration for the third example test case. Crosses mark deleted cells. Blue platform is the newly added.}
\end{center}
What is the minimum number of seconds you need to make this level passable with given $p$ and $k$?
|
Note that instead of deletion of the first cell we can increase the value of $p$ by one, these operations are equivalent. Now let's loop through the possible final values of $p$, let it be $q$ ($p \le q \le n$). Then we need to add missing platforms in cells $q$, $(q + k)$, $(q + 2k)$, and so on. Let's compute the array $c_i$ - the number of cells without a platform among cells $i$, $(i + k)$, $(i + 2k)$, an so on. It can be computed using the method of dynamic programming, going from large $i$ to small: $c_i = c_{i + k} + (1 - a_i)$. Now the time required to add the platforms for a given value of $q$ is $c_q \cdot x$, while the time needed to increase $p$ to $q$ is $(q - p) \cdot y$. The total time equals $c_q \cdot x + (q - p) \cdot y$. We only have to choose minimum among all possible values of $q$.
|
[
"brute force",
"dp",
"implementation"
] | 1,400
| null |
1415
|
D
|
XOR-gun
|
Arkady owns a \textbf{non-decreasing} array $a_1, a_2, \ldots, a_n$. You are jealous of its beauty and want to destroy this property. You have a so-called XOR-gun that you can use one or more times.
In one step you can select two \textbf{consecutive} elements of the array, let's say $x$ and $y$, remove them from the array and insert the integer $x \oplus y$ on their place, where $\oplus$ denotes the bitwise XOR operation. Note that the length of the array decreases by one after the operation. You can't perform this operation when the length of the array reaches one.
For example, if the array is $[2, 5, 6, 8]$, you can select $5$ and $6$ and replace them with $5 \oplus 6 = 3$. The array becomes $[2, 3, 8]$.
You want the array no longer be non-decreasing. What is the minimum number of steps needed? If the array stays non-decreasing no matter what you do, print $-1$.
|
First let's compute array $b_1, b_2, \ldots, b_n$, where $b_i$ is the index of the highest bit equal to $1$ in the binary notation of $a_i$. The statement says $b_i \le b_{i + 1}$. These values can be computed by dividing the given numbers by $2$ until they are zero. Note that if for a given $i$ the equality $b_{i - 1} = b_i = b_{i + 1} = t$ holds, then we can apply an operation to $a_i$ and $a_{i + 1}$, and the resulting integer is smaller than $a_{i - 1}$. Indeed, in $a_{i - 1}$ the highest bit set is $t$, but in $a_i \oplus a_{i + 1}$ the $t$-th and higher bits are zeros. That means if there is such an $i$ (it is easy to check in a single linear pass), then the answer is $1$. Now note that if there is no such $i$, then the size of the array $n$ is not bigger than $2 \cdot \left(\lfloor\log_2{10^9}\rfloor + 1\right) = 60$! Indeed, there are no more than two integers with the same highest bit set. It is much easier to solve the problem in such constraints. Consider some solution. In the final array, let's denote it as $c$, there is $i$ such that $c_i > c_{i + 1}$. Note that each element of the final array is equal to XOR of some subsegment of the initial array, and each element of the initial array belongs to exactly one such subsegment. Let this subsegment for $c_i$ be $a_l, a_{l + 1}, \ldots, a_m$, and for $c_{i + 1}$ be $a_{m + 1}, a_{m + 2}, \ldots, a_r$. Then it's clear that to find an optimal solution it is enough to loop through all possible values of $l$, $m$, and $r$ and check whether XOR of all elements of the left subsegment is larger than XOR of all elements of the right subsegment. If this inequality holds, update answer with value $r - l - 1$. The complexity of this part is $O(n^3)$ or $O(n^4)$ depending on implementation.
|
[
"bitmasks",
"brute force",
"constructive algorithms"
] | 2,000
| null |
1415
|
E
|
New Game Plus!
|
Wabbit is playing a game with $n$ bosses numbered from $1$ to $n$. The bosses can be fought in any order. Each boss needs to be defeated \textbf{exactly once}. There is a parameter called \textbf{boss bonus} which is initially $0$.
When the $i$-th boss is defeated, the current \textbf{boss bonus} is added to Wabbit's score, and then the value of the \textbf{boss bonus} increases by the point increment $c_i$. Note that $c_i$ can be negative, which means that other bosses now give fewer points.
However, Wabbit has found a glitch in the game. At any point in time, he can reset the playthrough and start a New Game Plus playthrough. This will set the current \textbf{boss bonus} to $0$, while all defeated bosses remain defeated. The current score is also saved and does \textbf{not} reset to zero after this operation. This glitch can be used \textbf{at most} $k$ times. He can reset after defeating any number of bosses (including before or after defeating all of them), and he also can reset the game several times in a row without defeating any boss.
Help Wabbit determine the maximum score he can obtain if he has to defeat \textbf{all} $n$ bosses.
|
We see that each playthrough (whether it is the first one or any of the playthroughs after the reset) is completely independent of any other playthrough. Thus, we should instead think of the problem as partitioning the $n$ bosses into $k+1$ playthroughs. Consider a playthrough with $x$ bosses which have point increments $a_1,a_2,\cdots,a_{x}$ when fought in order. Then, the number of points that we will get is $(x-1)a_1 + (x-2)a_2 + \ldots + (1)a_{x-1} + (0)a_{x}$. A simple greedy argument tell us that within a single playthrough, we should always fight the bosses in non-increasing order of point increments, i.e. $a_1 \geq a_2 \geq \ldots \geq a_{x}$. We can visualize this with $k+1$ stacks, each representing a playthrough. Each stack contains all of the bosses that will be fought in that playthrough, and the stack is non-increasing from top to bottom, meaning we fight the bosses from top to bottom. We now see that $k=0$ is a fairly trivial case; just fight the bosses in non-increasing order of point increments. For the rest of the tutorial, we will assume $k \geq 1$. For simplicity, we say that a boss is in position $p$ of a playthrough if there are $p$ more bosses below this one on its playthrough stack. Notice that if we have two bosses in two different playthroughs with point increments $a$ and $b$ with positions $i$ and $j$ in their respective playthroughs, then the total points gained from the two bosses is $ia + jb$. We now see that if $a \geq b$, then $i \geq j$ and vice versa. For example, the configuration below is not optimal because swapping the $-5$ and $-3$ gives a better answer. Therefore, all bosses in lower-numbered positions should have point increments that are less than any boss in a higher-numbered position. This means that we can place the bosses on the stacks one at a time in non-decreasing order of point increments to reach an optimal configuration. Call a boss good if it has a non-negative point increment, and bad otherwise. 
Let's fix the arrangement of bad bosses and try to place the good bosses in non-decreasing order of point increments. We can see that placing a boss with point increment $a$ on a stack with height $h$ will add $ah$ to the total, so we should always pick the stack with maximum height. Thus, all of the good bosses will always end up on the same stack, and that stack will be the stack of maximum height. Call this stack the main stack; the other $k$ stacks are the side stacks. If there exists two side stacks whose heights differ by at least $2$, then we can always move the top-most bad boss of the taller side stack to the shorter side stack and decrease the loss in points. In the example below, the $-10$ on the left stack (of height $5$) can be moved down to the top of the right stack (of height $3$). Thus, the maximum and minimum heights of these $k$ side stacks cannot differ by more than 1. Let the minimum height of these $k$ stacks be $h$. If we consider the bottom $h$ bosses of all $k+1$ stacks, we see that they still must have the property that all bosses in lower positions have point increments that are less than any boss in higher positions. Thus, they must consist of the $h(k+1)$ bosses with smallest point increments. Direct Greedy We first sort the bosses in non-decreasing order of point increments. For each possible prefix $P$ that contains only negative point increments, we take all bosses in $P$ and distribute them evenly across the $k+1$ stacks. We then take the remaining bosses and place them on the tallest of the stacks (if there are multiple stacks of the same height, the result is the same). Performing this computation naively takes $O(n^2)$ time, but this can be sped up using precomputation of weighted prefix and suffix sums to evaluate all configurations in $O(n)$. The final time complexity is $O(n \log n)$ due to sorting. 
Smarter Greedy Let's take all $n$ point increments and place them in a "sliding" stack such that they are arranged in non-decreasing order from bottom to top. We will slide the stack across the $k+1$ playthroughs and leave the bottom point increment behind. Shown below is an example with $n=8$ and $k=2$. Every time we slide the stack, we reduce the total by the sum of all of the point increments that we slide down by $1$ position. Thus, we should only slide it down if the sum of those is less than $0$ as that will give us a more optimal solution. We notice that the point increments that we slide down always form a suffix of the point increments when sorted in non-decreasing order. The moment we stop sliding is when the sum of the suffix is non-negative, giving the optimal solution. Thus, we can find the optimal configuration directly by finding the right suffix and distributing the remaining negative point increments evenly. Genius Greedy (found by K0u1e) Sort the point increments in non-increasing order $a_1,a_2,\ldots,a_n$. Maintain a priority queue that initially contains $k+1$ zeros. We now process the point increments in non-increasing order. To process the current point increment $a_i$, we perform the following steps in order: Find the largest number $x$ in the priority queue and remove it from the priority queue Add $x$ to our running total Push $x+a_i$ back into the priority queue The answer is the final total at the end. It will be left as an exercise to the reader to figure out why this solution is indeed equivalent to the previous solution.
|
[
"constructive algorithms",
"greedy",
"math"
] | 2,200
| null |
1415
|
F
|
Cakes for Clones
|
You live on a number line. You are initially (at time moment $t = 0$) located at point $x = 0$. There are $n$ events of the following type: at time $t_i$ a small cake appears at coordinate $x_i$. To collect this cake, you have to be at this coordinate at this point, otherwise the cake spoils immediately. No two cakes appear at the same time and no two cakes appear at the same coordinate.
You can move with the speed of $1$ length unit per one time unit. Also, at any moment you can create a clone of yourself at the same point where you are located. The clone can't move, but it will collect the cakes appearing at this position for you. The clone \textbf{disappears} when you create another clone. If the new clone is created at time moment $t$, the old clone can collect the cakes that appear before or at the time moment $t$, and the new clone can collect the cakes that appear at or after time moment $t$.
Can you collect all the cakes (by yourself or with the help of clones)?
|
Let $mintime_i$ be the minimum time we can get to coordinate $x_i$ given that all previous cakes are collected and the latest created clone is already useless. Also let $dp_{i, j}$ be a boolean being true if we can reach a situation where we just collected the $i$-th cake, and our clone is currently waiting for the cake $j$ in the correct position. Let us currently be in the position of the $i$-th event and the latest clone is useless (state $mintime_i$). Then it is always optimal to collect the $i$-th cake with a new clone while we move somewhere. There are two possible options for further actions: We directly go to the next event. Then we just need to update $mintime_{i + 1}$, and not forget to wait for the clone to collect the $i$-th cake. We want to leave the clone somewhere waiting for some $j$-th cake and go take the $(i + 1)$-th cake ourselves. Then if we have enough time to do that, make $dp_{i+1, j}$ reachable. Let us be in the state $dp_{i, j}$ (we have just collected the $i$-th cake, and our clone is waiting for the $j$-th cake). If $i + 1 \neq j$, then we should just go and collect the $i + 1$-th cake, otherwise there are two possibilities: The $j+1$-th cake is collected by a new clone, then we have to update $mintime_{j+1}$. We want to leave a new clone waiting for some later cake $k$ (after the old one collects cake $j$), and take the $j+1$-th cake ourselves, then the transition leads to the state $dp_{j+1, k}$. We can collect all cakes if $mintime_n \leq t_n$, or $dp_{n - 1, n}$ is reachable. The total complexity is $O(n^2)$, because we only loop through the position of the next clone in $O(n)$ states.
|
[
"dp"
] | 2,900
| null |
1416
|
A
|
k-Amazing Numbers
|
You are given an array $a$ consisting of $n$ integers numbered from $1$ to $n$.
Let's define the $k$-amazing number of the array as the minimum number that occurs in all of the subsegments of the array having length $k$ (recall that a subsegment of $a$ of length $k$ is a contiguous part of $a$ containing exactly $k$ elements). If there is no integer occurring in all subsegments of length $k$ for some value of $k$, then the $k$-amazing number is $-1$.
For each $k$ from $1$ to $n$ calculate the $k$-amazing number of the array $a$.
|
Let's fix some arbitrary number $x$ and calculate the minimum value of $k$ such that $x$ occurs in all segments of length $k$. Let $p_1 < p_2 < \dots < p_m$ be the indices of entries of $x$ in the array. Then, for each $1 \le i < m$ it is clear that $k$ should be at least the value of $p_{i+1}-p_i$. Also, $k \ge p_1$ and $k \ge n - p_m + 1$. It is enough to just take the maximum of those values. Let's call this derived value of $k$ as $f(x)$. Now, we can just go in increasing order of $x$ from $1$ to $n$ and try to update the suffix $[f(x), n]$ with $x$. This can be done straightforwardly, just iterating over the range $[f(x), n]$. If we arrive at a cell for which the value of $x$ is already calculated, we immediately terminate our loop and continue our algorithm from $x+1$. Time complexity: $O(n)$. Space complexity: $O(n)$.
|
[
"binary search",
"data structures",
"implementation",
"two pointers"
] | 1,500
|
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)3e5 + 5;
int f[MAXN], last[MAXN], arr[MAXN], ans[MAXN];
int n;
// Reads one test case and prints the k-amazing number for every k in [1, n].
//
// For each value x, f[x] becomes the largest gap between consecutive
// occurrences of x (counting the array borders), i.e. the smallest window
// length k such that every length-k subsegment contains x. Scanning x in
// increasing order, x claims every still-unanswered k >= f[x]; the early
// break keeps the fill amortized O(n).
void solve() {
    scanf("%d", &n);
    for (int i = 1; i <= n; ++i) {
        f[i] = 0;
        last[i] = 0;
        ans[i] = -1;
    }
    for (int i = 1; i <= n; ++i) {
        scanf("%d", &arr[i]);
    }
    for (int pos = 1; pos <= n; ++pos) {
        const int val = arr[pos];
        f[val] = max(f[val], pos - last[val]);
        last[val] = pos;
    }
    for (int val = 1; val <= n; ++val) {
        // Account for the gap after the last occurrence of val.
        f[val] = max(f[val], n - last[val] + 1);
        int k = f[val];
        while (k <= n && ans[k] == -1) {
            ans[k] = val;
            ++k;
        }
    }
    for (int i = 1; i <= n; ++i) {
        printf("%d%c", ans[i], " \n"[i == n]);
    }
}
// Entry point: reads the number of test cases and runs solve() for each.
int main() {
    int testCount = 0;
    scanf("%d", &testCount);
    for (int tc = 0; tc < testCount; ++tc) {
        solve();
    }
    return 0;
}
|
1416
|
B
|
Make Them Equal
|
You are given an array $a$ consisting of $n$ \textbf{positive} integers, numbered from $1$ to $n$. You can perform the following operation no more than $3n$ times:
- choose three integers $i$, $j$ and $x$ ($1 \le i, j \le n$; $0 \le x \le 10^9$);
- assign $a_i := a_i - x \cdot i$, $a_j := a_j + x \cdot i$.
After each operation, all elements of the array should be \textbf{non-negative}.
Can you find a sequence of no more than $3n$ operations after which all elements of the array are equal?
|
Let $S$ be the sum of the array. If $S$ is not divisible by $n$, then the answer is obviously $-1$. Otherwise, there always exists a solution which uses no more than $3n$ queries. We will solve this problem in two phases. First phase: gather the sum in $a_1$. Let's iterate over $2 \le i \le n$ in increasing order. If $a_i$ is divisible by $i$, we can immediately transfer it using one operation. Otherwise, we have to make it divisible by transferring $i - (a_i \bmod i)$ from $a_1$ to $a_i$. Note that this operation does not break a condition on non-negativity because all $a_i$ are initially positive. This way, we successfully finish this phase using at most $2(n-1)$ operations. Second phase: distribute the sum across all elements. Just iterate over all $2 \le i \le n$ and make a transfer of $S/n$ from $a_1$ to $a_i$. This phase takes exactly $n-1$ operations. Time complexity: $O(n)$ Space complexity: $O(n)$
|
[
"constructive algorithms",
"greedy",
"math"
] | 2,000
|
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)1e4 + 5;
vector<array<int, 3>> ans;
int arr[MAXN];
int n;
// Applies one operation (i = x, j = y, multiplier = z): moves x * z candies
// from pile x into pile y and records the triple for the final output.
void go(int x, int y, int z) {
    const int delta = x * z;
    arr[x] -= delta;
    arr[y] += delta;
    ans.push_back({x, y, z});
}
// Reads one test case, prints -1 if the sum is not divisible by n, and
// otherwise builds and prints a sequence of at most 3n equalizing operations.
void solve() {
scanf("%d", &n);
for (int i = 1; i <= n; ++i) {
scanf("%d", &arr[i]);
}
ans.clear();
// Fits in int for the original constraints; verify if limits change.
int sum = accumulate(arr + 1, arr + n + 1, 0);
if (sum % n) {
printf("-1\n");
return;
}
// Phase 1: gather everything in arr[1]. For each i >= 2, first top arr[i]
// up from arr[1] so it becomes divisible by i (affordable because all
// elements start positive), then move all of arr[i] into arr[1].
for (int i = 2; i <= n; ++i) {
if (arr[i] % i) {
go(1, i, i - arr[i] % i);
}
go(i, 1, arr[i] / i);
}
// Phase 2: hand each position its final share sum / n (arr[1] keeps the rest).
for (int i = 2; i <= n; ++i) {
go(1, i, sum / n);
}
for (int i = 1; i <= n; ++i) {
assert(arr[i] == sum / n);
}
// Phase 1 uses at most 2(n - 1) operations, phase 2 exactly n - 1.
assert((int)ans.size() <= 3 * n);
printf("%d\n", (int)ans.size());
for (auto &[x, y, z] : ans) {
printf("%d %d %d\n", x, y, z);
}
}
// Entry point: reads the test-case count and dispatches to solve().
int main() {
    int cases = 0;
    scanf("%d", &cases);
    for (int i = 0; i < cases; ++i) {
        solve();
    }
    return 0;
}
|
1416
|
C
|
XOR Inverse
|
You are given an array $a$ consisting of $n$ non-negative integers. You have to choose a non-negative integer $x$ and form a new array $b$ of size $n$ according to the following rule: for all $i$ from $1$ to $n$, $b_i = a_i \oplus x$ ($\oplus$ denotes the operation bitwise XOR).
An inversion in the $b$ array is a pair of integers $i$ and $j$ such that $1 \le i < j \le n$ and $b_i > b_j$.
You should choose $x$ in such a way that the number of inversions in $b$ is minimized. If there are several options for $x$ — output the smallest one.
|
Note: the integer $x$ from the statement is marked as an uppercase $X$ for clarity. Take any arbitrary integers $x$ and $y$. It is a well-known fact that whether $x < y$ or $x > y$ depends only on one bit - the highest bit which differs in both. So, let's construct a trie on our array integers. Represent each number as a binary string from the highest bit ($29$) to the lowest bit ($0$). Each leaf will keep a corresponding index/indices from the array and each non-leaf node will have at most two children - one for $0$-edge and one for $1$-edge. Let's denote $S(v)$ as a sorted list of indices of all values in the subtree of $v$. These lists can be easily maintained while inserting our numbers into trie. Take any arbitrary vertex $v$ which has both children and has a depth (distance from root) of $k$. Let $a$ and $b$ be its children. Here comes the most important thing to notice: If the $k$-th highest bit of $X$ is toggled, lists $S(a)$ and $S(b)$ will change their relative order. Otherwise, it will not change. Thus, exploiting the fact that both lists are sorted, we can efficiently calculate the corresponding number of inversions between those lists and add them to our values $sum[k][0]$ and $sum[k][1]$. $sum[i][j]$ means the number of inversions we have to add if $i$-th highest bit of $X$ is equal to $j$. After the calculation of our $sum$ table is done, the value of $X$ can be easily restored. Time complexity: $O(n \log 10^9)$ Memory complexity: $O(n \log 10^9)$
|
[
"bitmasks",
"data structures",
"divide and conquer",
"dp",
"greedy",
"math",
"sortings",
"strings",
"trees"
] | 2,000
|
#include <bits/stdc++.h>
#define mp make_pair
#define pb push_back
#define f first
#define s second
#define ll long long
#define forn(i, a, b) for(int i = (a); i <= (b); ++i)
#define forev(i, b, a) for(int i = (b); i >= (a); --i)
#define VAR(v, i) __typeof( i) v=(i)
#define forit(i, c) for(VAR(i, (c).begin()); i != (c).end(); ++i)
#define all(x) (x).begin(), (x).end()
#define sz(x) ((int)(x).size())
#define file(s) freopen(s".in","r",stdin); freopen(s".out","w",stdout);
using namespace std;
const int maxn = (int)5e6 + 100;
const int maxm = (int)1e6 + 100;
const int mod = (int)1e9 + 7;
const int P = (int) 1e6 + 7;
const double pi = acos(-1.0);
#define inf mod
typedef long double ld;
typedef pair<int, int> pii;
typedef pair<ll, ll> pll;
typedef vector<int> vi;
typedef vector<ll> Vll;
typedef vector<pair<int, int> > vpii;
typedef vector<pair<ll, ll> > vpll;
int n, t[2][maxn], id = 1;
ll dp[2][30];
vi g[maxn];
// Inserts x (as a 30-bit big-endian path) into the trie rooted at node 0,
// appending the array index pos to g[] of every node on the path. Since
// numbers are inserted in increasing index order, each g[] list stays sorted.
void add(int x, int pos){
int v = 0;
forev(i, 29, 0){
int bit = ((x >> i) & 1);
// Allocate the missing child on demand; id is the next free node number.
if(!t[bit][v]) t[bit][v] = id++;
v = t[bit][v];
g[v].pb(pos);
}
}
// Recursively walks the trie from node v at bit depth b. For every node with
// both children, counts the inversions between the 0-subtree and 1-subtree
// index lists: dp[0][b] accumulates pairs that stay inverted when bit b of
// X is 0, dp[1][b] when it is 1 (toggling the bit swaps the relative order).
void go(int v, int b = 29){
int l = t[0][v], r = t[1][v];
if(l) go(l, b - 1);
if(r) go(r, b - 1);
if(!l || !r) return;
ll res = 0;
int ptr = 0;
// g[l] and g[r] are both sorted, so a two-pointer sweep counts, for every
// index in g[l], how many indices of g[r] precede it — i.e. inversions
// given that the 0-branch sorts before the 1-branch.
for(auto x : g[l]){
while(ptr < sz(g[r]) && g[r][ptr] < x) ptr++;
res += ptr;
}
dp[0][b] += res;
// All remaining cross pairs become inversions if bit b of X is flipped.
dp[1][b] += sz(g[l]) * 1ll * sz(g[r]) - res;
}
// Reads the array, builds the bitwise trie, and chooses each bit of X
// independently to minimize the total inversion count; ties prefer bit 0,
// which makes the printed X the smallest among all optimal values.
void solve(){
scanf("%d", &n);
forn(i, 1, n){
int x;
scanf("%d", &x);
add(x, i);
}
go(0);
ll inv = 0;
int res = 0;
forn(i, 0, 29){
inv += min(dp[0][i], dp[1][i]);
// Strict '<' keeps bit i at 0 on ties, yielding the minimum X.
if(dp[1][i] < dp[0][i])
res += (1 << i);
}
printf("%lld %d", inv, res);
}
// Entry point. Single test case; re-enabling the commented scanf would
// switch to multi-test mode.
int main() {
    int tests = 1;
    // scanf("%d", &tests);
    for (int i = 0; i < tests; ++i) {
        solve();
    }
}
|
1416
|
D
|
Graph and Queries
|
You are given an undirected graph consisting of $n$ vertices and $m$ edges. Initially there is a single integer written on every vertex: the vertex $i$ has $p_i$ written on it. All $p_i$ are distinct integers from $1$ to $n$.
You have to process $q$ queries of two types:
- $1$ $v$ — among all vertices reachable from the vertex $v$ using the edges of the graph (including the vertex $v$ itself), find a vertex $u$ with the largest number $p_u$ written on it, print $p_u$ and replace $p_u$ with $0$;
- $2$ $i$ — delete the $i$-th edge from the graph.
Note that, in a query of the first type, it is possible that all vertices reachable from $v$ have $0$ written on them. In this case, $u$ is not explicitly defined, but since the selection of $u$ does not affect anything, you can choose any vertex reachable from $v$ and print its value (which is $0$).
|
Basically, we want to transform each "connected component maximum" query into "segment maximum" query. It can be efficiently done using DSU and processing all queries in reversed order. For simplicity, let's assume all edges will eventually get deleted in the process. If not, you can always add some extra queries at the end. Initially, each vertex is a connected component on its own. We are processing all queries in reverse order. If the current query is of first type, remember the "boss" of the corresponding vertex. Otherwise, unite the corresponding vertices accordingly. If we want to unite two bosses $a$ and $b$, we create a new fake vertex $c$ and add edges $(a, c)$, $(b, c)$ so that the subtree of $c$ becomes responsible for both components of $a$ and $b$. Notice that we cannot apply small-to-large merging to our DSU, but we are still able to use path-compression heuristic. Now, our DSU-tree is ready. Each query of first type is now a subtree-maximum query and all queries of second type can be ignored. The solution onwards should be pretty straightforward. We first do an Eulerian tour on our tree to transform each subtree into a segment. Using segment tree we are able to efficiently process all queries. Time complexity: $O((n + m + q) \log n)$ Space complexity: $O(n + m + q)$
|
[
"data structures",
"dsu",
"graphs",
"implementation",
"trees"
] | 2,600
|
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)5e5 + 5;
const int MAXM = (int)3e5 + 5;
const int MAXQ = (int)5e5 + 5;
pii e[MAXM], que[MAXQ], t[MAXN << 2];
vector<int> adj[MAXN];
int tin[MAXN], tout[MAXN], timer;
int par[MAXN], arr[MAXN];
bool del[MAXM];
int n, m, q;
// DSU find with full path compression: returns the representative of x's
// component and re-points every vertex on the walked path directly at it
// (same final parent array as the recursive formulation, without recursion).
int getPar(int x) {
    int root = x;
    while (root != par[root]) {
        root = par[root];
    }
    while (x != root) {
        const int next = par[x];
        par[x] = root;
        x = next;
    }
    return root;
}
// DSU union that records the merge history as a tree: a brand-new fake
// vertex (note: the global n is incremented here) becomes the parent of
// both current roots, so its subtree spans exactly the merged component.
void uni(int a, int b) {
a = getPar(a);
b = getPar(b);
if (a == b) {
return;
}
// No union-by-size/rank — the tree shape itself is meaningful — but
// getPar's path compression keeps finds fast.
++n;
par[n] = n;
par[a] = n;
par[b] = n;
adj[n].pb(a);
adj[n].pb(b);
}
void dfs(int v) {
tin[v] = ++timer;
for (int to : adj[v]) {
dfs(to);
}
tout[v] = timer;
}
// Segment-tree range maximum over positions [l, r]; node v covers [tl, tr].
// Returns a (value, tour-position) pair; (0, 0) serves as the neutral element.
pii segMax(int v, int tl, int tr, int l, int r) {
    if (l > r || tl > r || tr < l) {
        return make_pair(0, 0);
    }
    if (l <= tl && tr <= r) {
        return t[v];
    }
    const int mid = tl + (tr - tl) / 2;
    pii best_left = segMax(v * 2, tl, mid, l, r);
    pii best_right = segMax(v * 2 + 1, mid + 1, tr, l, r);
    return max(best_left, best_right);
}
// Point assignment: stores x at tour position p, then refreshes the maxima
// along the path back to the root. Node v covers [tl, tr].
void updPos(int v, int tl, int tr, int p, pii x) {
    if (tl == tr) {
        t[v] = x;
        return;
    }
    const int mid = tl + (tr - tl) / 2;
    const int left = v * 2;
    const int right = left + 1;
    if (p <= mid) {
        updPos(left, tl, mid, p, x);
    } else {
        updPos(right, mid + 1, tr, p, x);
    }
    t[v] = max(t[left], t[right]);
}
// Offline solution: edge deletions processed in reverse become unions,
// building a DSU-tree whose subtrees are the historical components; an
// Euler tour plus a max segment tree then answers each type-1 query as a
// range-maximum over the component's tour interval.
void solve() {
scanf("%d %d %d", &n, &m, &q);
for (int i = 1; i <= n; ++i) {
scanf("%d", &arr[i]);
}
for (int i = 1; i <= m; ++i) {
int u, v;
scanf("%d %d", &u, &v);
e[i] = mp(u, v);
}
// First pass over queries only marks which edges ever get deleted.
for (int i = 1; i <= q; ++i) {
int a, b;
scanf("%d %d", &a, &b);
que[i] = mp(a, b);
if (a == 2) {
del[b] = 1;
}
}
for (int i = 1; i <= n; ++i) {
par[i] = i;
}
// Start from the final graph: edges never deleted are present throughout.
for (int i = 1; i <= m; ++i) {
if (!del[i]) {
uni(e[i].fi, e[i].se);
}
}
// Reverse sweep: each deletion becomes a union; for a type-1 query we
// remember the component root at that moment (its subtree = reachable set).
// Note that uni() grows n by allocating fake internal vertices.
for (int i = q; i > 0; --i) {
int tp = que[i].fi;
if (tp == 2) {
int id = que[i].se;
uni(e[id].fi, e[id].se);
}
else {
que[i].se = getPar(que[i].se);
}
}
// Euler-tour every DSU-tree root to flatten subtrees into index ranges.
for (int i = 1; i <= n; ++i) {
if (getPar(i) == i) {
dfs(i);
}
}
// Fake vertices contribute arr[] == 0 (global array is zero-initialized
// and solve() runs once), which is the neutral value for the maximum.
for (int i = 1; i <= n; ++i) {
updPos(1, 1, n, tin[i], mp(arr[i], tin[i]));
}
// Forward pass: answer type-1 queries via range max, then zero out the
// extracted position as the statement requires.
for (int i = 1; i <= q; ++i) {
int tp = que[i].fi;
if (tp == 1) {
int v = que[i].se;
pii tmp = segMax(1, 1, n, tin[v], tout[v]);
if (tmp.fi == 0) {
printf("0\n");
}
else {
printf("%d\n", tmp.fi);
updPos(1, 1, n, tmp.se, mp(0, 0));
}
}
}
}
// Entry point: a single test case.
int main() {
    solve();
    return 0;
}
|
1416
|
E
|
Split
|
One day, BThero decided to play around with arrays and came up with the following problem:
You are given an array $a$, which consists of $n$ positive integers. The array is numerated $1$ through $n$. You execute the following procedure \textbf{exactly once}:
- You create a new array $b$ which consists of $2n$ \textbf{positive} integers, where for each $1 \le i \le n$ the condition $b_{2i-1}+b_{2i} = a_i$ holds. For example, for the array $a = [6, 8, 2]$ you can create $b = [2, 4, 4, 4, 1, 1]$.
- You merge consecutive equal numbers in $b$. For example, $b = [2, 4, 4, 4, 1, 1]$ becomes $b = [2, 4, 1]$.
Find and print the minimum possible value of $|b|$ (size of $b$) which can be achieved at the end of the procedure. It can be shown that under the given constraints there is at least one way to construct $b$.
|
Note that minimizing $|b|$ is the same as maximizing the number of consecutive equal pairs. We will focus on the second version. Let's forget about constraints and consider the most naive solution with dynamic programming. $DP[i][j]$ will store the answer if we have already considered ($a_1$, ..., $a_i$) and our last element $b_{2i}$ is equal to $j$. Let's get rid of our dimension $i$ and keep our $DP$ table by layers. Suppose that our current $i$-th layer is called $curDP$, next layer is called $nxtDP$, and $a_{i+1}$ is called $X$. After carefully analyzing our transitions, we have the following observations: for any $i$, $nxtDP[i] \ge max(curDP)$, since we always have a transition from our maximum. $max(nxtDP[i]) - min(nxtDP[i]) \le 2$, since we can add at most two pairs. The case $max(nxtDP[i]) - min(nxtDP[i]) = 2$ may occur only if $a_i$ is even. Moreover, $nxtDP[a_i/2]$ will be the only maximum element. For some suffix upto $i$ we always have a transition from $curDP[X-i] + 1$ to $nxtDP[i]$. If $X$ is even, instead of calculating $nxtDP[X/2]$ separately, we can calculate it as usual and increase its value by $1$ at the end. Using everything said above, we could replace our naive $DP$ with the following: A variable called $zero$ - the value of minimum of our current layer. A set called $one$ - it keeps all indices $i$ such that $curDP[i] = zero + 1$. A variable called $two$ - it is equal to $-1$ or $X/2$ depending on the parity of $X$ and the value of $curDP[X/2]$. Basically, we want to be able to: Erase some elements from the prefix/suffix of our set $one$. Check if some number $x$ is in our set $one$. Add a segment of values $[l, r]$ into out set $one$. Rotate all elements in our set by a pivot $x$. That is, a number $y$ should turn into $x-y$. We can efficiently process all queries by maintaining $one$ as a simple set of non-intersecting segments. The rotation operation can be done as follows: Suppose we had an integer $X$ at the beginning. 
We rotate everything by a pivot $A$. $X$ becomes $A-X$. We rotate everything by a pivot $B$. $A-X$ becomes $B-A+X$. Following the logic, $C-B+A-X$, $D-C+B-A+X$, ... We can just maintain the sign of $X$ and a global pivot, which is the combination of all our rotation operations. Time complexity: $O(n \log n)$ Space complexity: $O(n)$
|
[
"binary search",
"data structures",
"dp",
"greedy"
] | 3,200
|
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
typedef vector<int> vi;
typedef vector<vi> vvi;
const int dx[] = {0, 1, 0, -1};
const int dy[] = {1, 0, -1, 0};
const char dc[] = {'R', 'D', 'L', 'U'};
const int INF = (int)1e6;
const int MAXN = (int)3e5 + 5;
// File-local max-flow machinery (Dinic's algorithm).
// cap[0][v] / cap[1][v] accumulate mandatory unit in/out flow (lower
// bounds); solve() later turns them into newS->v and v->newT arcs — the
// standard circulation-with-demands reduction around the auxiliary
// source/sink pair newS/newT.
namespace {
// One directed arc; arcs are stored in pairs so that arc id and its
// reverse are always id and id ^ 1.
struct Edge {
int v, to, f, c;
Edge() {
v = to = f = c = 0;
}
Edge(int v, int to, int c) : v(v), to(to), c(c) {
f = 0;
}
};
vector<Edge> e;
vector<int> adj[MAXN];
// ptr: per-vertex arc cursor for the blocking-flow DFS; d: BFS levels;
// q: flat BFS queue.
int ptr[MAXN], d[MAXN], q[MAXN];
// S/T: problem source/sink; newS/newT: auxiliary pair for demands; V: vertex count.
int S, T, newS, newT, V;
int cap[2][MAXN];
// Resets all per-run state for the first V vertices.
void prep() {
e.clear();
for (int i = 0; i < V; ++i) {
adj[i].clear();
ptr[i] = d[i] = q[i] = 0;
cap[0][i] = cap[1][i] = 0;
}
}
// Adds arc u->v with capacity c plus its zero-capacity reverse arc.
void addEdge(int u, int v, int c) {
//printf("E %d %d %d\n", u, v, c);
adj[u].pb((int)e.size());
e.pb(Edge(u, v, c));
adj[v].pb((int)e.size());
e.pb(Edge(v, u, 0));
}
// Records one mandatory unit of flow u->v (a lower bound) in the demand
// counters instead of adding a real arc.
void addEdgeLim(int u, int v) {
//printf("F %d %d\n", u, v);
++cap[0][v];
++cap[1][u];
}
// BFS phase: builds the level graph over residual arcs from newS; returns
// whether newT is still reachable.
bool bfs() {
fill(d, d + V, -1);
d[newS] = 0;
int l = 0, r = 0;
q[r++] = newS;
while (l < r) {
int v = q[l++];
for (int id : adj[v]) {
if (e[id].f < e[id].c) {
int to = e[id].to;
if (d[to] == -1) {
d[to] = d[v] + 1;
q[r++] = to;
}
}
}
}
return d[newT] != -1;
}
// DFS phase: pushes up to 'flow' units along level-increasing residual
// arcs; ptr[v] advances past saturated/dead arcs so they are not rescanned.
int dfs(int v, int flow = INF) {
if (!flow || v == newT) {
return flow;
}
int sum = 0;
for (; ptr[v] < (int)adj[v].size(); ++ptr[v]) {
int id = adj[v][ptr[v]];
int to = e[id].to;
int can = e[id].c - e[id].f;
if (d[to] != d[v] + 1 || can == 0) {
continue;
}
int pushed = dfs(to, min(flow, can));
if (pushed > 0) {
e[id].f += pushed;
// id ^ 1 is the paired reverse arc.
e[id ^ 1].f -= pushed;
sum += pushed;
flow -= pushed;
if (flow == 0) {
return sum;
}
}
}
return sum;
}
// Full Dinic loop: repeat BFS + blocking flow until newT becomes unreachable.
int maxFlow() {
int ret = 0;
while (bfs()) {
fill(ptr, ptr + V, 0);
while (int pushed = dfs(newS)) {
ret += pushed;
}
}
return ret;
}
}
vvi arr, follow;
int n, m;
// True iff cell (x, y) lies within the n x m grid.
bool inside(int x, int y) {
    return x >= 0 && x < n && y >= 0 && y < m;
}
// Row-major index of cell (x, y) among the grid vertices of the flow network.
int id(int x, int y) {
    return y + x * m;
}
// NOTE(review): this routine builds a grid construction via max-flow with
// lower bounds; it does not correspond to the adjacent "Split" editorial
// text and appears to solve a different problem — likely a data
// misalignment in this dump. Comments below describe only the visible
// mechanics.
//
// Each cell either points (follow[][]) at a strictly smaller 4-neighbour,
// or must be matched: the checkerboard colouring (i + j) % 2 makes the
// equal-value adjacency edges bipartite, mandatory units are recorded via
// addEdgeLim, and feasibility is decided by the circulation-with-demands
// construction (newS/newT plus the infinite T->S arc).
void solve() {
scanf("%d %d", &n, &m);
arr = follow = vvi(n, vi(m, -1));
// Vertex layout: grid cells [0, n*m), then S, T, newS, newT.
S = n * m;
T = S + 1;
newS = T + 1;
newT = newS + 1;
V = newT + 1;
prep();
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
scanf("%d", &arr[i][j]);
}
}
// Build the network: a strictly smaller neighbour lets the cell "follow"
// it; equal-valued neighbours are matching candidates, with the arc added
// only from the even-coloured side to avoid duplicates.
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
for (int dir = 0; dir < 4; ++dir) {
int ni = i + dx[dir], nj = j + dy[dir];
if (inside(ni, nj)) {
if (arr[ni][nj] < arr[i][j]) {
follow[i][j] = dir;
}
else if ((i + j) % 2 == 0 && arr[ni][nj] == arr[i][j]) {
addEdge(id(i, j), id(ni, nj), 1);
}
}
}
if (follow[i][j] == -1) {
// Important vertex: no smaller neighbour, so it MUST be matched —
// recorded as a lower bound rather than an optional unit arc.
if ((i + j) % 2) {
addEdgeLim(id(i, j), T);
}
else {
addEdgeLim(S, id(i, j));
}
}
else {
if ((i + j) % 2) {
addEdge(id(i, j), T, 1);
}
else {
addEdge(S, id(i, j), 1);
}
}
}
}
// Materialize the demands as arcs from/to the auxiliary source/sink.
for (int i = 0; i <= T; ++i) {
if (cap[0][i] > 0) {
addEdge(newS, i, cap[0][i]);
}
if (cap[1][i] > 0) {
addEdge(i, newT, cap[1][i]);
}
}
addEdge(T, S, INF);
maxFlow();
// Feasible iff every demand arc out of newS is saturated.
for (int id : adj[newS]) {
if (e[id].f != e[id].c) {
printf("NO\n");
return;
}
}
vvi ansv, ansc;
ansv = ansc = vvi(n, vi(m));
// Cells with a strictly smaller neighbour take the difference and point at it.
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
int dir = follow[i][j];
if (dir != -1) {
int ni = i + dx[dir], nj = j + dy[dir];
ansv[i][j] = arr[i][j] - arr[ni][nj];
ansc[i][j] = dir;
}
}
}
// Saturated unit arcs between two grid cells are the chosen matchings:
// the pair splits its value as (arr - 1, 1) and the cells point at each other.
for (Edge it : e) {
int v = it.v, to = it.to;
if (max(v, to) < n * m && it.f == it.c && it.c == 1) {
int ax = v / m, ay = v % m;
int bx = to / m, by = to % m;
ansv[ax][ay] = arr[ax][ay] - 1;
ansv[bx][by] = 1;
for (int dir = 0; dir < 4; ++dir) {
if (mp(ax + dx[dir], ay + dy[dir]) == mp(bx, by)) {
ansc[ax][ay] = dir;
ansc[bx][by] = (dir + 2) % 4;
}
}
}
}
printf("YES\n");
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
printf("%d%c", ansv[i][j], " \n"[j == m - 1]);
}
}
for (int i = 0; i < n; ++i) {
for (int j = 0; j < m; ++j) {
printf("%c%c", dc[ansc[i][j]], " \n"[j == m - 1]);
}
}
}
// Entry point: reads the test-case count and runs solve() per case.
int main() {
    int caseCount = 0;
    scanf("%d", &caseCount);
    for (int i = 0; i < caseCount; ++i) {
        solve();
    }
    return 0;
}
|
1417
|
A
|
Copy-paste
|
\begin{quote}
— Hey folks, how do you like this problem?— That'll do it.
\end{quote}
BThero is a powerful magician. He has got $n$ piles of candies, the $i$-th pile initially contains $a_i$ candies. BThero can cast a copy-paste spell as follows:
- He chooses two piles $(i, j)$ such that $1 \le i, j \le n$ and $i \ne j$.
- All candies from pile $i$ are copied into pile $j$. Formally, the operation $a_j := a_j + a_i$ is performed.
BThero can cast this spell any number of times he wants to — but unfortunately, if some pile contains strictly more than $k$ candies, he loses his magic power. What is the maximum number of times BThero can cast the spell without losing his power?
|
If we do our operation on two arbitrary integers $x \le y$, it is always better to copy $x$ into $y$ rather than to copy $y$ into $x$ (since a resulting pair $(x, x + y)$ is better than $(y, x + y)$). Now, let's assume that we do our operation on two integers $x \le y$ such that $x$ is not the minimum element of our array. If we replace $x$ with minimum, we can always achieve at least the same answer. Thus, we can take any index $m$ such that $a_m$ is the array minimum and use it to increase all other values. Time complexity: $O(n)$ or $O(nk)$ per testcase. Space complexity: $O(n)$
|
[
"greedy",
"math"
] | 800
|
// chrono::system_clock::now().time_since_epoch().count()
#include<bits/stdc++.h>
#define pb push_back
#define eb emplace_back
#define mp make_pair
#define fi first
#define se second
#define all(x) (x).begin(), (x).end()
#define debug(x) cerr << #x << " = " << x << endl;
using namespace std;
typedef long long ll;
typedef pair<int, int> pii;
const int MAXN = (int)1e3 + 5;
int n, k;
int arr[MAXN];
void solve() {
    // Read the n piles and the cap k; one spell copies a pile into another.
    scanf("%d %d", &n, &k);
    for (int i = 1; i <= n; ++i) {
        scanf("%d", &arr[i]);
    }
    // Index of a minimum pile: copying the minimum is always optimal.
    int mn = min_element(arr + 1, arr + n + 1) - arr;
    int ans = 0;
    for (int i = 1; i <= n; ++i) {
        if (i != mn) {
            // Pile i can absorb the minimum while arr[i] + arr[mn] <= k,
            // i.e. exactly floor((k - arr[i]) / arr[mn]) times. By the
            // constraints a_i >= 1, so the division is safe. This replaces
            // the original O(k) repeated-addition loop with O(1) math,
            // making the whole test case O(n) instead of O(nk).
            if (arr[i] <= k) {
                ans += (k - arr[i]) / arr[mn];
            }
        }
    }
    printf("%d\n", ans);
}
int main() {
    // One line with the number of test cases, then each case on its own.
    int cases;
    scanf("%d", &cases);
    for (; cases > 0; --cases) {
        solve();
    }
    return 0;
}
|
1417
|
B
|
Two Arrays
|
RedDreamer has an array $a$ consisting of $n$ non-negative integers, and an unlucky integer $T$.
Let's denote the misfortune of array $b$ having length $m$ as $f(b)$ — the number of pairs of integers $(i, j)$ such that $1 \le i < j \le m$ and $b_i + b_j = T$. RedDreamer has to paint each element of $a$ into one of two colors, white and black (for each element, the color is chosen independently), and then create two arrays $c$ and $d$ so that all white elements belong to $c$, and all black elements belong to $d$ \textbf{(it is possible that one of these two arrays becomes empty)}. RedDreamer wants to paint the elements in such a way that $f(c) + f(d)$ is \textbf{minimum} possible.
For example:
- if $n = 6$, $T = 7$ and $a = [1, 2, 3, 4, 5, 6]$, it is possible to paint the $1$-st, the $4$-th and the $5$-th elements white, and all other elements black. So $c = [1, 4, 5]$, $d = [2, 3, 6]$, and $f(c) + f(d) = 0 + 0 = 0$;
- if $n = 3$, $T = 6$ and $a = [3, 3, 3]$, it is possible to paint the $1$-st element white, and all other elements black. So $c = [3]$, $d = [3, 3]$, and $f(c) + f(d) = 0 + 1 = 1$.
Help RedDreamer to paint the array optimally!
|
Let us partition the array into three sets $X$, $Y$, $Z$ such that $X$ contains all numbers less than $T/2$, $Y$ contains all numbers equal to $T/2$ and $Z$ contains all numbers greater than $T/2$. It is clear that $f(X) = f(Z) = 0$. Now, since each pair in $Y$ makes a sum of $T$, the best solution is to distribute all numbers in $Y$ equally among $X$ and $Z$. Time complexity: $O(n)$ Space complexity: $O(n)$
|
[
"greedy",
"math",
"sortings"
] | 1,100
|
#include <bits/stdc++.h>
#define len(v) ((int)((v).size()))
#define all(v) (v).begin(), (v).end()
#define rall(v) (v).rbegin(), (v).rend()
#define chmax(x, v) x = max((x), (v))
#define chmin(x, v) x = min((x), (v))
using namespace std;
using ll = long long;
void solve() {
    // Color each element 0/1 so that no monochromatic pair sums to T:
    // values below T/2 get color 0, values above get color 1, and exact
    // halves (possible only for even T) alternate between the two colors.
    int n, tar;
    cin >> n >> tar;
    int halfSeen = 0;  // count of elements equal to T/2 met so far
    for (int i = 0; i < n; ++i) {
        int value;
        cin >> value;
        int color;
        if (tar % 2 == 0 && value == tar / 2) {
            color = halfSeen % 2;
            ++halfSeen;
        } else {
            color = (2 * value < tar) ? 0 : 1;
        }
        cout << color << " \n"[i == n - 1];
    }
}
int main() {
    // Fast, untied C++ streams; then one solve() per test case.
    ios::sync_with_stdio(false), cin.tie(0);
    int nbTests;
    cin >> nbTests;
    while (nbTests-- > 0) {
        solve();
    }
}
|
1418
|
A
|
Buying Torches
|
You are playing a very popular game called Cubecraft. Initially, you have one stick and want to craft $k$ torches. One torch can be crafted using \textbf{one stick and one coal}.
Hopefully, you've met a very handsome wandering trader who has two trade offers:
- exchange $1$ stick for $x$ sticks (you lose $1$ stick and gain $x$ sticks).
- exchange $y$ sticks for $1$ coal (you lose $y$ sticks and gain $1$ coal).
During one trade, you can use \textbf{only one} of these two trade offers. You can use each trade offer any number of times you want to, in any order.
Your task is to find the minimum number of trades you need to craft at least $k$ torches. The answer always exists under the given constraints.
You have to answer $t$ independent test cases.
|
You need $s = yk + k - 1$ additional sticks to get $k$ torches ($yk$ sticks for $y$ units of coal and also $k$ sticks required to craft torches) and you get $x-1$ sticks per one trade. To buy this number of sticks, you need $\left\lceil\frac{s}{x-1}\right\rceil$ trades. And also, you need $k$ additional trades to turn some sticks into coals. And the final answer is $\left\lceil\frac{s}{x-1}\right\rceil + k$.
|
[
"math"
] | 1,000
|
# For each test case we need s = (y + 1) * k - 1 extra sticks; one stick
# trade yields x - 1 sticks, so ceil(s / (x - 1)) stick trades plus k coal
# trades is the answer.
t = int(input())
for _ in range(t):
    x, y, k = map(int, input().split())
    sticks_needed = (y + 1) * k - 1
    stick_trades = -(-sticks_needed // (x - 1))  # ceiling division
    print(stick_trades + k)
|
1418
|
B
|
Negative Prefixes
|
You are given an array $a$, consisting of $n$ integers.
Each position $i$ ($1 \le i \le n$) of the array is either locked or unlocked. You can take the values on the unlocked positions, rearrange them in any order and place them back into the unlocked positions. You are not allowed to remove any values, add the new ones or rearrange the values on the locked positions. You are allowed to leave the values in the same order as they were.
For example, let $a = [-1, 1, \underline{3}, 2, \underline{-2}, 1, -4, \underline{0}]$, the underlined positions are locked. You can obtain the following arrays:
- $[-1, 1, \underline{3}, 2, \underline{-2}, 1, -4, \underline{0}]$;
- $[-4, -1, \underline{3}, 2, \underline{-2}, 1, 1, \underline{0}]$;
- $[1, -1, \underline{3}, 2, \underline{-2}, 1, -4, \underline{0}]$;
- $[1, 2, \underline{3}, -1, \underline{-2}, -4, 1, \underline{0}]$;
- and some others.
Let $p$ be a sequence of prefix sums of the array $a$ after the rearrangement. So $p_1 = a_1$, $p_2 = a_1 + a_2$, $p_3 = a_1 + a_2 + a_3$, $\dots$, $p_n = a_1 + a_2 + \dots + a_n$.
Let $k$ be the maximum $j$ ($1 \le j \le n$) such that $p_j < 0$. If there are no $j$ such that $p_j < 0$, then $k = 0$.
Your goal is to rearrange the values in such a way that $k$ is minimum possible.
Output the array $a$ after the rearrangement such that the value $k$ for it is minimum possible. If there are multiple answers then print any of them.
|
Let's collect the prefix sums of the initial array $a$. How do they change if you swap two values in the array? Let's swap values on positions $l$ and $r$ ($l < r$). Prefix sums from $1$ to $l-1$ aren't changed. Prefix sums from $l$ to $r-1$ are increased by $a_r-a_l$ (note that if $a_l>a_r$ then these sums become smaller). Finally, prefix sums from $r$ to $n$ aren't changed as well. Thus, swapping two values $a_l<a_r$ will only increase some prefix sums but never decrease any of them. That helps us see that the array such that all values on the unlocked positions are sorted in a non-increasing order is the most optimal one. Overall complexity: $O(n \log n)$ per testcase.
|
[
"greedy",
"sortings"
] | 1,300
|
#include <bits/stdc++.h>
using namespace std;
#define sz(a) int((a).size())
#define forn(i, n) for (int i = 0; i < int(n); ++i)
void solve() {
    // Sort the values that sit on unlocked positions in non-increasing
    // order and write them back; locked positions keep their values.
    int n;
    cin >> n;
    vector<int> a(n), locked(n);
    for (int i = 0; i < n; ++i) cin >> a[i];
    for (int i = 0; i < n; ++i) cin >> locked[i];
    vector<int> freeVals;
    for (int i = 0; i < n; ++i)
        if (locked[i] == 0)
            freeVals.push_back(a[i]);
    sort(freeVals.begin(), freeVals.end(), greater<int>());
    size_t nxt = 0;
    for (int i = 0; i < n; ++i) {
        int out = locked[i] ? a[i] : freeVals[nxt++];
        cout << out << ' ';
    }
    cout << '\n';
}
int main() {
    // Number of test cases, then one solve() call per case.
    int T;
    cin >> T;
    for (int tc = 0; tc < T; ++tc)
        solve();
}
|
1418
|
C
|
Mortal Kombat Tower
|
You and your friend are playing the game Mortal Kombat XI. You are trying to pass a challenge tower. There are $n$ bosses in this tower, numbered from $1$ to $n$. The type of the $i$-th boss is $a_i$. If the $i$-th boss is easy then its type is $a_i = 0$, otherwise this boss is hard and its type is $a_i = 1$.
During one session, either you or your friend can kill \textbf{one or two} bosses (neither you nor your friend can skip the session, so the minimum number of bosses killed during one session is at least one). After your friend session, your session begins, then again your friend session begins, your session begins, and so on. \textbf{The first session is your friend's session}.
Your friend needs to get good because he can't actually kill hard bosses. To kill them, he uses skip points. One skip point can be used to kill one hard boss.
Your task is to find the \textbf{minimum} number of skip points your friend needs to use so you and your friend kill all $n$ bosses in the given order.
For example: suppose $n = 8$, $a = [1, 0, 1, 1, 0, 1, 1, 1]$. Then the best course of action is the following:
- your friend kills two first bosses, using one skip point for the first boss;
- you kill the third and the fourth bosses;
- your friend kills the fifth boss;
- you kill the sixth and the seventh bosses;
- your friend kills the last boss, using one skip point, so the tower is completed using two skip points.
You have to answer $t$ independent test cases.
|
If $a_1 = 1$ then our friend always needs one skip point because he always has to kill the first boss. Let's just remove this boss from our consideration and increase the answer if needed. What about other skip points? Firstly, let's understand that we can always do our moves in such a way that the first hard boss will always be killed by us (except the first one). So, if it's our friend turn now and there is only one easy boss before the hard, our friend just kills this easy boss. If there are two easy bosses, he kills both. If there are three, friend kills the first, we kill the second, and he kills the third. And so on. So we can always assume that each segment of hard bosses starts with our move. We can kill each such segment greedily: we kill two bosses and our friend kills one. If there are less than three bosses in the segment, we just kill remaining and proceed. So if the length of the current segment of hard bosses is $k$ then we need $\left\lfloor\frac{k}{3}\right\rfloor$ skip points. Summing up these values over all segments we get the answer (and don't forget that the first boss should be handled separately). Segments of ones can be extracted using two pointers. There are also dynamic programming solution but I found this one more clever.
|
[
"dp",
"graphs",
"greedy",
"shortest paths"
] | 1,500
|
#include <bits/stdc++.h>
using namespace std;
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
//  freopen("output.txt", "w", stdout);
#endif
    // The first boss costs one skip point if it is hard; afterwards every
    // maximal run of k consecutive hard bosses costs floor(k / 3).
    int t;
    cin >> t;
    for (; t > 0; --t) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (int &x : a) cin >> x;
        int ans = (a[0] == 1) ? 1 : 0;
        int i = 1;
        while (i < n) {
            if (a[i] == 1) {
                int j = i;
                while (j < n && a[j] == 1) ++j;  // extend the hard run
                ans += (j - i) / 3;
                i = j;
            } else {
                ++i;
            }
        }
        cout << ans << endl;
    }
    return 0;
}
|
1418
|
D
|
Trash Problem
|
Vova decided to clean his room. The room can be represented as the coordinate axis $OX$. There are $n$ piles of trash in the room, coordinate of the $i$-th pile is the integer $p_i$. All piles have \textbf{different} coordinates.
Let's define a total cleanup as the following process. The goal of this process is to collect \textbf{all} the piles in \textbf{no more than two} different $x$ coordinates. To achieve this goal, Vova can do several (possibly, zero) moves. During one move, he can choose some $x$ and move \textbf{all piles} from $x$ to $x+1$ or $x-1$ using his broom. Note that he can't choose how many piles he will move.
Also, there are two types of queries:
- $0$ $x$ — remove a pile of trash from the coordinate $x$. It is guaranteed that there is a pile in the coordinate $x$ at this moment.
- $1$ $x$ — add a pile of trash to the coordinate $x$. It is guaranteed that there is no pile in the coordinate $x$ at this moment.
Note that it is possible that there are zero piles of trash in the room at some moment.
Vova wants to know the \textbf{minimum} number of moves he can spend if he wants to do a total cleanup before any queries. He also wants to know this number of moves after applying each query. Queries are applied in the given order. Note that the total cleanup doesn't actually happen and doesn't change the state of piles. It is only used to calculate the number of moves.
For better understanding, please read the \textbf{Notes} section below to see an explanation for the first example.
|
First, let's understand that if we choose some subset of points $x_1, x_2, \dots, x_k$, then it does not matter to which point we move it (inside the segment [$\min(x_1, x_2, \dots, x_k); \max(x_1, x_2, \dots, x_k)]$) because the minimum number of moves will always be the same and it is equal to $\max(x_1, x_2, \dots, x_k) - \min(x_1, x_2, \dots, x_k)$. Okay, we need to split all points into two subsets and collect all points of the first subset in some point inside it and the same with the second subset. What can we notice? If we sort the points, it's always optimal to choose these subsets as segments. I.e. if the maximum point of the first subset is $x_r$, the minimum point of the second subset is $x_l$ and $x_l < x_r$, we can swap them and decrease answers for both subsets. So, we need to cover all the points with two segments with the minimum total length. What is this length? It is $x_n - x_1 - maxGap$. $MaxGap$ is the maximum distance between two consecutive points (i.e. $max(x_2 - x_1, x_3 - x_2, \dots, x_n - x_{n-1})$. So, we can solve the problem in $O(n \log{n})$ without queries. But how to deal with queries? Let's maintain the set which contains all points $x_i$ and the multiset (set with repetitions) that maintains all gaps between two adjacent points. So, the answer is maximum in the set of points minus minimum in the set of points minus maximum in the multiset of lengths. How do we recalculate these sets between queries? If some point $x$ is removed, let's find the maximum point less than $x$ (let it be $x_l$) and the minimum point greater than $x$ (let it be $x_r$) in the current set of points. Both these points can be found in a logarithmic time. Then we need to remove $x - x_l$ with $x_r - x$ from the multiset and add $x_r - x_l$ to the multiset (and, of course, remove $x$ from the set). If some point $x$ is added, then we need to remove $x_r - x_l$ from the multiset and add $x - x_l$ with $x_r - x$ to the multiset (and add $x$ to the set). 
So, we can process every query in $O(\log{(n + q)})$ time and the total time complexity is $O((n+q) \log{(n + q)})$.
|
[
"data structures",
"implementation"
] | 2,100
|
#include <bits/stdc++.h>
using namespace std;
// Minimum number of moves for the current configuration: the total span
// of the points minus the largest gap between neighbouring points.
// Returns 0 when fewer than two points exist (no gaps).
int get(const set<int> &x, const multiset<int> &len) {
    if (len.empty())
        return 0;
    const int span = *x.rbegin() - *x.begin();
    return span - *len.rbegin();
}
// Insert point p: add the gaps to its new neighbours into `len` and drop
// the gap that previously spanned directly between those neighbours.
void add(int p, set<int> &x, multiset<int> &len) {
    auto it = x.insert(p).first;
    int prv = -1, nxt = -1;
    if (it != x.begin()) {
        prv = *prev(it);
        len.insert(p - prv);
    }
    if (next(it) != x.end()) {
        nxt = *next(it);
        len.insert(nxt - p);
    }
    // The old neighbour-to-neighbour gap is now split in two.
    if (prv != -1 && nxt != -1)
        len.erase(len.find(nxt - prv));
}
// Remove point p: drop the gaps to its neighbours from `len` and insert
// the merged gap that now spans directly between those neighbours.
void rem(int p, set<int> &x, multiset<int> &len) {
    auto it = x.find(p);
    int prv = -1, nxt = -1;
    if (it != x.begin()) {
        prv = *prev(it);
        len.erase(len.find(p - prv));
    }
    if (next(it) != x.end()) {
        nxt = *next(it);
        len.erase(len.find(nxt - p));
    }
    x.erase(it);
    if (prv != -1 && nxt != -1)
        len.insert(nxt - prv);
}
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
//  freopen("output.txt", "w", stdout);
#endif
    // Maintain the point set plus the multiset of adjacent gaps; print
    // the answer from get() after the initial build and after each query.
    int n, q;
    cin >> n >> q;
    set<int> pts;
    multiset<int> gaps;
    for (int i = 0; i < n; ++i) {
        int p;
        cin >> p;
        add(p, pts, gaps);
    }
    cout << get(pts, gaps) << endl;
    while (q-- > 0) {
        int type, p;
        cin >> type >> p;
        if (type == 0)
            rem(p, pts, gaps);
        else
            add(p, pts, gaps);
        cout << get(pts, gaps) << endl;
    }
    return 0;
}
|
1418
|
E
|
Expected Damage
|
You are playing a computer game. In this game, you have to fight $n$ monsters.
To defend from monsters, you need a shield. Each shield has two parameters: its current durability $a$ and its defence rating $b$. Each monster has only one parameter: its strength $d$.
When you fight a monster with strength $d$ while having a shield with current durability $a$ and defence $b$, there are three possible outcomes:
- if $a = 0$, then you receive $d$ damage;
- if $a > 0$ and $d \ge b$, you receive no damage, but the current durability of the shield decreases by $1$;
- if $a > 0$ and $d < b$, nothing happens.
The $i$-th monster has strength $d_i$, and you will fight each of the monsters exactly once, in some random order (all $n!$ orders are equiprobable). You have to consider $m$ different shields, the $i$-th shield has initial durability $a_i$ and defence rating $b_i$. For each shield, calculate the expected amount of damage you will receive if you take this shield and fight the given $n$ monsters in random order.
|
First of all, let's find a solution in $O(nm)$. We will use the lineriality of expectation: the answer for some shield $j$ is equal to $\sum\limits_{i = 1}^{n} d_i P(i, j)$, where $P(i, j)$ is the probability that the monster $i$ will deal damage if we use the $j$-th shield. Let's see how to calculate $P(i, j)$. Consider a monster $i$ such that $d_i \ge b_j$. To deal damage, he should be preceded by at least $a_j$ other monsters having $d_i \ge b_j$. We can write a complicated formula with binomial coefficients to calculate the probability of this happening, and then simplify it, but a much easier solution is to consider the order of these "strong" monsters. Suppose there are $k$ of them, then there are $\max(k - a_j, 0)$ strong monsters that will deal damage. Since all orderings are equiprobable, the probability that our fixed monster will deal damage is $\dfrac{\max(k - a_j, 0)}{k}$ - since it is the probability that it will take one of the last places in the order. Okay, what about "weak" monsters? It turns out that we can use the same approach: to deal damage, a weak monster should be preceded by at least $a_j$ strong monsters. Consider the relative order of $k$ strong monsters and that weak monster we are analyzing. There are $\max(k + 1 - a_j, 0)$ positions where the weak monster will deal damage, so the probability of weak monster dealing damage is $\dfrac{\max(k + 1 - a_j, 0)}{k + 1}$. Okay, we got a solution in $O(nm)$. How to make it faster? Whenever we consider a shield, all monsters are split into two types: strong and weak, and we may sort the monsters beforehand, so the number of strong monsters (and their total strength) can be found with binary search. Since the probabilities for all strong monsters are the same, we can multiply their total strength by the probability that one fixed strong monster will deal damage (we already described how to calculate it). 
The same applies for the weak monsters, so the total complexity is $O(n \log n + m (\log n + \log MOD))$.
|
[
"binary search",
"combinatorics",
"probabilities"
] | 2,400
|
#include <bits/stdc++.h>
using namespace std;
const int N = int(2e5) + 9;
const int MOD = 998244353;
// Modular product of two residues, result in [0, MOD).
int mul(int a, int b) {
    long long prod = (long long)a * b;
    return (int)(prod % MOD);
}
// Binary exponentiation: a^n modulo MOD in O(log n) multiplications.
int bp(int a, int n) {
    int result = 1;
    while (n > 0) {
        if (n % 2 == 1)
            result = mul(result, a);
        a = mul(a, a);
        n /= 2;
    }
    return result;
}
// Modular inverse via Fermat's little theorem (MOD is prime).
int inv(int a) {
    const int res = bp(a, MOD - 2);
    assert(mul(a, res) == 1);  // sanity check: a * a^{-1} == 1 (mod MOD)
    return res;
}
int n, m;
int d[N];
long long sd[N];
long long sum (int l, int r) {
return (sd[r] - sd[l]) % MOD;
}
int main(){
    cin >> n >> m;
    for (int i = 0; i < n; ++i)
        scanf("%d", d + i);
    // Sorting makes the "strong" monsters (d >= b) a suffix of the array,
    // and prefix sums give range strength totals in O(1).
    sort(d, d + n);
    for (int i = 0; i < n; ++i)
        sd[i + 1] = sd[i] + d[i];
    for (int query = 0; query < m; ++query) {
        int a, b;
        scanf("%d%d", &a, &b); // durability, defence
        // Number of strong monsters for this shield.
        int cnt = int((d + n) - lower_bound(d, d + n, b));
        int res = 0;
        if (cnt >= a) {
            // By linearity of expectation: each strong monster hits with
            // probability (cnt - a) / cnt, each weak one with probability
            // (cnt - a + 1) / (cnt + 1); multiply by the total strengths.
            int strong = mul(mul(cnt - a, inv(cnt)), sum(n - cnt, n));
            int weak = mul(mul(cnt - a + 1, inv(cnt + 1)), sum(0, n - cnt));
            res = int((strong + 0LL + weak) % MOD);
        }
        printf("%d\n", res);
    }
    return 0;
}
|
1418
|
F
|
Equal Product
|
You are given four integers $n$, $m$, $l$ and $r$.
Let's name a tuple $(x_1, y_1, x_2, y_2)$ as good if:
- $1 \le x_1 < x_2 \le n$;
- $1 \le y_2 < y_1 \le m$;
- $x_1 \cdot y_1 = x_2 \cdot y_2$;
- $l \le x_1 \cdot y_1 \le r$.
Find any good tuple \textbf{for each $x_1$ from $1$ to $n$ inclusive}.
|
Let's look at $x_1 y_1 = x_2 y_2$ where $x_1 < x_2$. It can be proven that there always exists such pair $(a, b)$ ($a \mid x_1$, $b \mid y_1$ and $a < b$) that $x_2 = \frac{x_1}{a} b$ and $y_2 = \frac{y_1}{b} a$. Brief proof is following: calculate $g = \gcd(x_1, x_2)$, then let $a = \frac{x_1}{g}$ and $b = \frac{x_2}{g}$. Obviously, such $(a, b)$ will make $x_2$ from $x_1$ and $y_2$ from $y_1$ (if $b \mid y_1$). And since $b = \frac{x_2}{g} \Rightarrow$ $b \mid \frac{x_2 y_2}{g} \Rightarrow$ $b \mid \frac{x_1 y_1}{g} \Rightarrow$ $b \mid \frac{x_1}{g} y_1$ and since $\gcd(b, \frac{x_1}{g}) = 1 \Rightarrow$ $b \mid y_1$. As we can see $a$ divides $x_1$, so if we will iterate over all pairs $(x_1, a)$ where $1 \le x_1 \le n$ there will be $O(n \log n)$ pairs in total. Let's fix value of $x_1$. Then, from one side, $y_1 \le m$ but, from the other side, since $l \le x_1 y_1 \le r$, then $\left\lceil \frac{l}{x_1} \right\rceil \le y_1 \le \left\lfloor \frac{r}{x_1} \right\rfloor$. Anyway, all valid $y_1$ form a segment (possibly, empty segment). And we need to find any $b > a$ that divides any $y_1$ from the segment and $x_2 = \frac{x_1}{a} b$ doesn't exceed $n$. Obviously, it's optimally to find the minimum possible such $b$ and just check inequality $\frac{x_1}{a} b \le n$. We can find such $b$ for a fixed $(x_1, a)$ using, for example, built-in upper_bound in a set with all divisors for all valid $y_1$. To maintain this set we can note that $\frac{l}{x_1 + 1} \le \frac{l}{x_1}$ (simillary, $\frac{r}{x_1 + 1} \le \frac{r}{x_1}$). So we can move valid segment's ends as two pointers. Each pair $(y_1, b)$ will be added and erased from the segment exactly once. That's why the total complexity of maintaining the set of divisors (as well as the total complexity of queries for each $(x_1, a)$) will be equal to $O((n + m) \log^2 (n + m))$. All pairs $(x_1, a)$ (and $(y_1, b)$) can be precalculated in $O((n + m) \log (n + m))$ using the sieve-like algorithm.
|
[
"data structures",
"math",
"number theory",
"two pointers"
] | 3,000
|
#include<bits/stdc++.h>
using namespace std;
#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define x first
#define y second
typedef long long li;
typedef pair<int, int> pt;
// Debug printing for pairs in the form "(first, second)".
template<class A, class B> ostream& operator <<(ostream& out, const pair<A, B> &p) {
    out << "(" << p.first << ", " << p.second << ")";
    return out;
}
// Debug printing for vectors in the form "[a, b, c]".
template<class A> ostream& operator <<(ostream& out, const vector<A> &v) {
    out << "[";
    for (size_t i = 0; i < v.size(); ++i) {
        if (i > 0)
            out << ", ";
        out << v[i];
    }
    return out << "]";
}
const int INF = int(1e9);
const li INF64 = li(1e18);
int n, m;
li l, r;
// Read one input: dimensions n, m and the product range [l, r].
// Returns false when the stream has no more data.
inline bool read() {
    if (cin >> n >> m) {
        cin >> l >> r;
        return true;
    }
    return false;
}
const int N = int(2e5) + 555;
vector<int> divs[N];
// For each x1 in [1, n], find any good tuple (x1, y1, x2, y2) with
// x1*y1 = x2*y2 and l <= x1*y1 <= r, or print -1 for that x1.
inline void solve() {
    // divs[v] = all divisors of v, built with a sieve-like double loop.
    fore(d, 1, N) {
        for(int pos = d; pos < N; pos += d)
            divs[pos].push_back(d);
    }
    // [lf, rg] is the current window of candidate y1 values (starts empty).
    li lf = m + 1, rg = m;
    // cnt[d] = how many y1 in the window are divisible by d;
    // id[d]  = the last y1 recorded for d (a valid window member whenever
    //          cnt[d] > 0, since removals happen from the top only).
    vector<int> cnt(m + 1, 0);
    vector<int> id(m + 1, -1);
    // Divisors of at least one y1 currently in the window (cnt[d] > 0).
    set<int> curDivs;
    vector< vector<int> > ans(n + 1);
    fore(x1, 1, n + 1) {
        // Valid y1 satisfy ceil(l / x1) <= y1 <= floor(r / x1) (and <= m).
        li newlf = (l + x1 - 1) / x1;
        li newrg = r / x1;
        assert(newrg - newlf + 1 >= 0);
        // Both window ends move monotonically down as x1 grows, so every
        // y1 is added and removed at most once (two pointers).
        while (lf > newlf) {
            lf--;
            for (int d : divs[lf]) {
                if (cnt[d] == 0)
                    curDivs.insert(d);
                cnt[d]++;
                id[d] = (int)lf;
            }
        }
        while (rg > newrg) {
            for (int d : divs[rg]) {
                cnt[d]--;
                if (cnt[d] == 0)
                    curDivs.erase(d);
            }
            rg--;
        }
        // For every divisor a of x1, take the smallest b > a dividing some
        // y1 in the window; x2 = x1 / a * b must not exceed n.
        for (int a : divs[x1]) {
            auto it = curDivs.upper_bound(a);
            if (it == curDivs.end())
                continue;
            int b = *it;
            if (x1 / a * 1ll * b <= n) {
                int y1 = id[b];
                // y2 = y1 / b * a; since a < b we get y2 < y1 and x2 > x1.
                ans[x1] = {x1, y1, x1 / a * b, y1 / b * a};
            }
        }
    }
    fore(i, 1, n + 1) {
        if (ans[i].empty())
            cout << -1 << '\n';
        else {
            cout << ans[i][0] << " " << ans[i][1] << " " << ans[i][2] << " " << ans[i][3] << '\n';
        }
    }
}
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int startTime = clock();
#endif
    // Unsync and untie the C++ streams for fast I/O.
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    cout << fixed << setprecision(15);
    if (read()) {
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - startTime << endl;
        startTime = clock();
#endif
    }
    return 0;
}
|
1418
|
G
|
Three Occurrences
|
You are given an array $a$ consisting of $n$ integers. We denote the subarray $a[l..r]$ as the array $[a_l, a_{l + 1}, \dots, a_r]$ ($1 \le l \le r \le n$).
A subarray is considered good if every integer that occurs in this subarray occurs there \textbf{exactly thrice}. For example, the array $[1, 2, 2, 2, 1, 1, 2, 2, 2]$ has three good subarrays:
- $a[1..6] = [1, 2, 2, 2, 1, 1]$;
- $a[2..4] = [2, 2, 2]$;
- $a[7..9] = [2, 2, 2]$.
Calculate the number of good subarrays of the given array $a$.
|
Let's consider two solutions: a non-deterministic and a deterministic one. The random solution goes like that. Let's assign a random integer to each value from $1$ to $n$ (to value, not to a position). Let the value of the subarray be the trit-wise sum of the assigned integers of all values on it. Trit-wise is the analogue of bit-wise sum (xor) but in ternary system. So adding up the same integer three times trit-wise is always equal to zero. Thus, if the value on a subarray is zero then each value appears on it a multiple of three times. How to count the number of such subarrays? Process the array from left to right and store the prefix trit-wise sums in a map. The number of the valid subarrays that end in the current position is the number of occurrences of the current prefix trit-wise sum in a map. The current sum should be added to the map afterwards. However, that's not what the problem asks us to find. Let's consider another problem: count the number of subarray such that each number appears no more than three times. This can be done with two pointers. Process the array from left to right and for each number store the positions it occurred on. If some number appears at least four times than the left pointer should be moved to the next position after the fourth-to-last position. The number of valid subarrays the end in the current position is the distance to the left pointer. Let's combine these problems: maintain the pointer to only the valid positions and remove the prefix trit-wise sums from the map as you increase the pointer. That way the map will only store the valid sums, and they can be added to answer as they are. Assume you use $K$ trits. I guess the probability of the collision is the same as two vectors (out of $n$) colliding in a $K$-dimensional space with their coordinates being from $0$ to $2$. That will be about $\frac{1}{2}$ when $n \approx \sqrt{3^K}$ (according to birthday paradox) - and way less if we increase $K$. 
Overall complexity: $O(nK \log n)$. The deterministic solution (a.k.a. the boring one) goes like that. Let's again process the array from left to right. Let the current position be the right border of the segment. Each number makes some constraints on where the left border might be. More specifically, it's two possible segments: between its last occurrence and the current position and between its fourth-to-last occurrence and its third-to-last one. Let's actually invert these segments. Bad segments are from the beginning of the array to the fourth-to-last occurrence, then from the second-to-last occurrence to the last one. So the valid left borders are in such positions that are covered by zero bad segments. Let's keep track of them in a segment tree. Add $1$ on the bad subarrays. Now you have to count the number of $0$ values in a segtree. That's a pretty common problem. As we know that no values can go below $0$, $0$ should be a minimum element on the segment. So we can store a pair of (minimum on segment, number of minimums on segment). At the end the second value is the number of zeros if the first value is zero. Overall complexity: $O(n \log n)$.
|
[
"data structures",
"divide and conquer",
"hashing",
"two pointers"
] | 2,500
|
#include <bits/stdc++.h>
using namespace std;
#define forn(i, n) for(int i = 0; i < int(n); i++)
typedef unsigned long long uli;
vector<int> a;
vector<pair<int, int>> t;
vector<int> ps;
// Combine two (minimum, count-of-minimum) summaries of adjacent segments.
pair<int, int> merge(const pair<int, int> &a, const pair<int, int> &b){
    if (a.first < b.first) return a;
    if (b.first < a.first) return b;
    return make_pair(a.first, a.second + b.second);
}
// Propagate the pending addition ps[v] to the children (when they exist)
// and fold it into this node's stored minimum.
void push(int v){
    const int left = v * 2, right = v * 2 + 1;
    if (right < int(ps.size())){
        ps[left] += ps[v];
        ps[right] += ps[v];
    }
    t[v].first += ps[v];
    ps[v] = 0;
}
// Build the segment tree over [l, r): each leaf holds (min = 0, count = 1).
void build(int v, int l, int r){
    if (r - l == 1){
        t[v] = make_pair(0, 1);
        return;
    }
    int mid = (l + r) / 2;
    build(v * 2, l, mid);
    build(v * 2 + 1, mid, r);
    t[v] = merge(t[v * 2], t[v * 2 + 1]);
}
// Add val on the half-open query segment [L, R); node v covers [l, r).
// push(v) runs before the emptiness check so every visited node has its
// pending value flushed on the way down.
void upd(int v, int l, int r, int L, int R, int val){
    push(v);
    if (L >= R)
        return;
    if (l == L && r == R){
        // Record the addition lazily, then apply it to this node at once.
        ps[v] = val;
        push(v);
        return;
    }
    int m = (l + r) / 2;
    upd(v * 2, l, m, L, min(m, R), val);
    upd(v * 2 + 1, m, r, max(m, L), R, val);
    t[v] = merge(t[v * 2], t[v * 2 + 1]);
}
// Count subarrays where every present value occurs exactly three times.
// The segment tree stores, per candidate left border, how many "bad
// segment" constraints cover it; zero-valued positions are valid borders.
int main(){
    int n;
    scanf("%d", &n);
    a.resize(n);
    forn(i, n){
        scanf("%d", &a[i]);
        --a[i]; // switch to 0-based values
    }
    t.resize(4 * n);
    ps.resize(4 * n);
    build(1, 0, n);
    // pos[v] = positions of value v seen so far, with a -1 sentinel first.
    vector<vector<int>> pos(n, vector<int>(1, -1));
    long long ans = 0;
    forn(i, n){
        int k = pos[a[i]].size();
        // Incrementally adjust bad-coverage for left borders affected by
        // this occurrence of a[i] (see the editorial's segment argument):
        // borders after the last occurrence now see a[i] a wrong number
        // of times (+1); borders where it reaches exactly three become
        // good again (-1); a fourth occurrence re-marks older ones (+1).
        if (k >= 1) upd(1, 0, n, pos[a[i]][k - 1] + 1, i + 1, 1);
        if (k >= 3) upd(1, 0, n, pos[a[i]][k - 3] + 1, pos[a[i]][k - 2] + 1, -1);
        if (k >= 4) upd(1, 0, n, pos[a[i]][k - 4] + 1, pos[a[i]][k - 3] + 1, 1);
        pos[a[i]].push_back(i);
        push(1);
        // Zeros at the root = uncovered borders; positions > i are zero
        // too but are not real candidates, so subtract them.
        if (t[1].first == 0) ans += t[1].second - (n - i - 1);
    }
    printf("%lld\n", ans);
}
|
1419
|
A
|
Digit Game
|
Everyone knows that agents in Valorant decide, who will play as attackers, and who will play as defenders. To do that Raze and Breach decided to play $t$ matches of a digit game...
In each of $t$ matches of the digit game, a positive integer is generated. It consists of $n$ digits. The digits of this integer are numerated from $1$ to $n$ from the highest-order digit to the lowest-order digit. After this integer is announced, the match starts.
Agents play in turns. Raze starts. In one turn an agent can choose any unmarked digit and mark it. Raze can choose digits on odd positions, but can not choose digits on even positions. Breach can choose digits on even positions, but can not choose digits on odd positions. The match ends, when there is only one unmarked digit left. If the single last digit is odd, then Raze wins, else Breach wins.
It can be proved, that before the end of the match (for every initial integer with $n$ digits) each agent has an ability to make a turn, i.e. there is at least one unmarked digit, that stands on a position of required parity.
For each of $t$ matches find out, which agent wins, if both of them want to win and play optimally.
|
Let's say that digits on odd positions are blue and digits on even positions are red. If $n$ is even the remaining digit will be red. If there is at least one even red digit then Breach wins (he can mark all digits except the one that will remain in the end). In other case Raze wins, because any digit that may remain is odd. If $n$ is odd the remaining digit will be blue. If there is at least one odd blue digit then Raze wins (using the same strategy applied to her). In other case Breach wins.
|
[
"games",
"greedy",
"implementation"
] | 900
|
"#include <bits/stdc++.h>\nusing namespace std;\n\nsigned main() {\n\tint T;\n\tcin >> T;\n\twhile (T --> 0) {\n\t\tint n;\n\t\tstring s;\n\t\tcin >> n >> s;\n\t\tbool odd = false, even = false;\n\t\tfor (int i = 1; i <= n; ++i) {\n\t\t\tif (i % 2 == 1) {\n\t\t\t\todd |= ((s[i - 1] - '0') % 2 == 1);\n\t\t\t} else {\n\t\t\t\teven |= ((s[i - 1] - '0') % 2 == 0);\n\t\t\t}\n\t\t}\n\t\tif (n % 2 == 1) {\n\t\t\tcout << (odd ? 1 : 2) << '\\n';\n\t\t} else {\n\t\t\tcout << (even ? 2 : 1) << '\\n';\n\t\t}\n\t}\n\treturn 0;\n}"
|
1419
|
B
|
Stairs
|
Jett is tired after destroying the town and she wants to have a rest. She likes high places, that's why for having a rest she wants to get high and she decided to craft staircases.
A staircase is a squared figure that consists of square cells. Each staircase consists of an arbitrary number of stairs. If a staircase has $n$ stairs, then it is made of $n$ columns, the first column is $1$ cell high, the second column is $2$ cells high, $\ldots$, the $n$-th column is $n$ cells high. The lowest cells of all stairs must be in the same row.
A staircase with $n$ stairs is called nice, if it may be covered by $n$ \textbf{disjoint} squares made of cells. All squares should fully consist of cells of a staircase.
\begin{center}
This is how a nice covered staircase with $7$ stairs looks like:
\end{center}
Find out the maximal number of \textbf{different} nice staircases, that can be built, using no more than $x$ cells, \textbf{in total}. No cell can be used more than once.
|
Let's prove that the minimal amount of squares needed to cover the staircase is not less than $n$, where $n$ is the height of a staircase. The highest cell of each stair is the top left cell of some square. That's why we need at least $n$ squares. You need exactly $n$ squares if and only if the top left cell of each stair is a top left cell of some square. Let's consider a square that covers the lowest cell in the last stair. Its top left corner should contain the highest cell with index $\frac{n + 1}{2}$ for odd $n$. Then the staircase is divided into 2 staircases, each $\frac{n - 1}{2}$ stairs high. These staircases should be nice, too. It means that nice staircases are $2^k - 1$ stairs high, where $k \ge 1$. To maximize the amount of different staircases we should create staircases greedily. If $n$ is even, then we can consider a square that will have the lowest cell of the last stair. The top left corner of this square may not contain any top cells of a staircase, that's why you will need more than $n$ squares. This means that a staircase with an even height may not be nice.
|
[
"brute force",
"constructive algorithms",
"greedy",
"implementation",
"math"
] | 1,200
|
"#include <bits/stdc++.h>\n \nusing namespace std;\n \n#define ll long long\nconst int INF = 2e9 + 1;\n \nll getS(ll x) {\n\treturn x * (x + 1) / 2;\t\n}\n \nint main() {\n ios_base::sync_with_stdio(0);\n cin.tie(0);\n cout.tie(0);\n int T;\n cin >> T;\n while (T --> 0) {\n ll x;\n cin >> x;\n int ans = 0;\n for (int i = 1; getS((1LL << i) - 1) <= x; i++) {\n ans++;\n x -= getS((1LL << i) - 1);\n }\n cout << ans << '\\n';\n }\n return 0;\n}"
|
1419
|
C
|
Killjoy
|
A new agent called Killjoy invented a virus COVID-2069 that infects accounts on Codeforces. Each account has a rating, described by \textbf{an integer} (it can possibly be negative or very large).
Killjoy's account is already infected and has a rating equal to $x$. Its rating is constant. There are $n$ accounts except hers, numbered from $1$ to $n$. The $i$-th account's initial rating is $a_i$. Any infected account (initially the only infected account is Killjoy's) instantly infects any uninfected account if their ratings are equal. This can happen at the beginning (before any rating changes) and after each contest. If an account is infected, it can not be healed.
Contests are regularly held on Codeforces. In each contest, any of these $n$ accounts (including infected ones) can participate. Killjoy can't participate. After each contest ratings are changed this way: each participant's rating is changed by an integer, but the sum of all changes must be equal to zero. New ratings can be any integer.
Find out the minimal number of contests needed to infect all accounts. You can choose which accounts will participate in each contest and how the ratings will change.
It can be proven that all accounts can be infected in some finite number of contests.
|
If all $n$ accounts have the rating equal to $x$ then the answer is $0$. Now let's consider other cases. Let's try to make all ratings equal to $x$ in a single contest. It's possible only in two cases: 1. If at least one account is already infected we can infect all other accounts in a single contest. Let's say that some account $i$ is already infected, then we can change all other accounts to $x$ except $i$. Let's say that summary changes are $d$, then we can decrease $i$-th account's rating by $d$ and every account will be infected while the summary changes will be equal to zero. So this will take only $1$ contest. 2. $\sum\limits_{i=1}^{n}$ $(a_i - x) = 0$. In this case we can just make all ratings equal $x$ and the sum of all changes will be $0$ because of the equality, which means that we can infect everyone in only $1$ contest. In all other cases the answer is $2$. Let's prove that. We can make the ratings of first $(n-1)$ accounts equal to $x$ after the first contest and the last account will have rating equal to $a_n$ $-$ $\sum\limits_{i=1}^{n-1}$ $(a_i - x)$ so that the sum of rating changes is still equal to zero. After that first $(n-1)$ accounts are already infected and we can change the rating of the last account by $d$ so it's equal to $x$ and we will decrease the rating of the first account by $d$ so that the sum of rating changes is still equal to zero. After such two contests all accounts will be infected.
|
[
"greedy",
"implementation",
"math"
] | 1,500
|
"#include <bits/stdc++.h>\nusing namespace std;\n\nsigned main() {\n int T;\n cin >> T;\n while (T --> 0) {\n int n, x;\n cin >> n >> x;\n int cnt = 0;\n int sum = 0;\n for (int i = 0; i < n; ++i) {\n int val;\n cin >> val;\n cnt += (val == x);\n sum += val;\n }\n if (cnt == n) {\n \tcout << 0 << '\\n';\n } else if (cnt > 0) {\n \tcout << 1 << '\\n';\n } else if (sum == n * x) {\n \tcout << 1 << '\\n';\n } else {\n \tcout << 2 << '\\n';\n }\n }\n return 0;\n}"
|
1419
|
D2
|
Sage's Birthday (hard version)
|
\textbf{This is the hard version of the problem. The difference between the versions is that in the easy version all prices $a_i$ are different. You can make hacks if and only if you solved both versions of the problem.}
Today is Sage's birthday, and she will go shopping to buy ice spheres. All $n$ ice spheres are placed in a row and they are numbered from $1$ to $n$ from left to right. Each ice sphere has a positive integer price. In this version, some prices can be equal.
An ice sphere is cheap if it costs strictly less than two neighboring ice spheres: the nearest to the left and the nearest to the right. The leftmost and the rightmost ice spheres are not cheap. Sage will choose all cheap ice spheres and then buy only them.
You can visit the shop before Sage and reorder the ice spheres as you wish. Find out the maximum number of ice spheres that Sage can buy, and show how the ice spheres should be reordered.
|
Let's learn how to check whether it's possible to buy $x$ ice spheres. Let's sort the array $a$ in the non-decreasing order and then take $x$ smallest elements of it. We will suppose that these $x$ ice spheres will be cheap. To make these ice spheres cheap, we need $x+1$ ice spheres more, so let's take $x+1$ most expensive ice spheres. Why it's always good to take $x+1$ most expensive ice spheres? If we had an ice sphere with the price $y$ and we took an ice sphere with price $z \ge y$ the answer will not become worse. Now we know how to check whether it's possible to buy $x$ ice spheres. If we can buy $x$ ice spheres then it's also possible to buy $x-1$ ice spheres. For that reason the binary search for the answer is working.
|
[
"binary search",
"brute force",
"constructive algorithms",
"greedy",
"sortings",
"two pointers"
] | 1,500
|
"#include <bits/stdc++.h>\nusing namespace std;\n\nint main() {\n ios_base::sync_with_stdio(false);\n cin.tie(NULL);\n cout.tie(NULL);\n int n;\n cin >> n;\n vector<int> a(n);\n for (int i = 0; i < n; ++i) {\n cin >> a[i];\n }\n sort(a.begin(), a.end());\n int l = 0, r = n + 1;\n while (r - l > 1) {\n int m = (l + r) / 2;\n bool f = true;\n if (2 * m + 1 > n) {\n f = false;\n }\n else {\n vector<int> b;\n int pos_a = 0, pos_b = n - (m + 1);\n for (int i = 0; i < 2 * m + 1; ++i) {\n if (i % 2 == 0) {\n b.emplace_back(a[pos_b]);\n ++pos_b;\n }\n else {\n b.emplace_back(a[pos_a]);\n ++pos_a;\n }\n }\n for (int i = 1; i < 2 * m + 1; i += 2) {\n if (b[i] >= b[i - 1] || b[i] >= b[i + 1])\n f = false;\n }\n }\n if (f)\n l = m;\n else\n r = m;\n }\n cout << l << endl;\n vector<int> b;\n int pos_a = 0, pos_b = n - (l + 1);\n for (int i = 0; i < 2 * l + 1; ++i) {\n if (i % 2 == 0) {\n b.emplace_back(a[pos_b]);\n ++pos_b;\n }\n else {\n b.emplace_back(a[pos_a]);\n ++pos_a;\n }\n }\n for (int i = pos_a; i < n - (l + 1); ++i) {\n b.emplace_back(a[i]);\n }\n for (auto &c : b) {\n cout << c << \" \";\n }\n}"
|
1419
|
E
|
Decryption
|
An agent called Cypher is decrypting a message, that contains a composite number $n$. All divisors of $n$, which are greater than $1$, are placed in a circle. Cypher can choose the initial order of numbers in the circle.
In one move Cypher can choose two adjacent numbers in a circle and insert their least common multiple between them. He can do that move as many times as needed.
A message is decrypted, if every two adjacent numbers are not coprime. Note that for such constraints it's always possible to decrypt the message.
Find the minimal number of moves that Cypher should do to decrypt the message, and show the initial order of numbers in the circle for that.
|
Let's factorize $n$: $n = p_1^{q_1} \cdot p_2^{q_2} \cdot \dots \cdot p_k^{q_k}$ If $k = 2$ and $q_1 = q_2 = 1$ (i.e. $n$ is the product of two different prime numbers)Divisors $p_1$ and $p_2$ will definately be adjacent and they are coprime so we should make one operation to insert their lcm between them. After that the circle will be $p_1$, $p_1 \cdot p_2 = n$, $p_2$, $p_1 \cdot p_2 = n$ and there will be no such two adjacent numbers that are coprime. The answer is 1. Divisors $p_1$ and $p_2$ will definately be adjacent and they are coprime so we should make one operation to insert their lcm between them. After that the circle will be $p_1$, $p_1 \cdot p_2 = n$, $p_2$, $p_1 \cdot p_2 = n$ and there will be no such two adjacent numbers that are coprime. The answer is 1. If $k = 2$ and $q_1 > 1$ or $q_2 > 1$, then we can firstly place numbers $p_1, p_1 \cdot p_2, p_2, n$. After that we can insert all unused divisors that are multiples of $p_1$ between $p_1$ and $n$, all divisors that are multiples of $p_2$ between $p_2$ and $n$. It is easy to see that in this case the answer is 0. In another case it is possible to arrange the divisors so that there are no such two adjacent numbers that are coprime. Firstly, we need to arrange in a circle these numbers: $p_1$, $p_2$, $p_3$, ..., $p_k$ After that we need to write down the products of these numbers between them: $p_1$, $p_1 \cdot p_2$, $p_2$, $p_2 \cdot p_3$, ..., $p_k$, $p_k \cdot p_1$ From now on we can just place unused numbers that way: insert all unused divisors, that are multiples of $p_1$, after $p_1$, insert all unused divisors, that are multiples of $p_2$, after $p_2$ and so on. If the solution is still unclear you may take a look at the image below. The answer in this case is 0. 
$p_1$, $p_2$, $p_3$, ..., $p_k$ After that we need to write down the products of these numbers between them: $p_1$, $p_1 \cdot p_2$, $p_2$, $p_2 \cdot p_3$, ..., $p_k$, $p_k \cdot p_1$ From now on we can just place unused numbers that way: insert all unused divisors, that are multiples of $p_1$, after $p_1$, insert all unused divisors, that are multiples of $p_2$, after $p_2$ and so on. If the solution is still unclear you may take a look at the image below. The answer in this case is 0.
|
[
"constructive algorithms",
"implementation",
"math",
"number theory"
] | 2,100
|
"#include <bits/stdc++.h>\nusing namespace std;\n\nbool prime(int x) {\n\tif (x == 2 || x == 3) return true;\n\tfor (int i = 2; i * i <= x; ++i) {\n\t\tif (x % i == 0) return false;\n\t}\n\treturn true;\n}\n\nsigned main() {\n\tint T;\n\tcin >> T;\n\twhile (T --> 0) {\n\t\tint n;\n\t\tcin >> n;\n\t\tvector<int> d;\n\t\tfor (int i = 2; i * i <= n; ++i) {\n\t\t\tif (n % i == 0) {\n\t\t\t\td.emplace_back(i);\n\t\t\t\td.emplace_back(n / i);\n\t\t\t}\n\t\t}\n\t\td.emplace_back(n);\n\t\tsort(d.begin(), d.end());\n\t\td.resize(unique(d.begin(), d.end()) - d.begin());\n\n\t\tif (d.size() == 3 && prime(d[0]) && prime(d[1])) {\n\t\t\tfor (auto x : d) cout << x << ' ';\n\t\t\tcout << '\\n' << 1 << '\\n';\n\t\t\tcontinue;\n\t\t}\n\n\t\tunordered_map<int, bool> used;\n\t\tvector<int> primes;\n\t\tfor (int i = 2; i * i <= n; ++i) {\n\t\t\tif (n % i == 0) {\n\t\t\t\tprimes.emplace_back(i);\n\t\t\t\twhile (n % i == 0) n /= i;\n\t\t\t}\n\t\t}\n\t\tif (n > 1) primes.emplace_back(n);\n\n\t\tvector<int> connect(primes.size());\n\t\tfor (int i = 0; i < (int)primes.size(); ++i) {\n\t\t\tint p = primes[i], q = primes[(i + 1) % primes.size()];\n\t\t\tfor (int j = 0; j < (int)d.size(); ++j) {\n\t\t\t\tif (!used[d[j]] && d[j] % p == 0 && d[j] % q == 0) {\n\t\t\t\t\tused[d[j]] = true;\n\t\t\t\t\tconnect[i] = d[j];\n\t\t\t\t\tbreak;\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor (int i = 0; i < (int)primes.size(); ++i) {\n\t\t\tint p = primes[i];\n\t\t\tused[p] = true;\n\t\t\tcout << p << ' ';\n\t\t\tfor (int j = 0; j < (int)d.size(); ++j) {\n\t\t\t\tif (!used[d[j]] && d[j] % p == 0) {\n\t\t\t\t\tused[d[j]] = true;\n\t\t\t\t\tcout << d[j] << ' ';\n\t\t\t\t}\n\t\t\t}\n\t\t\tif (primes.size() > 1) {\n\t\t\t\tcout << connect[i] << ' ';\n\t\t\t}\n\t\t}\n\t\tcout << '\\n' << 0 << '\\n';\n\t}\n\treturn 0;\n}"
|
1419
|
F
|
Rain of Fire
|
There are $n$ detachments on the surface, numbered from $1$ to $n$, the $i$-th detachment is placed in a point with coordinates $(x_i, y_i)$. All detachments are placed in different points.
Brimstone should visit each detachment at least once. You can choose the detachment where Brimstone starts.
To move from one detachment to another he should first choose one of four directions of movement (up, right, left or down) and then start moving with the constant speed of one unit interval in a second until he comes to a detachment. After he reaches an arbitrary detachment, he can repeat the same process.
Each $t$ seconds an orbital strike covers the whole surface, so at that moment Brimstone should be in a point where some detachment is located. He can stay with any detachment as long as needed.
Brimstone is a good commander, that's why he can create \textbf{at most one} detachment and place it in any empty point with integer coordinates he wants before his trip. Keep in mind that Brimstone will need to visit this detachment, too.
Help Brimstone and find such minimal $t$ that it is possible to check each detachment. If there is no such $t$ report about it.
|
We can consider a graph where vertices are the points (detachments), and there is an edge between two points, if it's possible to move from one point to another. It is possible if these points are on the same line ($x_i = x_j$ or $y_i = y_j$) and the distance between them is $\le T$. Now we can check, whether current $t$ value is good (whether it is possible to check all detachments). It is easy to see, that it is only possible, when the graph is connected. This means, that we can make a binary search for $t$. Let's now learn how to check, whether it is possible to add at most one point to make the graph connected. If there is $1$ component, then the graph is already connected. If there are $2$ components, then we can search through all such pairs of points, that one point is from the first component, and another point is from the second component. We can connect these points, if they are on one line, and the distance between them is $\le 2T$ or the differences $|x_1 - x_2| \le T$ and $y_1 - y_2 \le T$. If $3$ are three components, then we should consider triples of points. Two of these points should be on the same line. The added point should be on a segment between these two points, and there are $O(n)$ such segments. Now let's search through all such pairs (segment, point) and check whether it is possible to place a point on the segment and connect it with the point from the pair. If there are $4$ components, then you can search trough pairs (segment, segment), so that one segment is horizontal and another one is vertical (they should make a cross). Now we just need to check whether it is possible to connect their intersection point with all 4 endpoints of the segments. If there are more, than $4$ components, then it is not possible to connect them adding only one point, because there are 4 movement directions. 
If your binary search did not find the answer even for $T = 2 \cdot 10^9$, then the answer is $-1$, because the maximal distance between any two points is $\le 2 \cdot 10^9$.
|
[
"binary search",
"data structures",
"dfs and similar",
"dsu",
"graphs",
"implementation"
] | 2,800
|
"#include<bits/stdc++.h>\n\nusing namespace std;\n\n#define pb emplace_back\n#define fi first\n#define se second\n#define all(x) (x).begin(), (x).end()\n#define ll long long\n#define pii pair<int, int>\n\nconst int INF = 2e9 + 1;\n\nvector<vector<int>> g;\nvector<int> usd;\n\nvoid dfs(int v, int col) {\n usd[v] = col;\n for (auto &to : g[v]) {\n if (!usd[to]) dfs(to, col);\n }\n}\n\nint main(){\n cin.tie(0);\n cout.tie(0);\n ios_base::sync_with_stdio(0);\n int n;\n cin >> n;\n vector<int> x(n), y(n);\n for (int i = 0; i < n; i++) cin >> x[i] >> y[i];\n map<int, vector<pii>> same_x, same_y;\n for (int i = 0; i < n; i++) {\n same_x[x[i]].pb(y[i], i);\n same_y[y[i]].pb(x[i], i);\n }\n for (auto &c : same_x) sort(all(c.se));\n for (auto &c : same_y) sort(all(c.se));\n ll left = -1, right = INF;\n while (right - left > 1) {\n ll mid = (left + right) / 2;\n g.clear();\n g.resize(n);\n for (auto &c : same_x) {\n for (int i = 1; i < (int)c.se.size(); i++) {\n if (c.se[i].fi - c.se[i - 1].fi <= mid) {\n g[c.se[i].se].pb(c.se[i - 1].se);\n g[c.se[i - 1].se].pb(c.se[i].se);\n }\n }\n }\n for (auto &c : same_y) {\n for (int i = 1; i < (int)c.se.size(); i++) {\n if (c.se[i].fi - c.se[i - 1].fi <= mid) {\n g[c.se[i].se].pb(c.se[i - 1].se);\n g[c.se[i - 1].se].pb(c.se[i].se);\n }\n }\n }\n int cur = 1;\n usd.assign(n, 0);\n for (int i = 0; i < n; i++) {\n if (usd[i]) continue;\n dfs(i, cur);\n cur++;\n }\n cur--;\n if (cur > 4) {\n left = mid;\n continue;\n }\n bool ok = false;\n if (cur == 1) {\n ok = true;\n } else if (cur == 2) {\n for (int i = 0; i < n; i++) {\n for (int j = i + 1; j < n; j++) {\n if (usd[i] == usd[j]) continue;\n if (x[i] == x[j] && abs(y[i] - y[j]) <= 2 * mid) ok = true;\n if (y[i] == y[j] && abs(x[i] - x[j]) <= 2 * mid) ok = true;\n if (abs(x[i] - x[j]) <= mid && abs(y[i] - y[j]) <= mid) ok = true;\n }\n }\n } else if (cur == 3) {\n vector<pii> seg;\n for (auto &c : same_x) {\n for (int i = 1; i < (int)c.se.size(); i++) {\n if (usd[c.se[i].se] != 
usd[c.se[i - 1].se]) {\n seg.pb(c.se[i].se, c.se[i - 1].se);\n }\n }\n }\n for (auto &c : same_y) {\n for (int i = 1; i < (int)c.se.size(); i++) {\n if (usd[c.se[i].se] != usd[c.se[i - 1].se]) {\n seg.pb(c.se[i].se, c.se[i - 1].se);\n }\n }\n }\n for (auto &c : seg) {\n int i = c.fi, j = c.se;\n for (int k = 0; k < n; k++) {\n if (usd[k] == usd[i] || usd[k] == usd[j]) continue;\n if (x[i] == x[j]) {\n if (min(y[i], y[j]) >= y[k] || max(y[i], y[j]) <= y[k]) continue;\n if (abs(x[i] - x[k]) > mid) continue;\n if (abs(y[i] - y[k]) <= mid && abs(y[j] - y[k]) <= mid) ok = true;\n } else {\n if (min(x[i], x[j]) >= x[k] || max(x[i], x[j]) <= x[k]) continue;\n if (abs(y[i] - y[k]) > mid) continue;\n if (abs(x[i] - x[k]) <= mid && abs(x[j] - x[k]) <= mid) ok = true;\n }\n }\n }\n } else {\n vector<pii> segx, segy;\n for (auto &c : same_x) {\n for (int i = 1; i < (int)c.se.size(); i++) {\n if (usd[c.se[i].se] != usd[c.se[i - 1].se]) {\n segx.pb(c.se[i].se, c.se[i - 1].se);\n }\n }\n }\n for (auto &c : same_y) {\n for (int i = 1; i < (int)c.se.size(); i++) {\n if (usd[c.se[i].se] != usd[c.se[i - 1].se]) {\n segy.pb(c.se[i].se, c.se[i - 1].se);\n }\n }\n }\n for (auto &c : segx) {\n for (auto &l : segy) {\n int i = c.fi, j = c.se, k = l.fi, p = l.se;\n if (usd[i] == usd[k] || usd[i] == usd[p] || usd[j] == usd[p] || usd[j] == usd[k]) continue;\n if (min(y[i], y[j]) >= y[k]) continue;\n if (max(y[i], y[j]) <= y[k]) continue;\n if (min(x[k], x[p]) >= x[i]) continue;\n if (max(x[k], x[p]) <= x[i]) continue;\n int x0 = x[i], y0 = y[k];\n if (abs(y[i] - y0) <= mid && abs(y[j] - y0) <= mid && abs(x[p] - x0) <= mid && abs(x[k] - x0) <= mid) ok = true;\n }\n }\n }\n if (ok) right = mid;\n else left = mid;\n }\n cout << (right == INF ? -1 : right);\n return 0;\n}"
|
1420
|
A
|
Cubes Sorting
|
\begin{quote}
{{\small For god's sake, you're boxes with legs! It is literally your only purpose! Walking onto buttons! How can you not do the one thing you were designed for?Oh, that's funny, is it? Oh it's funny? Because we've been at this for twelve hours and you haven't solved it either, so I don't know why you're laughing. You've got one hour! Solve it!}}
\end{quote}
Wheatley decided to try to make a test chamber. He made a nice test chamber, but there was only one detail absent — cubes.
For completing the chamber Wheatley needs $n$ cubes. $i$-th cube has a volume $a_i$.
Wheatley has to place cubes in such a way that they would be sorted in a non-decreasing order by their volume. Formally, for each $i>1$, $a_{i-1} \le a_i$ must hold.
To achieve his goal, Wheatley can exchange two \textbf{neighbouring} cubes. It means that for any $i>1$ you can exchange cubes on positions $i-1$ and $i$.
But there is a problem: Wheatley is very impatient. If Wheatley needs more than $\frac{n \cdot (n-1)}{2}-1$ exchange operations, he won't do this boring work.
Wheatley wants to know: can the cubes be sorted under these conditions?
|
It is not difficult to see that the answer <<NO>> in this task is possible when and only when all $a_i$ are different and sorted in descending order. In this case we need $\frac{n \cdot (n-1)}{2}$ operations. Otherwise the answer is always <<YES>>. Why does this solution work? Let's define number of inversions as the number of pairs $1 \le i < j \le n$ such as $a_i > a_j$. Note that if the number of inversions is zero, the $a$ array is sorted in non-decreasing order. If the array is not sorted, we can always choose two neighboring elements such that $a_i > a_{i+1}$ and swap them. In this case, the number of inversions is reduced by one. In this case, we cannot reduce the number of inversions by more than one, so it is equal to the minimum number of operations we must perform. Now, all we have to do is notice that the number of inversions does not exceed $\frac{n(n-1)}2$, and the maximum is only reached when $a_i > a_j$ for all pairs $1 \le i < j \le n$. It follows that in this case the array must be strictly descending. Thus, we have a solution with a time of $O(n)$.
|
[
"math",
"sortings"
] | 900
|
#include<iostream>
using namespace std;
int a[1000000+5];
// Cubes Sorting: the answer is "NO" exactly when the array is strictly
// decreasing — only then are all n(n-1)/2 inversions present, which exceeds
// the allowed budget of n(n-1)/2 - 1 adjacent swaps.
int main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    int tests;
    cin>>tests;
    while (tests--)
    {
        int n;
        cin>>n;
        for (int i=0; i<n; i++) cin>>a[i];
        bool strictlyDecreasing = true;
        for (int i=1; i<n && strictlyDecreasing; i++)
        {
            // Any non-inverted adjacent pair breaks strict decrease.
            if (a[i] >= a[i-1]) strictlyDecreasing = false;
        }
        cout << (strictlyDecreasing ? "NO" : "YES") << '\n';
    }
}
|
1420
|
B
|
Rock and Lever
|
\begin{quote}
{{\small "You must lift the dam. With a lever. I will give it to you.You must block the canal. With a rock. I will not give the rock to you."}}
\end{quote}
Danik urgently needs rock and lever! Obviously, the easiest way to get these things is to ask Hermit Lizard for them.
Hermit Lizard agreed to give Danik the lever. But to get a stone, Danik needs to solve the following task.
You are given a positive integer $n$, and an array $a$ of positive integers. The task is to calculate the number of such pairs $(i,j)$ that $i<j$ and $a_i$ $\&$ $a_j \ge a_i \oplus a_j$, where $\&$ denotes the bitwise AND operation, and $\oplus$ denotes the bitwise XOR operation.
Danik has solved this task. But can you solve it?
|
Let's take a pair $(a_i, a_j)$ and see in which case $a_i\ \&\ a_j \ge a_i \oplus a_j$ will hold. For this we will follow the bits $a_i$ and $a_j$ from highest to lowest. If we meet two zero bits, the values of $a_i\ \&\ a_j$ and $a_i \oplus a_j$ will match in this bit, so we move on. If we meet a zero bit in $a_i$ and in $a_j$ -one bit(or vice versa), then we get $a_i\ \&\ a_j < a_i \oplus a_j$, and we can immediately say that the required condition is false. And if we meet two one bits, then the required condition is fulfilled, e. $a_i\ \&\ a_j > a_i \oplus a_j$, and then the bits can no longer be considered. Now let's consider the highest one bit in the number of $a_i$ (let it stand at $p_i$ position) and the highest single bit in the number of $a_j$ (let it stand at $p_j$ position). (Here, we consider that the bits are numbered in order of lowest to highest.) Then, $p_i = p_j$ must hold. If $p_i > p_j$, then there is zero in the $a_j$ position and one unit in the $a_i$ position. But then from the reasoning above we get that $a_i\ \&\ a_j < a_i \oplus a_j$. The case of $p_i < p_j$ is treated in a similar way. It is also easy to see that if $p_i = p_j$ then we automatically get the condition $a_i\ \&\ a_j > a_i \oplus a_j$. From here the problem is solved. For each number we find the position of the highest one bit $p_i$. Then we need to calculate the number of pairs of numbers, for which $p_i = p_j$. You may notice that the answer is $\sum\limits_\ell \frac{k_\ell\cdot(k_\ell-1)}2$, where $k_\ell$ - the number of numbers for which $p_i = p_j$. The complexity of the solution is $O(n)$.
|
[
"bitmasks",
"math"
] | 1,200
|
#include<iostream>
#include<vector>
#include<algorithm>
#include<ctime>
#include<random>
using namespace std;
mt19937 rnd(time(NULL));  // NOTE(review): unused in this solution — likely leftover scaffolding
int a[1000000+5];         // input values for the current test case
// Rock and Lever: count pairs (i, j), i < j, with a_i & a_j >= a_i ^ a_j.
// The condition holds exactly when a_i and a_j share the same highest set
// bit (see the editorial above), so bucket the numbers by MSB position and
// sum C(k, 2) over the buckets. This is a single O(n) pass per test instead
// of the naive 30 full-array scans.
int main()
{
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    int t;
    cin>>t;
    while (t--)
    {
        int n;
        cin>>n;
        for (int i=0; i<n; i++)
        {
            cin>>a[i];
        }
        // cnt[j] = how many values have their highest set bit at position j
        // (inputs are positive and < 2^30 per the constraints, so j <= 29).
        int64_t cnt[30] = {};
        for (int i=0; i<n; i++)
        {
            int msb = 0;
            while (msb + 1 < 30 && a[i] >= (1 << (msb + 1))) msb++;
            cnt[msb]++;
        }
        int64_t ans = 0;
        for (int j = 0; j < 30; j++) ans += cnt[j] * (cnt[j] - 1) / 2;
        cout<<ans<<'\n';
    }
}
|
1420
|
C1
|
Pokémon Army (easy version)
|
\textbf{This is the easy version of the problem. The difference between the versions is that the easy version has no swap operations. You can make hacks only if all versions of the problem are solved.}
Pikachu is a cute and friendly pokémon living in the wild pikachu herd.
But it has become known recently that infamous team R wanted to steal all these pokémon! Pokémon trainer Andrew decided to help Pikachu to build a pokémon army to resist.
First, Andrew counted all the pokémon — there were exactly $n$ pikachu. The strength of the $i$-th pokémon is equal to $a_i$, and all these numbers are distinct.
As an army, Andrew can choose any non-empty subsequence of pokemons. In other words, Andrew chooses some array $b$ from $k$ indices such that $1 \le b_1 < b_2 < \dots < b_k \le n$, and his army will consist of pokémons with forces $a_{b_1}, a_{b_2}, \dots, a_{b_k}$.
The strength of the army is equal to the alternating sum of elements of the subsequence; that is, $a_{b_1} - a_{b_2} + a_{b_3} - a_{b_4} + \dots$.
Andrew is experimenting with pokémon order. He performs $q$ operations. In $i$-th operation Andrew swaps $l_i$-th and $r_i$-th pokémon.
\textbf{Note: $q=0$ in this version of the task.}
Andrew wants to know the maximal strength of the army he can achieve with the initial pokémon placement. He also needs to know the maximal strength after each operation.
Help Andrew and the pokémon, or team R will realize their tricky plan!
|
The easy version of the task can be solved in different ways. For example, you can use the dynamic programming method. Let $d1_i$ - be the maximum possible sum of a subsequence on a prefix from the first $i$ elements, provided that the length of the subsequence is odd. Similarly enter $d2_i$, only for subsequences of even length. Then $d1_i$ and $d2_i$ are easy to recalculate: $d1_{i+1} = \max(d1_i,\ d2_i + a_i),$ $d2_{i+1} = \max(d2_i,\ d1_i - a_i).$ This solution works for $O(n)$ in time. Its main drawback is that it cannot be used to solve a complex version of a task where a different approach is needed.
|
[
"constructive algorithms",
"dp",
"greedy"
] | 1,300
|
#include <iostream>
#include <vector>
using namespace std;
// Maximum alternating sum a[b1] - a[b2] + a[b3] - ... over all subsequences
// of `a` (0 for the empty array). Rolling DP: track the best sum of an
// odd-length and of an even-length (possibly empty) subsequence seen so far.
inline int64_t calc(const vector<int> &a) {
    const int64_t NEG = -static_cast<int64_t>(1e18);  // "no odd-length subsequence yet"
    int64_t bestOdd = NEG;
    int64_t bestEven = 0;
    for (int v : a) {
        // Compute both updates from the previous state before overwriting.
        int64_t nextOdd = max(bestOdd, bestEven + v);
        int64_t nextEven = max(bestEven, bestOdd - v);
        bestOdd = nextOdd;
        bestEven = nextEven;
    }
    return max(bestOdd, bestEven);
}
// Reads t test cases; for each, prints the best army strength for the
// initial order and after every swap (q = 0 in the easy version).
int main() {
    ios_base::sync_with_stdio(false);
    int tests;
    cin >> tests;
    while (tests--) {
        int n, q;
        cin >> n >> q;
        vector<int> a(n);
        for (auto &x : a) cin >> x;
        cout << calc(a) << "\n";
        while (q--) {
            int l, r;
            cin >> l >> r;
            swap(a[l - 1], a[r - 1]);  // inputs are 1-based
            cout << calc(a) << "\n";
        }
    }
    return 0;
}
|
1420
|
C2
|
Pokémon Army (hard version)
|
\textbf{This is the hard version of the problem. The difference between the versions is that the easy version has no swap operations. You can make hacks only if all versions of the problem are solved.}
Pikachu is a cute and friendly pokémon living in the wild pikachu herd.
But it has become known recently that infamous team R wanted to steal all these pokémon! Pokémon trainer Andrew decided to help Pikachu to build a pokémon army to resist.
First, Andrew counted all the pokémon — there were exactly $n$ pikachu. The strength of the $i$-th pokémon is equal to $a_i$, and all these numbers are distinct.
As an army, Andrew can choose any non-empty subsequence of pokemons. In other words, Andrew chooses some array $b$ from $k$ indices such that $1 \le b_1 < b_2 < \dots < b_k \le n$, and his army will consist of pokémons with forces $a_{b_1}, a_{b_2}, \dots, a_{b_k}$.
The strength of the army is equal to the alternating sum of elements of the subsequence; that is, $a_{b_1} - a_{b_2} + a_{b_3} - a_{b_4} + \dots$.
Andrew is experimenting with pokémon order. He performs $q$ operations. In $i$-th operation Andrew swaps $l_i$-th and $r_i$-th pokémon.
Andrew wants to know the maximal strength of the army he can achieve with the initial pokémon placement. He also needs to know the maximal strength after each operation.
Help Andrew and the pokémon, or team R will realize their tricky plan!
|
Let's give a solution for a fixed array and then prove its optimality. Let us name the element $a_i$ a local maximum if $a_i > a_{i-1}$ and $a_i > a_{i+1}$. Similarly let's call the element $a_i$ a local minimum if $a_i < a_{i-1}$ and $a_i < a_{i+1}$. If any of the $a_{i-1}$ or $a_{i+1}$ does not exist in the definitions above, we assume it is equal to $-\infty$. Note that the optimal subsequence will always be of odd length (otherwise we can delete the last element and increase the answer). Elements with odd numbers shall be located at local maximums, and elements with even numbers - at local minimums. It is not difficult to see that the first local maximum is always placed earlier than the first local minimum (otherwise it would happen that the initial permutation decreases from the first element to the local minimum, in which case the first element itself is the local maximum). Similarly, you may notice that the last local maximum always occurs later than the last local minimum. Given that the local maximums and minimums alternate, you can simply take a subsequence of all the local maximums and minimums and get the best answer. Let's show that this construction is always optimal. Let's start with the case when an element with an odd number is not a local maximum. In this case, it shall be replaced with a bigger neighbor, and if the bigger neighbor is already in the sub-set, just delete both of these elements. After that, the answer shall always increase. The same shall apply if the even-numbered element is not a local minimum. In this case, it may still happen that we cannot move the element downwards because it is on the edge. But then it is the last one in the subsequence, and it can be easily removed. Thus, it is optimal to take only local highs and lows into the subsequence (considering that highs are on odd positions and lows - on even positions). It remains to be shown that it is profitable to take all local maximums and minimums. 
Indeed, if not all of them are involved, then there is a pair of standing local highs and lows. By adding them to the subsequence, we will increase the answer. Okay. We know how to solve a problem for an initial array by reducing it to the sum of all local maximums and minimums. We will now learn how to process requests quickly. To do this, we will store whether an element is a local minimum or maximum and recalculate this information when exchanging elements. Suddenly it turns out that a single request will change the state of no more than six elements, so we can easily recalculate the response for $O(1)$ per request. Thus, we have a solution with an asymptotic $O(n + q)$ in time.
|
[
"data structures",
"divide and conquer",
"dp",
"greedy",
"implementation"
] | 2,100
|
#include<iostream>
using namespace std;
#define int long long
int a[1000000+5];
int n;
int ans=0;
// Add position i's contribution to the running answer `ans`: a local
// maximum is added, a local minimum is subtracted. The sentinel positions
// 0 and n+1 never contribute.
inline void insert(int i)
{
    if (i == 0 || i == n + 1) return;
    const auto left = a[i - 1], mid = a[i], right = a[i + 1];
    if (mid > left && mid > right) ans += mid;       // local maximum
    else if (mid < left && mid < right) ans -= mid;  // local minimum
}
inline void erase(int i)
{
if (i==0||i==n+1) return;
if (a[i-1]<a[i]&&a[i]>a[i+1]) ans-=a[i];
if (a[i-1]>a[i]&&a[i]<a[i+1]) ans+=a[i];
}
// Driver: answer = sum of local maxima minus sum of local minima; each swap
// only disturbs positions l-1..l+1 and r-1..r+1, so their contributions are
// removed, the swap applied, and the contributions re-added in O(1).
int32_t main()
{
ios_base::sync_with_stdio(false);
cin.tie(0);
cout.tie(0);
int t;
cin>>t;
while (t--)
{
int q;
cin>>n>>q;
for (int i=1; i<=n; i++)
{
cin>>a[i];
}
// Sentinels of -1 on both sides so border elements can be local maxima
// but never local minima (assumes all strengths are >= 0 — per the
// statement they are distinct positive values).
a[0]=-1;
a[n+1]=-1;
// Initial answer: scan once for all local extrema.
ans=0;
for (int i=1; i<=n; i++)
{
if (a[i-1]<a[i]&&a[i]>a[i+1]) ans+=a[i];
if (a[i-1]>a[i]&&a[i]<a[i+1]) ans-=a[i];
}
cout<<ans<<'\n';
while (q--)
{
int l,r;
cin>>l>>r;
// Remove the old contributions of every position whose extremum
// status can change. The guards make sure an index shared by the
// two windows is erased (and later re-inserted) exactly once.
erase(l-1);
erase(l);
erase(l+1);
if (l!=r)
{
if (r-1!=l+1&&r-1!=l) erase(r-1);
if (r!=l+1) erase(r);
erase(r+1);
}
swap(a[l],a[r]);
// Re-add the contributions of the same positions with the new values.
insert(l-1);
insert(l);
insert(l+1);
if (l!=r)
{
if (r-1!=l+1&&r-1!=l) insert(r-1);
if (r!=l+1) insert(r);
insert(r+1);
}
cout<<ans<<'\n';
}
}
}
|
1420
|
D
|
Rescue Nibel!
|
Ori and Sein have overcome many difficult challenges. They finally lit the Shrouded Lantern and found Gumon Seal, the key to the Forlorn Ruins. When they tried to open the door to the ruins... nothing happened.
Ori was very surprised, but Sein gave the explanation quickly: clever Gumon decided to make an additional defence for the door.
There are $n$ lamps with Spirit Tree's light. Sein knows the time of turning on and off for the $i$-th lamp — $l_i$ and $r_i$ respectively. To open the door you have to choose $k$ lamps in such a way that there will be a moment of time when they all will be turned on.
While Sein decides which of the $k$ lamps to pick, Ori is interested: how many ways there are to pick such $k$ lamps that the door will open? It may happen that Sein may be wrong and there are no such $k$ lamps. The answer might be large, so print it modulo $998\,244\,353$.
|
In this task, we need to find the number of sets of $k$ segments such that these $k$ segments intersect at least in one point. Let's look at the starting point of the intersection. This point will always be the beginning of a segment. Let us find the number of sets of segments whose intersection begins at the point $x$. Let us denote $p(x)$ as the number of segments that pass through this point, and $s(x)$ as the number of segments that start at this point. Then all the $k$ segments must pass through $x$ and at least one segment must start at $x$. The number of sets of segments passing through $x$ is ${p(x) \choose k}$ and the number of sets of segments passing through $x$, none of which starts at $x$, is ${p(x) - s(x) \choose k}$. From here we obtain that the required number of segment sets is ${p(x) \choose k} - {p(x) - s(x) \choose k}$. By summing up all possible $x$ values, we get the answer to the task. It should be noted that $p(x)$ and $s(x)$ can be easily maintained using the event method. Then, the total runtime will be $O(n\log n)$.
|
[
"combinatorics",
"data structures",
"sortings"
] | 1,800
|
import java.io.*;
import java.util.*;
public class D_Java {
    /** Prime modulus from the problem statement. */
    public static final int MOD = 998244353;

    /** Modular multiplication (a * b) mod MOD, computed in 64 bits to avoid overflow. */
    public static int mul(int a, int b) {
        return (int)((long)a * (long)b % MOD);
    }

    // f[i] = i! mod MOD, rf[i] = (i!)^{-1} mod MOD.
    int[] f;
    int[] rf;

    /** Binomial coefficient C(n, k) mod MOD; 0 when k is outside [0, n]. */
    public int C(int n, int k) {
        return (k < 0 || k > n) ? 0 : mul(f[n], mul(rf[n-k], rf[k]));
    }

    /** Binary exponentiation: a^n mod MOD. */
    public static int pow(int a, int n) {
        int res = 1;
        while (n != 0) {
            if ((n & 1) == 1) {
                res = mul(res, a);
            }
            a = mul(a, a);
            n >>= 1;
        }
        return res;
    }

    /** Fisher-Yates shuffle; protects Arrays.sort(int[]) from anti-quicksort inputs. */
    static void shuffleArray(int[] a) {
        Random rnd = new Random();
        for (int i = a.length-1; i > 0; i--) {
            int index = rnd.nextInt(i + 1);
            int tmp = a[index];
            a[index] = a[i];
            a[i] = tmp;
        }
    }

    /** Modular inverse via Fermat's little theorem (MOD is prime). */
    public static int inv(int a) {
        return pow(a, MOD-2);
    }

    /**
     * Sweep-line over lamp endpoints. At each opening point x add
     * C(p, k) - C(p - s, k): the k-subsets passing through x that use at
     * least one lamp starting exactly at x (see the editorial formula).
     */
    public void doIt() throws IOException {
        BufferedReader in = new BufferedReader(new InputStreamReader(System.in));
        StringTokenizer tok = new StringTokenizer(in.readLine());
        int n = Integer.parseInt(tok.nextToken());
        int k = Integer.parseInt(tok.nextToken());
        f = new int[n+42];
        rf = new int[n+42];
        f[0] = 1;
        for (int i = 1; i < f.length; ++i) {
            f[i] = mul(f[i-1], i);
        }
        // One modular inverse for the whole table instead of one modpow per
        // index: rf[i-1] = rf[i] * i, seeded with rf[last] = inv(f[last]).
        rf[f.length - 1] = inv(f[f.length - 1]);
        for (int i = f.length - 1; i > 0; --i) {
            rf[i-1] = mul(rf[i], i);
        }
        // Lamp [l, r] opens at 2l and closes at 2r+1, so after sorting all
        // openings at a coordinate are processed before any closing there.
        int[] events = new int[2*n];
        for (int i = 0; i < n; ++i) {
            tok = new StringTokenizer(in.readLine());
            int le = Integer.parseInt(tok.nextToken());
            int ri = Integer.parseInt(tok.nextToken());
            events[i] = le*2;
            events[i + n] = ri*2 + 1;
        }
        shuffleArray(events);
        Arrays.sort(events);
        int ans = 0;
        int balance = 0; // number of currently open lamps
        for (int r = 0; r < 2*n;) {
            // Group equal events: `added` lamps open (or close) at once.
            int l = r;
            while (r < 2*n && events[l] == events[r]) {
                ++r;
            }
            int added = r - l;
            if (events[l] % 2 == 0) {
                // Opening point: new subsets = C(balance+added, k) - C(balance, k).
                ans += C(balance + added, k);
                if (ans >= MOD) ans -= MOD;
                ans += MOD - C(balance, k);
                if (ans >= MOD) ans -= MOD;
                balance += added;
            } else {
                // Closing point.
                balance -= added;
            }
        }
        in.close();
        System.out.println(ans);
    }

    public static void main(String[] args) throws IOException {
        (new D_Java()).doIt();
    }
}
|
1420
|
E
|
Battle Lemmings
|
A lighthouse keeper Peter commands an army of $n$ battle lemmings. He ordered his army to stand in a line and numbered the lemmings from $1$ to $n$ from left to right. Some of the lemmings hold shields. Each lemming cannot hold more than one shield.
The more protected Peter's army is, the better. To calculate the protection of the army, he finds the number of protected pairs of lemmings, that is such pairs that both lemmings in the pair don't hold a shield, but there is a lemming with a shield between them.
Now it's time to prepare for defence and increase the protection of the army. To do this, Peter can give orders. He chooses a lemming with a shield and gives him one of the two orders:
- give the shield to the left neighbor if it exists and doesn't have a shield;
- give the shield to the right neighbor if it exists and doesn't have a shield.
In one second Peter can give exactly one order.
It's not clear how much time Peter has before the defence. So he decided to determine the maximal value of army protection for each $k$ from $0$ to $\frac{n(n-1)}2$, if he gives no more that $k$ orders. Help Peter to calculate it!
|
First, let us denote $f(A)$ as a conversion of the original sequence of $A$. In the beginning, we will write down the number of zeros before the the first one. Then, we write down the number of zeros standing between the first and second ones, then - between the second and third ones, and so on. For example, $f(011000101100) = \{1, 0, 3, 1, 0, 2\}$, because there is one zero before the first one, there are no zeros between the first and second ones, there are three zeros between the second and third ones, etc. d. It is not difficult to see that the original sequence $A$ can be unambiguously recovered with $f(A)$. Now, let us consider how $f(A)$ will change if we change two different adjacent elements in the original $A$ sequence. In this case, two neighboring numbers will change in $f(A)$, one of which will decrease by one and the other - will increase by one. The reversed statement is also true: if we choose two neighboring numbers in $f(A)$, one of them will increase by one, and the other - will decrease by one so that they both remain non-negative, this operation will correspond to an exchange of two neighboring elements in $A$. Let us name this operation (choosing two neighboring numbers, increasing one of them by one and reducing the other by one). pouring. Let us consider such a task. We have two arrays $A = \{a_1, a_2, \dots, a_k\}$ and $B = \{b_1, b_2, \dots, b_k\}$. We have to calculate the minimum amount of pouring operations we can turn $A$ into $B$. (Obviously, the sum of the numbers in $A$ and in $B$ must be the same.) To solve this, let's try to split the array into two "barriers", standing after $i$-th position. Then, the $A$ array is split into two parts: the left (elements from $1$ to $i$) and the right (elements from $i+1$ to $n$). For the left part of the $A$ array to have the same number of elements as in $B$, you need $g_{A, B}(i) = \left|\sum_{j=1}^{i} a_i - \sum_{j=1}^i b_i\right|$ pouring operations involving $i$ and $i+1$ elements. 
To sum up the number of necessary pouring operations for each pair of neighboring elements, you have to sum $g_{A,B}(i)$ for all $i$ from $1$ to $n-1$. It can be shown that the sum obtained will be the required number of pouring operations, because since it is equal to zero only in case of equal arrays, and each pouring operation will reduce it by no more than $1$ (and there is always a transfusion that reduces this sum by $1$). So, this subtask has been sorted out, let's move on. We have to learn how to read security for the sequence $A$, knowing $f(A) = \{f_1, f_2, \dots, f_k\}$. It is not difficult to see that it is equal to $p(A) = \sum_{1 \le i < j \le k} f_i\cdot f_j = \frac 12 \left( \sum_{1 \le i, j \le k} f_i\cdot f_j - \sum_{i=1}^k f_i^2 \right) = \frac 12 \left( \left(\sum_{i=1}^k f_i \right)^2 - \sum_{i=1}^k f_i^2 \right).$ Now we can finally get down to the task. To do this, let's try to build an optimal sequence of $f(A) = \{f_1, f_2, \dots, f_k\}$ by applying no more than $k$ of transfusions. Of course, we need to use the dynamic programming of $dp_{i,\ s,\ k}$. This means that we have looked at the first $i$ elements in $f(A)$ and done with $k$ pouring operations so that $\sum_{i=1}^k f_i$ equals $s$. The DP itself will store the minimum possible value of $\sum_{i=1}^k f_i^2$. To get the answer we need to refer to $dp_{i,\ c_0,\ k}$ where $c_0$ - is the number of zeros in the original sequence. And to recalculate the dynamics of $dp_{i,\ s,\ k}$ we have to go through what $f_{i+1}$ will be as a result. Let it be $h$. Then, the answer recovery happens in the following way: $dp_{i+1,\ s + h,\ k + \left|z_i - (s + h)\right|} \text{min=} dp_{i,\ s,\ k} + h^2.$ Here $z_i$ denotes $\sum_{j=1}^i f_i$ in the original sequence $f(A)$. The total asymptotic is $O(n^5)$ in time and $O(n^4)$ in memory, although with a correct implementation the constant is very small (the author's solution works for $\frac{n^5}{27}$). 
In this task, there are solutions for $O(n^4\log n)$, but I will not describe them here.
|
[
"dp",
"greedy"
] | 2,500
|
#include <cassert>
#include <climits>
#include <iostream>
#include <vector>
#include <numeric>
using namespace std;
// Lower `a` to `b` when `b` is smaller (chmin-style helper).
template<typename T>
inline void umin(T &a, const T &b) {
    if (b < a) a = b;
}
// Raise `a` to `b` when `b` is larger (chmax-style helper).
template<typename T>
inline void umax(T &a, const T &b) {
    if (a < b) a = b;
}
int main() {
ios_base::sync_with_stdio(false);
int n; cin >> n;
vector<int> a(n);
for (int &x : a) {
cin >> x;
}
// Sentinel 1 so the run of zeros after the last real 1 is recorded too.
a.push_back(1);
// gg = f(A) from the editorial: gg[t] = number of zeros directly before
// the (t+1)-th one (the sentinel included).
vector<int> gg;
for (int i = 0; i <= n; ++i) {
int s = i;
while (a[i] == 0) ++i;
gg.push_back(i - s);
}
// p[t] = prefix sums of gg = total zeros up to and including group t.
vector<int> p = gg;
partial_sum(begin(p), end(p), begin(p));
// dp[i][j][s] = minimal sum of squares of the first i gap values whose
// total is j, achievable with exactly s pouring operations (n <= 100
// fits within mx = 103).
constexpr int mx = 103;
constexpr int dx = mx + 1, dy = mx + 1, dz = mx * (mx + 1) / 2 + 1;
static int dp[dx][dy][dz] = {};
for (int i = 0; i < dx; ++i) {
for (int j = 0; j < dy; ++j) {
for (int k = 0; k < dz; ++k) {
dp[i][j][k] = INT_MAX;
}
}
}
dp[0][0][0] = 0;
int k = gg.size(), l = p.back();
for (int i = 0; i < k; ++i) {
for (int j = 0; j <= l; ++j) {
for (int s = 0; s <= n * (n-1) / 2; ++s) {
if (dp[i][j][s] == INT_MAX) continue;
// Pick the next prefix sum q >= j (gap value q - j); the swaps
// spent at the cut after group i+1 cost |q - p[i]|.
for (int q = j; q <= l; ++q) {
umin(dp[i+1][q][s + abs(q - p[i])], dp[i][j][s] + (q-j)*(q-j));
}
}
}
}
// For every budget s output the best protection, using the editorial
// identity: protected pairs = (l^2 - sum of squares) / 2, where mn keeps
// the best (smallest) sum of squares over all costs <= s.
int mn = INT_MAX;
for (int s = 0; s <= n * (n-1) / 2; ++s) {
umin(mn, dp[k][l][s]);
int val = l*l - mn;
assert(val % 2 == 0);
cout << val/2 << " ";
}
cout << endl;
return 0;
}
|
1421
|
A
|
XORwice
|
In order to celebrate Twice's 5th anniversary, Tzuyu and Sana decided to play a game.
Tzuyu gave Sana two integers $a$ and $b$ and a really important quest.
In order to complete the quest, Sana has to output the smallest possible value of ($a \oplus x$) + ($b \oplus x$) for any given $x$, where $\oplus$ denotes the bitwise XOR operation.
|
Think about addition in base two. Say $a$ = $10101$ and $b$ = $1001$. What your operation does is it modifies the bits in your numbers, so if the first bit in $a$ is $1$ and the first bit in $b$ is $1$ (as is the case above) you can make both $0$ by making that bit $1$ in $x$. This is actually the only way you can decrease the resulting sum, so $x$ = $1$ is an answer above. Noticing the hint above we now deduce $x$ = $a$ & $b$ where & is bitwise $AND$. So just printing ($a$ $\oplus$ ($a$ & $b$)) + ($b$ $\oplus$ ($a$ & $b$)) works, but there's an even nicer formula. We'll leave it up to you to prove that ($a$ $\oplus$ ($a$ & $b$)) + ($b$ $\oplus$ ($a$ & $b$)) = $a$ $\oplus$ $b$, where $\oplus$ is the bitwise $XOR$ :)
|
[
"bitmasks",
"greedy",
"math"
] | 800
| null |
1421
|
B
|
Putting Bricks in the Wall
|
Pink Floyd are pulling a prank on Roger Waters. They know he doesn't like walls, he wants to be able to walk freely, so they are blocking him from exiting his room which can be seen as a grid.
Roger Waters has a square grid of size $n\times n$ and he wants to traverse his grid from the upper left ($1,1$) corner to the lower right corner ($n,n$). Waters can move from a square to any other square adjacent by a side, as long as he is still in the grid. Also except for the cells ($1,1$) and ($n,n$) every cell has a value $0$ or $1$ in it.
Before starting his traversal he will pick either a $0$ or a $1$ and will be able to only go to cells values in which are equal to the digit he chose. The starting and finishing cells ($1,1$) and ($n,n$) are exempt from this rule, he may go through them regardless of picked digit. Because of this the cell ($1,1$) takes value the letter 'S' and the cell ($n,n$) takes value the letter 'F'.
For example, in the first example test case, he can go from ($1, 1$) to ($n, n$) by using the zeroes on this path: ($1, 1$), ($2, 1$), ($2, 2$), ($2, 3$), ($3, 3$), ($3, 4$), ($4, 4$)
The rest of the band (Pink Floyd) wants Waters to not be able to do his traversal, so while he is not looking they will \textbf{invert at most two cells} in the grid (from $0$ to $1$ or vice versa). They are afraid they will not be quick enough and asked for your help in choosing the cells. \textbf{Note that you cannot invert cells $(1, 1)$ and $(n, n)$}.
We can show that there always exists a solution for the given constraints.
Also note that Waters will pick his digit of the traversal after the band has changed his grid, so he must not be able to reach ($n,n$) no matter what digit he picks.
|
It's hard to use the two valuable switches somewhere in the middle of the matrix, a much wiser choice would be to somehow block the $S$ cell or the $F$ cell. Perhaps you can set both neighbours of $S$ to $1$ to force Roger to pick $1$. If we pick the neighbours of $S$ to be $1$ we can make the neighbours of $F$ $0$ and there would be no way to go from $S$ to $F$. But this requires in the worst case $4$ switches, which is not good enough. Luckily, in order to get down to $2$ switches we only have to consider the other way around, making the squares neighboring $S$ become $0$ and the squares neighboring $F$ $1$. There must be a solution of the two with at most two switches and you won't get from $S$ to $F$ since you're forced to pick $1$ (or $0$) and can't get past the neighbours of $F$ which are opposite.
|
[
"constructive algorithms",
"implementation"
] | 1,100
| null |
1421
|
C
|
Palindromifier
|
Ringo found a string $s$ of length $n$ in his yellow submarine. The string contains only lowercase letters from the English alphabet. As Ringo and his friends love palindromes, he would like to turn the string $s$ into a palindrome by applying two types of operations to the string.
The first operation allows him to choose $i$ ($2 \le i \le n-1$) and to append the substring $s_2s_3 \ldots s_i$ ($i - 1$ characters) reversed to the front of $s$.
The second operation allows him to choose $i$ ($2 \le i \le n-1$) and to append the substring $s_i s_{i + 1}\ldots s_{n - 1}$ ($n - i$ characters) reversed to the end of $s$.
Note that characters in the string in this problem are indexed from $1$.
For example suppose $s=$abcdef. If he performs the first operation with $i=3$ then he appends cb to the front of $s$ and the result will be cbabcdef. Performing the second operation on the resulted string with $i=5$ will yield cbabcdefedc.
Your task is to help Ringo make the entire string a palindrome by applying any of the two operations (in total) \textbf{at most $30$ times}. \textbf{The length of the resulting palindrome must not exceed $10^6$}
It is guaranteed that under these constraints there always is a solution. Also note you do not have to minimize neither the number of operations applied, nor the length of the resulting string, but they have to fit into the constraints.
|
You're not allowed to just pick the whole string and append its reversed result to the front, but what's the next best thing? We're very close to the answer if we take the whole string except for a letter (so for $abcde$ we make $dcbabcde$). The operation above which transformed $abcde$ into $dcbabcde$ is very close, if only we could've somehow append $e$ to the left. Turns out you can set that up, so from $abcde$ first append $d$ to the end, then you have $abcded$. Now apply the operation from the hint on this string and get $edcbabcded$. See why we added that $d$ first? We can now append it to the front just like we wanted!. Do the operation $L$ $2$ and the job is finished. Yep, amazingly just printing $R$ $n-1$ $L$ $n$ $L$ $2$ works!
|
[
"constructive algorithms",
"strings"
] | 1,400
| null |
1421
|
D
|
Hexagons
|
Lindsey Buckingham told Stevie Nicks "Go your own way". Nicks is now sad and wants to go away as quickly as possible, but she lives in a 2D hexagonal world.
Consider a hexagonal tiling of the plane as on the picture below.
Nicks wishes to go from the cell marked $(0, 0)$ to a certain cell given by the coordinates. She may go from a hexagon to any of its six neighbors you want, but there is a cost associated with each of them. The costs depend only on the direction in which you travel. Going from $(0, 0)$ to $(1, 1)$ will take the exact same cost as going from $(-2, -1)$ to $(-1, 0)$. The costs are given in the input in the order $c_1$, $c_2$, $c_3$, $c_4$, $c_5$, $c_6$ as in the picture below.
Print the smallest cost of a path from the origin which has coordinates $(0, 0)$ to the given cell.
|
Using too many edges in the solution feels wasteful, the solution surely has some neat line as straight as possible. Perhaps we can prove only two edges are required? Indeed two edges are required in the solution, so one approach would be picking all combinations of edges and do linear algebra so see how many times each is required (or if it's impossible). To prove that let's suppose our target is somewhere reachable by only taking $C1$ and $C2$ (the upper right sextant, a sixth division of the plane). $C4$ and $C5$ will never be used since they contribute in the wrong direction. We can now use $C6$, $C1$, $C2$ or $C3$ for our solution. If using $C1$ and $C2$ is not optimal we can choose $C3$ or $C6$, without loss of generality we choose $C3$. $C6$ cannot be used because it simply counters $C3$. Now we either use $C1$, $C2$ or $C3$, but we can further narrow down to just two edges. If we use all three this means we use $C1$ + $C3$ which goes the same way as $C2$, and $C2$ also goes the same way as $C1$ + $C3$. So we can just not use $C2$ if $C1$ + $C3$ < $C2$, or use $C2$ instead of $C1$ + $C3$ until either of $C1$ or $C3$ doesn't appear anymore on our solution.
|
[
"brute force",
"constructive algorithms",
"greedy",
"implementation",
"math",
"shortest paths"
] | 1,900
| null |
1421
|
E
|
Swedish Heroes
|
While playing yet another strategy game, Mans has recruited $n$ Swedish heroes, whose powers which can be represented as an array $a$.
Unfortunately, not all of those mighty heroes were created as capable as he wanted, so that he decided to do something about it. In order to accomplish his goal, he can pick two consecutive heroes, with powers $a_i$ and $a_{i+1}$, remove them and insert a hero with power $-(a_i+a_{i+1})$ back in the same position.
For example if the array contains the elements $[5, 6, 7, 8]$, he can pick $6$ and $7$ and get $[5, -(6+7), 8] = [5, -13, 8]$.
After he will perform this operation $n-1$ times, Mans will end up having only one hero. He wants his power to be as big as possible. What's the largest possible power he can achieve?
|
The problem gives us an array and we have to come up with an achievable sequence of pluses and minuses such that summing the numbers after applying the signs we get the largest sum. Intuitively we can probably assign $+$ to most positive numbers and - to most negative numbers somehow, but we should investigate exactly which are possible. Before you try to find patterns you should observe that there is one case that is impossible to reach. You cannot assign alternating + and - to the array, like $+-+-+-+-$ or $-+-+-+-+$. The reason is very simple, the very first thing you do is apply the operation on two consecutive numbers and make them both $--$, and whenever you apply further operations on the both of them they remain the same sign. In the end we decided to give this in the samples but we know from testing many would miss this case. In order to explain the solution we will add some notations: $n$ = the length of the array. $m$ = the number of elements we multiply by $-1$ in the solution (put - in front of them). $p$ = the number of elements we do not multiply by $-1$ in the solution (put $+$ in front of them). The mysterious pattern is as follows: ($n$ $+$ $m$) % $3$ = $1$ So yes, for $n$ = $7$ for example we can put $3$ minus signs anywhere, like $++--+-+$, or $6$ minus signs like $--+----$ or full plus signs $+++++++$. We can arrange the pluses and minuses however we want as long as there are $2$ consecutive equal signs and ($n$ $+$ $m$) % $3$ = $1$. The solution now simply requires us to sort the array and multiply by $-1$ each number one by one and when ($n$ $+$ $m$) % $3$ = $1$ update the answer with the current sum. Of course there is one point where all the elements might form the forbidden $+-+-+-$ so you should check that in that case they do not, or if they do pick the next smallest number to turn into - instead of the last one. 
So for the sample $[4, -5, 9, -2, 1]$ you sort and get $[-5, -2, 1, 4, 9]$ and when you turn $-5$ into $5$ instead of turning $-2$ into $2$ you turn $1$ into $-1$ and the array will be $[5, -2, -1, 4, 9]$. After that undo your modification, turn $-2$ into $2$ and revert $-1$ to $1$. The proof can be done via induction, but I will try to explain why this happens. Suppose your solution looks like $+--+---+$. What we need to do is to split into two substrings such that their negation is achievable and we are done, they will concatenate and reverse each sign. So we can split into ($+--$) and ($+---+$) and notice that their negations are ($-++$) and ($-+++-$) which are achievable (($n$ $+$ $m$) % $3$ = $1$ for both and are not alternating). We can always find such a split, start at the left-most point and see if you can split into ($+$) and ($--+---+$). You can't, the negation of ($+$) is just ($-$) which you can't have. Splitting into ($+-$) and ($+---+$) won't do either for the same reasons, but ($+--$) and ($+---+$) work, actually if the left substring starts with a $+$ the very first time the last two signs are equal is a time where we can make the split, something like ($+-+-++$) when reversed will always have ($n$ $+$ $m$) % $3$ = $1$. So now you see even clearer why the corner case $+-+-+$ exists, you can never split it into two substrings where the last two signs of the first substring are equal.
|
[
"brute force",
"dp",
"implementation"
] | 2,700
| null |
1422
|
A
|
Fence
|
Yura is tasked to build a closed fence in shape of an arbitrary non-degenerate simple quadrilateral. He's already got three straight fence segments with known lengths $a$, $b$, and $c$. Now he needs to find out some possible integer length $d$ of the fourth straight fence segment so that he can build the fence using these four segments. In other words, the fence should have a quadrilateral shape with side lengths equal to $a$, $b$, $c$, and $d$. Help Yura, find any possible length of the fourth side.
A non-degenerate simple quadrilateral is such a quadrilateral that no three of its corners lie on the same line, and it does not cross itself.
|
A quadrilateral can be built when $max (a, b, c, d) < a + b + c + d - max (a, b, c, d)$, that is, the sum of the minimum three numbers is greater than the maximum. To do this, you could choose $max (a, b, c)$ or $a + b + c - 1$ as $d$.
|
[
"geometry",
"math"
] | 800
|
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iostream>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <set>
#include <stack>
#include <string>
#include <vector>
using namespace std;
// Range helpers for STL algorithms.
#define all(x) (x).begin(), (x).end()
#define rall(x) (x).rbegin(), (x).rend()
// Deduplicate a sorted container in place.
#define reunique(v) v.resize(std::unique(v.begin(), v.end()) - v.begin())
#define sz(v) ((int)(v).size())
// Shorthands for (nested) vector types and their size-initialized forms.
#define vec1d(x) vector<x>
#define vec2d(x) vector<vec1d(x)>
#define vec3d(x) vector<vec2d(x)>
#define vec4d(x) vector<vec3d(x)>
#define ivec1d(x, n, v) vec1d(x)(n, v)
#define ivec2d(x, n, m, v) vec2d(x)(n, ivec1d(x, m, v))
#define ivec3d(x, n, m, k, v) vec3d(x)(n, ivec2d(x, m, k, v))
#define ivec4d(x, n, m, k, l, v) vec4d(x)(n, ivec3d(x, m, k, l, v))
// Debug printing only compiles in local builds; dbg(...) is a no-op otherwise.
#ifdef LOCAL
#include "pretty_print.h"
#define dbg(...) cerr << "[" << #__VA_ARGS__ << "]: ", debug_out(__VA_ARGS__)
#else
#define dbg(...) 42
#endif
#define nl "\n"
typedef long double ld;
typedef long long ll;
typedef unsigned long long ull;
// Small numeric helpers; chmin/chmax report whether the value was updated.
template <typename T> T sqr(T x) { return x * x; }
template <typename T> T abs(T x) { return x < 0? -x : x; }
template <typename T> T gcd(T a, T b) { return b? gcd(b, a % b) : a; }
template <typename T> bool chmin(T &x, const T& y) { if (x > y) { x = y; return true; } return false; }
template <typename T> bool chmax(T &x, const T& y) { if (x < y) { x = y; return true; } return false; }
// RNGs seeded with the clock mixed with a heap address (hack-resistant seed).
auto random_address = [] { char *p = new char; delete p; return (uint64_t) p; };
mt19937 rng(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
mt19937_64 rngll(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
int main(int /* argc */, char** /* argv */)
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
#ifdef LOCAL
    assert(freopen("i.txt", "r", stdin));
    assert(freopen("o.txt", "w", stdout));
#endif
    int t;
    cin >> t;
    for (; t > 0; --t) {
        int a, b, c;
        cin >> a >> b >> c;
        // Two conditional swaps are enough to put the maximum into c.
        if (a > b) swap(a, b);
        if (b > c) swap(b, c);
        // d = 1 works unless c dominates the rest; otherwise take the
        // smallest d with c < a + b + d, i.e. d = c - a - b + 1.
        cout << max(1, c - a - b + 1) << nl;
    }
#ifdef LOCAL
    cerr << "Time execute: " << clock() / (double)CLOCKS_PER_SEC << " sec" << endl;
#endif
    return 0;
}
|
1422
|
B
|
Nice Matrix
|
A matrix of size $n \times m$ is called nice, if all rows and columns of the matrix are palindromes. A sequence of integers $(a_1, a_2, \dots , a_k)$ is a palindrome, if for any integer $i$ ($1 \le i \le k$) the equality $a_i = a_{k - i + 1}$ holds.
Sasha owns a matrix $a$ of size $n \times m$. In one operation he can increase or decrease any number in the matrix by one. Sasha wants to make the matrix nice. He is interested what is the minimum number of operations he needs.
Help him!
|
Note that if the value of $a_{1, 1}$ is equal to some number $x$, then the values of $a_{n, 1}$, $a_{1, m}$ and $a_{n, m}$ must also be equal to this number $x$ by the palindrome property. A similar property holds for all of the following elements $a_{x, y}$ ($a_{x, y}=a_{n - x + 1, y}=a_{x, m - y + 1}=a_{n - x + 1, m - y + 1}$), so the problem is reduced to finding the optimal number for each group of four mirrored cells (maybe fewer for some positions in the matrix). This number is the median of these numbers (the middle element of the sorted set). The answer will be the sum of the absolute differences between the median of the "four" and each number in the "four" for all "fours".
|
[
"greedy",
"implementation",
"math"
] | 1,300
|
#include <bits/stdc++.h>
// Shorthands for pair members and make_pair.
#define fi first
#define se second
#define m_p make_pair
// Redefine endl as a plain newline so printing never forces a stream flush.
#define endl '\n'
#define fast_io ios_base::sync_with_stdio(0); cin.tie(0)
using namespace std;
typedef long long ll;
// Generic contest-template constants; not all of them are used by this solution.
const int MAXN = 412345;
const int MAXINT = 2047483098;
const ll MOD = 1e9 + 7;
const int MAX = 1e4;
const long double EPS = 1e-10;
// Minimum number of +-1 operations needed to make every value in `numbers`
// equal; the optimal common value is the upper median of the sorted values.
// Note: sorts `numbers` in place as a side effect.
long long calcAnswer(vector <long long> &numbers) {
    sort(numbers.begin(), numbers.end());
    const long long target = numbers[numbers.size() / 2];
    long long ops = 0;
    for (long long value : numbers) {
        ops += value > target ? value - target : target - value;
    }
    return ops;
}
void solve() {
int n, m;
cin >> n >> m;
vector <vector <long long> > matrix(n);
for (int i = 0; i < n; ++i) {
matrix[i].resize(m);
for (int j = 0; j < m; ++j) cin >> matrix[i][j];
}
long long answer = 0;
int left_row = 0, right_row = n - 1;
while(left_row <= right_row) {
int left_column = 0, right_column = m - 1;
while(left_column <= right_column) {
vector <long long> cur_numbers = {matrix[left_row][left_column]};
if (left_row != right_row) cur_numbers.push_back(matrix[right_row][left_column]);
if (right_column != left_column) cur_numbers.push_back(matrix[left_row][right_column]);
if (left_column != right_column && left_row != right_row) cur_numbers.push_back(matrix[right_row][right_column]);
answer += calcAnswer(cur_numbers);
left_column++, right_column--;
}
left_row++, right_row--;
}
cout << answer << endl;
}
// Entry point: reads the number of test cases and runs solve() once per case.
int main()
{
    fast_io;
    int testCount;
    cin >> testCount;
    for (int tc = 0; tc < testCount; ++tc) {
        solve();
    }
    return 0;
}
|
1422
|
C
|
Bargain
|
Sometimes it is not easy to come to an agreement in a bargain. Right now Sasha and Vova can't come to an agreement: Sasha names a price as high as possible, then Vova wants to remove as many digits from the price as possible. In more details, Sasha names some integer price $n$, Vova removes a non-empty substring of (consecutive) digits from the price, the remaining digits close the gap, and the resulting integer is the price.
For example, if Sasha names $1213121$, Vova can remove the substring $1312$, and the result is $121$.
It is allowed for result to contain leading zeros. If Vova removes all digits, the price is considered to be $0$.
Sasha wants to come up with some constraints so that Vova can't just remove all digits, but he needs some arguments supporting the constraints. To start with, he wants to compute the sum of all possible resulting prices after Vova's move.
Help Sasha to compute this sum. Since the answer can be very large, print it modulo $10^9 + 7$.
|
Let's count for each digit how many times it will be included in the final sum and in what place. Let's denote $m$ as the length of the number $n$. Consider the digit $a_i$ at the position $i$ in the number $n$ ($1 \le i \le m$). If some part of the number to the left of the digit is removed, then the current digit will remain in its place - and we add the number of ways to remove the subsegment to the left to the answer multiplied by the current digit $i * (i - 1) / 2 \times 10 ^ {m - i} \times a_i$. If the segment to the right is deleted, then the place of the digit will change - $(j + 1) \times 10^j \times a_i$ for all $0 \le j < m - i$, or $\sum_ {j = 0 }^{m - i - 1} {(j + 1) \times 10 ^ j} \times a_i$. The $j$ sum can be pre-calculated for all values.
|
[
"combinatorics",
"dp",
"math"
] | 1,700
|
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iostream>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <set>
#include <stack>
#include <string>
#include <vector>
using namespace std;
// Range shorthands: full forward / reverse iterator pairs.
#define all(x) (x).begin(), (x).end()
#define rall(x) (x).rbegin(), (x).rend()
// Drop adjacent duplicates in place (container must already be sorted).
#define reunique(v) v.resize(std::unique(v.begin(), v.end()) - v.begin())
#define sz(v) ((int)(v).size())
// Nested-vector type helpers: vecNd(x) is an N-dimensional vector of x;
// ivecNd(x, dims..., v) constructs one with the given sizes filled with v.
#define vec1d(x) vector<x>
#define vec2d(x) vector<vec1d(x)>
#define vec3d(x) vector<vec2d(x)>
#define vec4d(x) vector<vec3d(x)>
#define ivec1d(x, n, v) vec1d(x)(n, v)
#define ivec2d(x, n, m, v) vec2d(x)(n, ivec1d(x, m, v))
#define ivec3d(x, n, m, k, v) vec3d(x)(n, ivec2d(x, m, k, v))
#define ivec4d(x, n, m, k, l, v) vec4d(x)(n, ivec3d(x, m, k, l, v))
// Debug printing only in local builds; dbg(...) is a harmless no-op otherwise.
#ifdef LOCAL
#include "pretty_print.h"
#define dbg(...) cerr << "[" << #__VA_ARGS__ << "]: ", debug_out(__VA_ARGS__)
#else
#define dbg(...) 42
#endif
#define nl "\n"
typedef long double ld;
typedef long long ll;
typedef unsigned long long ull;
// Small numeric helpers used by the solution below.
template <typename T> T sqr(T x) { return x * x; }
template <typename T> T abs(T x) { return x < 0? -x : x; }
template <typename T> T gcd(T a, T b) { return b? gcd(b, a % b) : a; }
// chmin/chmax: update x with y if it improves; return whether x changed.
template <typename T> bool chmin(T &x, const T& y) { if (x > y) { x = y; return true; } return false; }
template <typename T> bool chmax(T &x, const T& y) { if (x < y) { x = y; return true; } return false; }
// A fresh heap address adds ASLR entropy to the RNG seeds below.
auto random_address = [] { char *p = new char; delete p; return (uint64_t) p; };
mt19937 rng(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
mt19937_64 rngll(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
// Solution for "Bargain": sum, over all ways to delete one non-empty
// substring of digits from the price, of the resulting value, mod 1e9+7.
// With 0-indexed position i in a string of length n, digit a_i contributes:
//   * i*(i+1)/2 * 10^(n-1-i)        — deletions strictly to its left
//     (its place value is unchanged), and
//   * sum_{j=0}^{n-2-i} (j+1)*10^j  — deletions of a segment to its right
//     (its place value shrinks to 10^j).
// The right-hand sum and the power of ten are maintained incrementally
// while scanning from the least significant digit.
int main(int /* argc */, char** /* argv */)
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
#ifdef LOCAL
    assert(freopen("i.txt", "r", stdin));
    assert(freopen("o.txt", "w", stdout));
#endif
    const ll MOD = 1000000007LL;
    string s;
    cin >> s;
    const int n = (int)s.size();
    ll ans = 0;
    ll rightSum = 0;  // sum_{j=0}^{cnt-1} (j+1)*10^j mod MOD, cnt = digits right of i
    ll pow10 = 1;     // 10^(digits right of i) mod MOD
    for (int i = n - 1; i >= 0; --i) {
        ll digit = s[i] - '0';
        // Substrings that lie entirely among the i digits to the left.
        ll leftWays = (ll)i * (i + 1) / 2 % MOD;
        ll contrib = (leftWays * pow10 + rightSum) % MOD;
        ans = (ans + digit * contrib) % MOD;
        rightSum = (rightSum + pow10 * (n - i)) % MOD;
        pow10 = pow10 * 10 % MOD;
    }
    cout << ans << endl;
#ifdef LOCAL
    cerr << "Time execute: " << clock() / (double)CLOCKS_PER_SEC << " sec" << endl;
#endif
    return 0;
}
|
1422
|
D
|
Returning Home
|
Yura has been walking for some time already and is planning to return home. He needs to get home as fast as possible. To do this, Yura can use the instant-movement locations around the city.
Let's represent the city as an area of $n \times n$ square blocks. Yura needs to move from the block with coordinates $(s_x,s_y)$ to the block with coordinates $(f_x,f_y)$. In one minute Yura can move to any neighboring by side block; in other words, he can move in four directions. Also, there are $m$ instant-movement locations in the city. Their coordinates are known to you and Yura. Yura can move to an instant-movement location in no time if he is located in a block with the same coordinate $x$ or with the same coordinate $y$ as the location.
Help Yura to find the smallest time needed to get home.
|
You can build a graph with vertices at the start point and all fast travel points. The distance between the vertices $(x_1, y_1)$ and $(x_2, y_2)$ is calculated as $min (|x_1 - x_2|, |y_1 - y_2|)$. To avoid drawing all $m * (m + 1) / 2$ edges in the graph, note that for a pair of points $(x_1, y_1)$ and $(x_2, y_2)$ such that $|x_1 - x_2| \le |y_1 - y_2|$, if there is a point with coordinate $x_3$ such that it is between $x_1$ and $x_2$ ($min (x_1, x_2) \le x_3 \le max (x_1, x_2)$), then the distance between the first and second point will be equal to the sum of the distances between the first and third and between the third and second. In this case, the edge between the first and second points does not need to be drawn - it will be unnecessary. It turns out that for each point of the graph it will be enough to draw the edges to the points nearest along the $x$ axis in both directions. Similarly for $y$. Next, in the constructed graph, we find the minimum distance from the starting point to each point of the graph $(x, y)$ and sum it up with the distance to the end point $(f_x, f_y)$, which is equal to $|x - f_x| + |y - f_y|$. Among all the distances, we choose the minimum one.
|
[
"graphs",
"shortest paths",
"sortings"
] | 2,300
|
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iostream>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <set>
#include <stack>
#include <string>
#include <vector>
using namespace std;
// Range shorthands: full forward / reverse iterator pairs.
#define all(x) (x).begin(), (x).end()
#define rall(x) (x).rbegin(), (x).rend()
// Drop adjacent duplicates in place (container must already be sorted).
#define reunique(v) v.resize(std::unique(v.begin(), v.end()) - v.begin())
#define sz(v) ((int)(v).size())
// Nested-vector type helpers: vecNd(x) is an N-dimensional vector of x;
// ivecNd(x, dims..., v) constructs one with the given sizes filled with v.
#define vec1d(x) vector<x>
#define vec2d(x) vector<vec1d(x)>
#define vec3d(x) vector<vec2d(x)>
#define vec4d(x) vector<vec3d(x)>
#define ivec1d(x, n, v) vec1d(x)(n, v)
#define ivec2d(x, n, m, v) vec2d(x)(n, ivec1d(x, m, v))
#define ivec3d(x, n, m, k, v) vec3d(x)(n, ivec2d(x, m, k, v))
#define ivec4d(x, n, m, k, l, v) vec4d(x)(n, ivec3d(x, m, k, l, v))
// Debug printing only in local builds; dbg(...) is a harmless no-op otherwise.
#ifdef LOCAL
#include "pretty_print.h"
#define dbg(...) cerr << "[" << #__VA_ARGS__ << "]: ", debug_out(__VA_ARGS__)
#else
#define dbg(...) 42
#endif
#define nl "\n"
typedef long double ld;
typedef long long ll;
typedef unsigned long long ull;
// Small numeric helpers used by the solution below.
template <typename T> T sqr(T x) { return x * x; }
template <typename T> T abs(T x) { return x < 0? -x : x; }
template <typename T> T gcd(T a, T b) { return b? gcd(b, a % b) : a; }
// chmin/chmax: update x with y if it improves; return whether x changed.
template <typename T> bool chmin(T &x, const T& y) { if (x > y) { x = y; return true; } return false; }
template <typename T> bool chmax(T &x, const T& y) { if (x < y) { x = y; return true; } return false; }
// A fresh heap address adds ASLR entropy to the RNG seeds below.
auto random_address = [] { char *p = new char; delete p; return (uint64_t) p; };
mt19937 rng(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
mt19937_64 rngll(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
// Solution for "Returning Home": minimum time from (sx, sy) to (fx, fy) on a
// grid with instant-movement locations. Vertices are the teleport points;
// reaching teleport i costs min(|dx|, |dy|) because only one coordinate must
// be aligned. Per the editorial, it suffices to connect each point to its
// neighbors in x-sorted and y-sorted order — any longer edge is dominated by
// a chain of such edges — then run Dijkstra.
int main(int /* argc */, char** /* argv */)
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
#ifdef LOCAL
    assert(freopen("i.txt", "r", stdin));
    assert(freopen("o.txt", "w", stdout));
#endif
    // Grid size is read into _ but never used; n is the number of teleports.
    int _, n;
    cin >> _ >> n;
    int sx, sy, fx, fy;
    cin >> sx >> sy >> fx >> fy;
    // p[i] stores teleport i; a and b hold the same points keyed
    // (x, y, index) and (y, x, index) so each can be sorted along one axis.
    vector<pair<int, int>> p(n);
    vector<tuple<int, int, int>> a(n), b(n);
    for (int i = 0; i < n; ++i) {
        int x, y;
        cin >> x >> y;
        p[i] = {x, y};
        a[i] = make_tuple(x, y, i);
        b[i] = make_tuple(y, x, i);
    }
    // Build the sparse graph: adjacent (in sorted order) points get an edge
    // of weight min(|dx|, |dy|); symmetric in x and y thanks to the key swap.
    vector<vector<pair<int, int>>> e(n);
    for (auto& s : vector{a, b}) {
        sort(all(s));
        for (int i = 1; i < n; ++i) {
            auto [x1, y1, u] = s[i - 1];
            auto [x2, y2, v] = s[i];
            int d = min(abs(x1 - x2), abs(y1 - y2));
            e[u].push_back({v, d});
            e[v].push_back({u, d});
        }
    }
    // Dijkstra from the start: a max-heap over negated distances emulates a
    // min-heap. d[i] is seeded with the direct cost of reaching teleport i.
    priority_queue<pair<ll, int>> h;
    vector<ll> d(n);
    for (int i = 0; i < n; ++i) {
        d[i] = min(abs(sx - p[i].first), abs(sy - p[i].second));
        h.push({-d[i], i});
    }
    while (h.size()) {
        int x;
        ll w;
        tie(w, x) = h.top();
        h.pop();
        w = -w;
        if (d[x] != w) {
            // Stale heap entry: a shorter path to x was already settled.
            continue;
        }
        for (auto& [y, c] : e[x]) {
            if (chmin(d[y], w + c)) {
                h.push({-d[y], y});
            }
        }
    }
    // Answer: either walk directly, or reach some teleport i and walk the
    // Manhattan distance from it to the destination.
    ll ans = abs(sx - fx) + abs(sy - fy);
    for (int i = 0; i < n; ++i) {
        chmin(ans, d[i] + abs(fx - p[i].first) + abs(fy - p[i].second));
    }
    cout << ans << nl;
#ifdef LOCAL
    cerr << "Time execute: " << clock() / (double)CLOCKS_PER_SEC << " sec" << endl;
#endif
    return 0;
}
|
1422
|
E
|
Minlexes
|
Some time ago Lesha found an entertaining string $s$ consisting of lowercase English letters. Lesha immediately developed an unique algorithm for this string and shared it with you. The algorithm is as follows.
Lesha chooses an arbitrary (possibly zero) number of pairs on positions $(i, i + 1)$ in such a way that the following conditions are satisfied:
- for each pair $(i, i + 1)$ the inequality $0 \le i < |s| - 1$ holds;
- for each pair $(i, i + 1)$ the equality $s_i = s_{i + 1}$ holds;
- there is no index that is contained in more than one pair.
After that Lesha removes all characters on indexes contained in these pairs and the algorithm is over. Lesha is interested in the lexicographically smallest strings he can obtain by applying the algorithm to the suffixes of the given string.
|
Let's find the answer $ans_i$ for all suffixes, starting with the smallest in length. $ans_n$ is equal to an empty string. Then if $s_i = s_{i + 1}$ ($0 \le i + 1 < n$), then $ans_i = \min(s_i + ans_{i + 1}, ans_{i + 2})$, and otherwise $ans_i = s_i + ans_{i + 1}$. To quickly find the minimum of two strings, they can be stored as "binary lifts": $next_{i, j}$ is the position in the string $s$ at which the $2^j$-th character of $ans_i$ is located, and $hash_{i, j}$ is the hash of the prefix of $ans_i$ of length $2^j$. Values for $(i, j)$ can be obtained from $(i, j-1)$ and $(i + 2^{j-1}, j-1)$. To restore the answer, $next_{i, j}$ alone is enough, and for simplicity we can additionally store the length of each answer.
|
[
"dp",
"greedy",
"implementation",
"strings"
] | 2,700
|
#include <algorithm>
#include <cassert>
#include <chrono>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <ctime>
#include <iostream>
#include <map>
#include <numeric>
#include <queue>
#include <random>
#include <set>
#include <stack>
#include <string>
#include <vector>
using namespace std;
// Range shorthands: full forward / reverse iterator pairs.
#define all(x) (x).begin(), (x).end()
#define rall(x) (x).rbegin(), (x).rend()
// Drop adjacent duplicates in place (container must already be sorted).
#define reunique(v) v.resize(std::unique(v.begin(), v.end()) - v.begin())
#define sz(v) ((int)(v).size())
// Nested-vector type helpers: vecNd(x) is an N-dimensional vector of x;
// ivecNd(x, dims..., v) constructs one with the given sizes filled with v.
#define vec1d(x) vector<x>
#define vec2d(x) vector<vec1d(x)>
#define vec3d(x) vector<vec2d(x)>
#define vec4d(x) vector<vec3d(x)>
#define ivec1d(x, n, v) vec1d(x)(n, v)
#define ivec2d(x, n, m, v) vec2d(x)(n, ivec1d(x, m, v))
#define ivec3d(x, n, m, k, v) vec3d(x)(n, ivec2d(x, m, k, v))
#define ivec4d(x, n, m, k, l, v) vec4d(x)(n, ivec3d(x, m, k, l, v))
// Debug printing only in local builds; dbg(...) is a harmless no-op otherwise.
#ifdef LOCAL
#include "pretty_print.h"
#define dbg(...) cerr << "[" << #__VA_ARGS__ << "]: ", debug_out(__VA_ARGS__)
#else
#define dbg(...) 42
#endif
#define nl "\n"
typedef long double ld;
typedef long long ll;
typedef unsigned long long ull;
// Small numeric helpers used by the solution below.
template <typename T> T sqr(T x) { return x * x; }
template <typename T> T abs(T x) { return x < 0? -x : x; }
template <typename T> T gcd(T a, T b) { return b? gcd(b, a % b) : a; }
// chmin/chmax: update x with y if it improves; return whether x changed.
template <typename T> bool chmin(T &x, const T& y) { if (x > y) { x = y; return true; } return false; }
template <typename T> bool chmax(T &x, const T& y) { if (x < y) { x = y; return true; } return false; }
// A fresh heap address adds ASLR entropy to the RNG seeds below.
auto random_address = [] { char *p = new char; delete p; return (uint64_t) p; };
mt19937 rng(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
mt19937_64 rngll(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1));
// Solution for "Minlexes": for each suffix of s, print the length of the
// lexicographically smallest string obtainable by deleting disjoint adjacent
// equal pairs, then the string itself (abbreviated when longer than 10
// characters: first 5, "...", last 2).
int main(int /* argc */, char** /* argv */)
{
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
#ifdef LOCAL
    assert(freopen("i.txt", "r", stdin));
    assert(freopen("o.txt", "w", stdout));
#endif
    string s;
    while (cin >> s) {
        int n = s.size();
        // a[i] = letter code in 1..26; two trailing 0 sentinels represent
        // "past the end of the answer" so comparisons terminate correctly.
        vector<int> a(n + 2, 0);
        for (int i = 0; i < n; ++i) {
            a[i] = s[i] - 'a' + 1;
        }
        // m = number of binary-lifting levels, smallest with 2^m > n.
        int m = 1;
        while ((1 << m) <= n) {
            m += 1;
        }
        // p[k] = P^k: rolling-hash powers (mod 2^64 via ull wraparound).
        const int P = 29;
        vector<ull> p(n + 1);
        p[0] = 1;
        for (int i = 1; i <= n; ++i) {
            p[i] = p[i - 1] * P;
        }
        // For the answer string ans_i of the suffix starting at i:
        //   hsh[i][j] — hash of the first 2^j characters of ans_i,
        //   nxt[i][j] — position holding character number 2^j of ans_i,
        //   len[i]    — |ans_i|,
        //   jmp[i]    — position whose chain actually spells ans_i (skips a
        //               deleted leading pair, possibly transitively).
        auto hsh = ivec2d(ull, n + 1, m, 0);
        auto nxt = ivec2d(int, n + 1, m, n);
        auto len = ivec1d(int, n + 1, 0);
        auto jmp = ivec1d(int, n + 1, 0);
        iota(all(jmp), 0);
        // Is ans_x < ans_y? Descend over levels keeping the longest common
        // prefix (equal hashes => advance both by 2^i), then compare the
        // first differing character. Assumes no 64-bit hash collisions.
        auto cmp = [&](int x, int y) {
            x = jmp[x];
            y = jmp[y];
            for (int i = m - 1; i >= 0; --i) {
                if (hsh[x][i] == hsh[y][i]) {
                    x = nxt[x][i];
                    y = nxt[y][i];
                }
            }
            return a[x] < a[y];
        };
        // upd(x, v): define ans_x = s[x] + ans_v and rebuild x's lift tables
        // level by level from the entries one level below.
        auto upd = [&](int x, int v) {
            v = jmp[v];
            len[x] = len[v] + 1;
            hsh[x][0] = a[x];
            nxt[x][0] = v;
            for (int i = 1; i < m; ++i) {
                int y = nxt[x][i - 1];
                nxt[x][i] = nxt[y][i - 1];
                hsh[x][i] = hsh[x][i - 1] * p[1 << (i - 1)] + hsh[y][i - 1];
            }
        };
        // Process suffixes right to left. Default: keep s[i]. When
        // s[i] == s[i+1] the pair may be deleted, making ans_{i+2} a
        // candidate; adopt it only when it is strictly smaller.
        for (int i = n - 1; i >= 0; --i) {
            upd(i, i + 1);
            if (i + 1 < n && a[i] == a[i + 1] && cmp(i + 2, i)) {
                jmp[i] = jmp[jmp[i + 2]];
                len[i] = len[jmp[i]];
            }
        }
        for (int i = 0; i < n; ++i) {
            cout << len[i] << " ";
            int x = jmp[i];
            // Print the whole answer if it fits in 10 chars, else its first 5.
            for (int j = 0, limit = len[i] <= 10? len[i] : 5; j < limit; ++j, x = nxt[x][0]) {
                cout << s[x];
            }
            if (len[i] > 10) {
                // Jump from character index 5 to index len-2 using the lifts
                // (d characters to skip), then print the last two characters.
                int d = len[i] - 5 - 2;
                for (int i = m - 1; i >= 0; --i) {  // note: this i shadows the outer i
                    if ((1 << i) <= d) {
                        d -= 1 << i;
                        x = nxt[x][i];
                    }
                }
                cout << "...";
                for (int j = 0; j < 2; ++j, x = nxt[x][0]) {
                    cout << s[x];
                }
            }
            cout << nl;
        }
    }
#ifdef LOCAL
    cerr << "Time execute: " << clock() / (double)CLOCKS_PER_SEC << " sec" << endl;
#endif
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.