contest_id
stringlengths
1
4
index
stringclasses
43 values
title
stringlengths
2
63
statement
stringlengths
51
4.24k
tutorial
stringlengths
19
20.4k
tags
listlengths
0
11
rating
int64
800
3.5k
code
stringlengths
46
29.6k
1422
F
Boring Queries
Yura owns a quite ordinary and boring array $a$ of length $n$. You think there is nothing more boring than that, but Vladik doesn't agree! In order to make Yura's array even more boring, Vladik makes $q$ boring queries. Each query consists of two integers $x$ and $y$. Before answering a query, the bounds $l$ and $r$ for this query are calculated: $l = (last + x) \bmod n + 1$, $r = (last + y) \bmod n + 1$, where $last$ is the answer on the previous query (zero initially), and $\bmod$ is the remainder operation. Whenever $l > r$, they are swapped. After Vladik computes $l$ and $r$ for a query, he is to compute the least common multiple (LCM) on the segment $[l; r]$ of the initial array $a$ modulo $10^9 + 7$. LCM of a multiset of integers is the smallest positive integer that is divisible by all the elements of the multiset. The obtained LCM is the answer for this query. Help Vladik and compute the answer for each query!
In order to find the LCM of numbers on a segment, you can, for each prime number, find the maximum power with which it enters into any number on the segment and multiply the answer with this power. Let's calculate the LCM for primes less than $\sqrt {MaxA}$ and greater than $\sqrt {MaxA}$ separately using the segment tree. There are $k = 86$ prime numbers less than $\sqrt {MaxA}$. Let's store a sorted list of prime numbers with their maximum power in each subsegment of the tree. The union of two segments can be done in $O (k)$. Then the construction of the entire tree can be done in $O (n \cdot k)$. In order to answer the query, we split it into $O (log (n))$ subsegments of the segment tree and sequentially combine them in $O (k)$. For each number $a_i$ in the array, there will be no more than one prime divisor $p_i$ greater than $\sqrt {MaxA}$. For a query, you need to find the product of unique numbers $p_i$ on a segment. To do this, for each such prime $p_i$, find $prev_i$ - the closest position to the left in the array of the same prime number (or $-1$ if there is no such number on the left). For a subsegment, we will store a sorted list of $(prev_i, p_i)$ pairs, and also pre-calculate the product $p_i$ for each prefix. Now, for each subsegment of the tree that will be included in the query $(l, r)$, you need to select all such pairs for which $prev_i < l$ and take the product of $p_i$. Since the list is ordered, all these numbers will form a prefix. The prefix can be found using a binary search for $log (n)$. Total complexity $O (n \cdot k + q \cdot log (n) \cdot k + q \cdot log^2 (n))$, where $k = \frac {\sqrt {MaxA}} {\ln \sqrt {MaxA}}$ (the number of primes up to the root).
[ "data structures", "math", "number theory" ]
2,700
#include <algorithm> #include <cassert> #include <chrono> #include <cmath> #include <cstdio> #include <cstring> #include <ctime> #include <iostream> #include <map> #include <numeric> #include <queue> #include <random> #include <set> #include <stack> #include <string> #include <vector> using namespace std; #define all(x) (x).begin(), (x).end() #define rall(x) (x).rbegin(), (x).rend() #define reunique(v) v.resize(std::unique(v.begin(), v.end()) - v.begin()) #define sz(v) ((int)(v).size()) #define vec1d(x) vector<x> #define vec2d(x) vector<vec1d(x)> #define vec3d(x) vector<vec2d(x)> #define vec4d(x) vector<vec3d(x)> #define ivec1d(x, n, v) vec1d(x)(n, v) #define ivec2d(x, n, m, v) vec2d(x)(n, ivec1d(x, m, v)) #define ivec3d(x, n, m, k, v) vec3d(x)(n, ivec2d(x, m, k, v)) #define ivec4d(x, n, m, k, l, v) vec4d(x)(n, ivec3d(x, m, k, l, v)) #ifdef LOCAL #include "pretty_print.h" #define dbg(...) cerr << "[" << #__VA_ARGS__ << "]: ", debug_out(__VA_ARGS__) #else #define dbg(...) 42 #endif #define nl "\n" typedef long double ld; typedef long long ll; typedef unsigned long long ull; template <typename T> T sqr(T x) { return x * x; } template <typename T> T abs(T x) { return x < 0? -x : x; } template <typename T> T gcd(T a, T b) { return b? 
gcd(b, a % b) : a; } template <typename T> bool chmin(T &x, const T& y) { if (x > y) { x = y; return true; } return false; } template <typename T> bool chmax(T &x, const T& y) { if (x < y) { x = y; return true; } return false; } auto random_address = [] { char *p = new char; delete p; return (uint64_t) p; }; mt19937 rng(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1)); mt19937_64 rngll(chrono::steady_clock::now().time_since_epoch().count() * (random_address() | 1)); const int MAXX = (int)sqrt(2e+5 + 42); const int MAXN = 1e+5 + 42; const int MAXT = 4 * MAXN; const int MOD = (int)1e+9 + 7; struct TNode { vector<pair<int, int>> low; vector<pair<int, int>> high; vector<int> prod; }; vector<TNode> t(MAXT); map<int, int> lst; vector<int> a; vector<int> ps; vector<int> upd(MAXX, 0); vector<int> val(MAXX, 0); vector<int> seq(MAXX, 0); int cnt = 0; int ans = 0; void node_merge(TNode& a, TNode& b, TNode& r) { { auto fs = a.low.begin(); auto sc = b.low.begin(); while (fs != a.low.end() || sc != b.low.end()) { if (sc == b.low.end() || (fs != a.low.end() && fs->first < sc->first)) { r.low.emplace_back(fs->first, fs->second); ++fs; } else if (fs == a.low.end() || (sc != b.low.end() && sc->first < fs->first)) { r.low.emplace_back(sc->first, sc->second); ++sc; } else { assert(fs != a.low.end()); assert(sc != b.low.end()); assert(fs->first == sc->first); r.low.emplace_back(fs->first, max(fs->second, sc->second)); ++fs; ++sc; } } } merge(all(a.high), all(b.high), back_inserter(r.high)); int p = 1; for (auto& [_, x] : r.high) { p = p * (ll)x % MOD; r.prod.push_back(p); } } void build_tree(int idx, int l, int r) { if (l + 1 == r) { int x = a[l]; for (auto& p : ps) { if (p * p > x) { break; } int c = 0; while (x % p == 0) { x /= p; ++c; } if (c) { t[idx].low.emplace_back(p, c); } } if (x > 1) { if (x >= MAXX) { auto it = lst.find(x); int prv = it == lst.end()? 
-1 : it->second; t[idx].high.emplace_back(prv, x); t[idx].prod.emplace_back(x); lst[x] = l; } else { t[idx].low.emplace_back(x, 1); } } return; } int c = (l + r) / 2; int lid = 2 * idx + 0; int rid = 2 * idx + 1; build_tree(lid, l, c); build_tree(rid, c, r); node_merge(t[lid], t[rid], t[idx]); } void get(int idx, int l, int r, int lq, int rq) { if (lq <= l && r <= rq) { for (auto& [p, s] : t[idx].low) { if (upd[p] != cnt) { upd[p] = cnt; seq.push_back(p); val[p] = 0; } chmax(val[p], s); } int pos = lower_bound(all(t[idx].high), make_pair(lq, 0)) - t[idx].high.begin(); if (pos) { ans = ans * (ll)t[idx].prod[pos - 1] % MOD; } return; } int c = (l + r) / 2; int lid = 2 * idx + 0; int rid = 2 * idx + 1; if (lq < c) { get(lid, l, c, lq, rq); } if (c < rq) { get(rid, c, r, lq, rq); } } int powmod(int a, int b) { ll s = a; ll ret = 1; while (b) { if (b & 1) { ret = ret * s % MOD; } s = s * s % MOD; b >>= 1; } return ret; } int main(int /* argc */, char** /* argv */) { ios_base::sync_with_stdio(false); cin.tie(NULL); #ifdef LOCAL assert(freopen("i.txt", "r", stdin)); assert(freopen("o.txt", "w", stdout)); #endif vector<int> fp(MAXX, 1); for (int i = 2; i < MAXX; ++i) { if (!fp[i]) { continue; } ps.push_back(i); if (i <= MAXX / i) { for (int j = i * i; j < MAXX; j += i) { fp[j] = false; } } } int n; cin >> n; a.resize(n); for (int i = 0; i < n; ++i) { cin >> a[i]; } int root = 1; build_tree(root, 0, n); int Q; cin >> Q; for (int q = 0; q < Q; ++q) { int l, r; cin >> l >> r; l = (l + ans) % n; r = (r + ans) % n; if (l > r) { swap(l, r); } ++cnt; seq.clear(); ans = 1; get(root, 0, n, l, r + 1); for (auto& x : seq) { ans = ans * (ll)powmod(x, val[x]) % MOD; } cout << ans << nl; } #ifdef LOCAL cerr << "Time execute: " << clock() / (double)CLOCKS_PER_SEC << " sec" << endl; #endif return 0; }
1425
A
Arena of Greed
Lately, Mr. Chanek frequently plays the game \textbf{Arena of Greed}. As the name implies, the game's goal is to find the greediest of them all, who will then be crowned king of Compfestnesia. The game is played by two people taking turns, where Mr. Chanek takes the first turn. Initially, there is a treasure chest containing $N$ gold coins. The game ends if there are no more gold coins in the chest. In each turn, the players can make one of the following moves: - Take one gold coin from the chest. - Take half of the gold coins in the chest. This move is only available if the number of coins in the chest is even. Both players will try to maximize the number of coins they have. Mr. Chanek asks your help to find the maximum number of coins he can get at the end of the game if both he and the opponent play optimally.
We can solve this problem greedily. The tricky case is if the current number of coins is a multiple of $4$ greater than $8$, it is optimal to take 1 coin instead of taking $\frac{N}{2}$ coins. The proof: Let's say the number of coins is $4k$ for some $k>0$. If we take $2k$ coins: Mr. Chanek takes $2k$ coins. (remaining: $2k$) Opponent takes $k$ coins. (remaining: $k$) The number of coins Mr. Chanek gets is $2k$ coins. Let's say we take $1$ coin: Mr. Chanek takes $1$ coin. (remaining: $4k-1$) Opponent takes $1$ coin. (remaining: $4k-2$) Mr. Chanek takes $2k - 1$ coins. (remaining: $2k-1$). Opponent takes $1$ coin. (remaining: $2k-2$). In both cases, Mr. Chanek ends up with $2k$ coins. However, there are $2k-2$ coins remaining in the second example, while only $k$ coins remain in the first example. So, if $2k-2 > k$, we take the second case. It turns out, $2k - 2 \le k$ is only true if $k \le 2$ (and for $k = 2$ both options give the same result). So if the number of coins is $4$, we take $2$ coins, else we take $1$ coin. Alternatively, you can also look at the number of coins the opponent gained. On the first case, the opponent gains $k$ coins, while on the second case, the opponent gains $2$ coins. In both cases, we gain $2k$ coins. So, it's optimal to choose the option that maximizes the difference of coins gained ($k$ or $2$). For other possible number of coins: if the number of coins is even but not a multiple of $4$, we take half of the coins. If the number of coins is odd, we take $1$ coin. Time complexity: $O(T \log N)$
[ "games", "greedy" ]
1,400
#include <bits/stdc++.h> using namespace std; #define pb push_back #define pf push_front #define pob pop_back #define pof pop_front #define mp make_pair #define fi first #define se second typedef long long lli; typedef pair<int, int> ii; typedef pair<lli, lli> ll; lli solve(lli n) { if (n < 5) return max(1ll, n - 1); if ((n % 2 == 1) || (n % 4 == 0)) return (n - solve(n - 1)); return (n - solve(n / 2)); } int main() { lli tc, n; scanf("%lld", &tc); while (tc--) { scanf("%lld", &n); printf("%lld\n", solve(n)); } return 0; }
1425
B
Blue and Red of Our Faculty!
It's our faculty's 34th anniversary! To celebrate this great event, the Faculty of Computer Science, University of Indonesia (Fasilkom), held CPC - Coloring Pavements Competition. The gist of CPC is two players color the predetermined routes of Fasilkom in Blue and Red. There are $N$ Checkpoints and $M$ undirected predetermined routes. Route $i$ connects checkpoints $U_i$ and $V_i$, for $(1 \le i \le M)$. It is guaranteed that any pair of checkpoints are connected by using one or more routes. The rules of CPC are as follows: - Two players play in each round. One player plays as blue, the other plays as red. For simplicity, let's call these players $Blue$ and $Red$. - $Blue$ will color every route he walks on blue, $Red$ will color the route he walks on red. Both players start at checkpoint number $1$. Initially, all routes are gray. - Each phase, from their current checkpoint, $Blue$ and $Red$ select a \textbf{different} gray route and move to the checkpoint on the other end of the route simultaneously. - The game ends when $Blue$ or $Red$ can no longer move. That is, there are no two distinct gray routes they can choose to continue moving. Chaneka is interested in participating. However, she does not want to waste much energy. So, she is only interested in the number of final configurations of the routes after each round. Turns out, counting this is also exhausting, so Chaneka asks you to figure this out! Two final configurations are considered different if there is a route $U$ in a different color in the two configurations.
First, we must notice that the graph is a clover graph. The graph has cycles with vertex $1$ in common. We can transform the graph into an array $A$, where $A_i$ is the number of edges in cycle $i$. On the final configuration, each cycle has three possible endings: The whole cycle is colored red or blue The whole cycle is gray exactly one cycle is colored in two different colors (can be red and blue, red and gray, blue and gray). This is the last cycle they visit. The third point suggests a dynamic programming approach: for each cycle in $A$, set this cycle as the last cycle, and count how many configurations. So for the other cycles, we can calculate $DP[i][diff][takeAll]$, the number of configurations using the first $i$ cycles, where $diff$ is the absolute difference between the number of edges blue and red took, and $takeAll$ is a boolean that indicates all cycles 1...i must be taken by either $red$ or $blue$. The transitions are quite straightforward, and it's easier to see the code if you are confused. Calculating this DP is done in $O(N^2)$. Why flag $takeAll$? Because for a fixed last cycle, there are two possible final positions for $red$ and $blue$, assuming $C$ is the size of the fixed cycle: If one player ends in vertex $1$: $2 \cdot DP[N][C-1][1]$. All other cycles must be colored, else both players can still move. If both players end inside the cycle: $2 \cdot \sum\limits_{i=0}^{C-2}DP[N][i][0]$ There is an additional case where both players finish at vertex $1$. To find this, we recalculate DP using the whole array. The number of configurations is $DP[N][0][1]$. Calculating this for all elements of $A$ will give an $O(N^3)$ solution. However, since $\sum A = N$, there are at most $\sqrt{N}$ different values in $A$. So, we can calculate DP for only these $\sqrt{N}$ values. This gives an $O(N^2 \sqrt{N})$ solution which is sufficient to get accepted. 
Bonus $O(N^2)$ solution: The solution can be optimized further by partitioning $A$ into two parts: first part consists of all distinct values in the array (each appears once). The size of this part is at most $\sqrt{N}$. The second part is all other elements. We notice that when fixing cycles, the second part does not change, so we can calculate DP on this part exactly once in $O(N^2)$. We can then only recalculate DP in the first part. Since there are $\sqrt{N}$ elements, calculating DP on this is $O(N \sqrt{N})$. Since we calculate DP $\sqrt{N}$ times, the total complexity is $O(N^2)$. Then, merge with the first DP. $O(N^2 \sqrt{N})$
[ "divide and conquer", "dp" ]
2,600
#include <bits/stdc++.h> using namespace std; const int MAXN=2e3+5; int N, M, SA, SB, ans; int P[MAXN]; int H[MAXN]; int DP[MAXN][3*MAXN]; int PD[MAXN][3*MAXN]; vector <int> A, B; vector <int> V[MAXN]; set <int> S; //BEGIN TEMODPLATE //Template for DP Combin to avoid RLE const long long MOD=1e9+7, D=(1LL<<61)/MOD; //fast modulo for addition inline int add(int a) { return a<MOD?a:a-MOD; } //fast modulo for subtraction inline int sub(int a) { return a<0?a+MOD:a; } //fast modulo for multiplication inline int mul(long long a) { return add(a-MOD*((a>>29)*D>>32)); } //one-line fast exponentation int expo(long long a,int b) { return b?b&1?mul(a*expo(mul(a*a),b>>1)):expo(mul(a*a),b>>1):1; } //modular inverse for division inline int div(int a) { return expo(a,MOD-2); } //END TEMODPLATE void input() { cin>>N>>M; for (int i=0;i<M;i++) { int X, Y; cin>>X>>Y; V[X].push_back(Y); V[Y].push_back(X); } for (int X : V[1]) { int Y=1, H=1; while (X!=1) { Y=V[X][0]+V[X][1]-Y; swap(X,Y); H++; } B.push_back(H); } } void debug() { cin>>N; for (int i=0;i<N;i++) { cin>>M; B.push_back(M); B.push_back(M); } } int main () { input(); //debug(); sort(B.begin(),B.end()); for (int i=0;i<B.size();i+=2) { if (S.count(B[i])) { A.push_back(B[i]); } else { S.insert(B[i]); } } SA=A.size(); for (int x : S) { A.push_back(x); } SB=A.size(); DP[0][0]=1; for (int i=0;i<SB;i++) { P[i+1]=P[i]+A[i]; for (int j=0;j<=P[i+1];j++) { DP[i+1][j]=add(DP[i][j>A[i]?j-A[i]:A[i]-j]+DP[i][j+A[i]]); PD[i+1][j]=add(add(DP[i][j>A[i]?j-A[i]:A[i]-j]+PD[i][j+A[i]])+(j>=A[i]?PD[i][j-A[i]]:sub(mul((long long)i*DP[i][A[i]-j])-PD[i][A[i]-j]))); } } ans=add(DP[SB][0]+add(add(PD[SB][1]<<1)<<1)); for (int i=0;i<SB;i++) { for (int j=0;j<=P[i+1];j++) { DP[i+1][j]=add(DP[i][j]+add(DP[i][j>A[i]?j-A[i]:A[i]-j]+DP[i][j+A[i]])); } } for (int h=SB-1;h>=SA;h--) { for (int i=h+1;i<SB;i++) { for (int j=0;j<=P[i+1];j++) { DP[i][j]=add(DP[i-1][j]+add(DP[i-1][j>A[i]?j-A[i]:A[i]-j]+DP[i-1][j+A[i]])); } } for (int i=1;i<A[h]-1;i++) { 
H[A[h]]=add(H[A[h]]+DP[SB-1][i]); } H[A[h]]=add(H[A[h]]<<1); H[A[h]]=add(H[A[h]]+DP[SB-1][0]); H[A[h]]=add(H[A[h]]<<1); } for (int i=0;i<SB;i++) { ans=add(ans+H[A[i]]); } cout<<ans<<'\n'; }
1425
C
Captain of Knights
Mr. Chanek just won the national chess tournament and got a huge chessboard of size $N \times M$. Bored with playing conventional chess, Mr. Chanek now defines a function $F(X, Y)$, which denotes the minimum number of moves to move a knight from square $(1, 1)$ to square $(X, Y)$. It turns out finding $F(X, Y)$ is too simple, so Mr. Chanek defines: $G(X, Y) = \sum_{i=X}^{N} \sum_{j=Y}^{M} F(i, j)$ Given X and Y, you are tasked to find $G(X, Y)$. A knight can move from square $(a, b)$ to square $(a', b')$ if and only if $|a - a'| > 0$, $|b - b'| > 0$, and $|a - a'| + |b - b'| = 3$. Of course, the knight cannot leave the chessboard.
An important observation is that it is guaranteed there is a sequence of moves from $(1, 1)$ to $(X, Y)$ where we only visit squares in the rectangle $(1, 1)$ to $(X, Y)$. So we can calculate $F(X, Y)$ independently without concerning $N$ and $M$. To ease the implementation and explanation, we define $\int(f(x)) = \sum_{i = 1}^{x}f(i)$. So the sum of an order two polynomial $F(x) = a \cdot x^2 + b \cdot x + c$ is $\int(F(x)) = \frac{a}{3} x^3 + \frac{a + b}{2} x^2 + \frac{a + 3b + 6c}{6} x$. Since the number of formulas in this problem tutorial is quite large, we will omit the method to find it because then the editorial will be quite long. Let's define $P(x,y) = \sum_{i = 3}^{x} \sum_{j = 3}^{y} F(i,j)$. If we can find $P(x, y)$ fast, the answer is $P(N,M) - P(X-1,M) - P(N,Y-1) + P(X-1,Y-1)$. Calculating $P$ can be divided into three cases: $\bullet$ Case 1, For $x = 3$, $y \le 5$, we can use brute force. $\bullet$ Case 2, For $x < 2y$ and $y < 2x$. Let $z = (x + y - 2)$, $F(x, y) = s(z) = \frac{z}{3} + z \bmod 3$. Assume $t(x) = \int(s(x))$. For rows $i$ from 4 to $\frac{y}{2}$, the sum of $F$ is $t(3(i-1)) - t(\frac{3i}{2}-2)$. For the next rows $i$ greater than $\frac{y}{2}$, the sum of $F$ is $t(i+y-2) - t(\frac{3i}{2}-2)$. To ease in finding the sum, we define: $a(x) = t(1) + t(2) + ... + t(x)$ $b(x) = t(3) + t(6) + ... + t(3x)$ $c(x) = t(1) + t(2) + t(4) + t(5) + ... + t(\frac{3x-1}{2})$ $t(3x) = \int(3x+1)$, $a(3x) = \int(3 \int(3x+1) - (3x+1))$ $b(x) = \int(\int(3x+1))$ $c(2x) = a(3x) - b(x)$. $\bullet$ Case 3, for $2 \cdot y \le x$ or $2 \cdot x \le y$. First see the illustration below for context. The sum of $F(x, y)$ for each block of two columns starting at an even number is the same. We define $blocks$ as these columns. So, block $1$ is the $[3, 4]$ block, block $2$ is the $[5, 6]$ block, and so on. The sum of a row in block $i$ is $(2 \cdot i + 5)$. Since there are $i$ rows in block $i$, the sum of all rows in block $i$ is $i(2 \cdot i + 5)$. 
To ease, let's define two functions: $d(x) = \int{x (2 \cdot x + 5)}$. Sum of all $F$ for the first $x$ blocks. $e(x) = \int{(2 \cdot x + 5 )}$. Sum of a row for the first $x$ blocks. The sum of this part is divided into three regions which is also described in the image above (red, yellow, green). The sum on the green area is $d(N-1)$. There is a square of height $N$ from columns $N-1$ to $\frac{M}{2}$. So, the sum of this region is $N(e(\frac{M}{2})-e(N-1))$. The red area. This is a single row at the end if $M$ is odd. If $N$ is even, the sum is $\frac{N}{2} \cdot (M + 6)$. If $N$ is odd, we add an additional square which is $F(N + 2, M + 5)$ (remember we simplify $N$ and $M$ at the beginning). So, the sum of case three is $d(N-1) + N(e(\frac{M}{2})-e(N-1))$ + red area. Calculate this formula again with the rows swapped for the case $y \ge 2 \cdot x$. Note: Calculating $t(x)$ and $a(x)$ when $x$ is not a multiple of $3$ can be done by finding the closest multiple of $3$ and then brute-forcing the last columns. That can also be used to calculate $c(x)$ and $e(x)$ when $x$ is odd. Time Complexity: $O(T)$
[ "math" ]
3,100
#include <bits/stdc++.h> using namespace std; const long long MOD=1e9+7; struct intmod { long long val; intmod operator + (const intmod other) const { intmod ret; ret.val=val+other.val; if (ret.val>=MOD) { ret.val-=MOD; } return ret; } intmod operator - (const intmod other) const { intmod ret; ret.val=val+MOD-other.val; if (ret.val>=MOD) { ret.val-=MOD; } return ret; } intmod operator * (const intmod other) const { intmod ret; ret.val=val*other.val%MOD; return ret; } } HALF={(MOD+1)/2}, THIRD={(MOD+1)/3}; intmod make_int(long long x) { return {x}; } struct polinom { intmod koef[4]; intmod ans(intmod x) { intmod ret; ret=koef[3]; ret=(ret*x)+koef[2]; ret=(ret*x)+koef[1]; ret=(ret*x)+koef[0]; return ret; } polinom sum() { polinom ret; ret.koef[0]={0}; ret.koef[1]=koef[0]+((koef[1]+(koef[2]*THIRD))*HALF); ret.koef[2]=(koef[1]+koef[2])*HALF; ret.koef[3]=koef[2]*THIRD; return ret; } polinom operator + (const polinom other) const { polinom ret; ret.koef[0]=koef[0]+other.koef[0]; ret.koef[1]=koef[1]+other.koef[1]; ret.koef[2]=koef[2]+other.koef[2]; ret.koef[3]=koef[3]+other.koef[3]; return ret; } polinom operator - (const polinom other) const { polinom ret; ret.koef[0]=koef[0]-other.koef[0]; ret.koef[1]=koef[1]-other.koef[1]; ret.koef[2]=koef[2]-other.koef[2]; ret.koef[3]=koef[3]-other.koef[3]; return ret; } polinom operator * (const intmod other) const { polinom ret; ret.koef[0]=koef[0]*other; ret.koef[1]=koef[1]*other; ret.koef[2]=koef[2]*other; ret.koef[3]=koef[3]*other; return ret; } } BASE={1,3,0,0}; intmod f(long long K) { polinom save; save=BASE; save=save.sum(); intmod ret; ret=save.ans({K/3}); if (K%3>=1) { ret=ret+make_int(K/3+1); } if (K%3>=2) { ret=ret+make_int(K/3+2); } return ret; } intmod a(long long K) { polinom save; save=BASE; save=save.sum(); save=save*make_int(3); save=save-BASE; save=save.sum(); intmod ret; ret=save.ans({K/3}); if (K%3>=1) { ret=ret+f(K); } if (K%3>=2) { ret=ret+f(K-1); } return ret; } intmod b(long long K) { polinom save; save=BASE; 
save=save.sum(); save=save.sum(); return save.ans({K}); } intmod c(long long K) { return a(3*K/2)-b(K/2); } intmod d(long long K) { polinom save; save={0,5,2,0}; save=save.sum(); return save.ans({K}); } intmod e(long long K) { polinom save; save={5,2,0,0}; save=save.sum(); return save.ans({K}); } intmod P(long long N,long long M) { long long K; N=min(N,2*M-1); M=min(M,2*N-1); K=max(3LL,M/2); if (N<=3) { return {0}; } intmod ret; ret=a(N+M-2)-a(M+K-2); ret=ret+b(K-1)-b(2); ret=ret-c(N-1)+c(2); return ret; } intmod Q(long long N,long long M) { N=min(N,M/2)-2; M=M-5; if (N<=0) { return {0}; } intmod ret; ret=d(N-1); ret=ret+((e(M/2)-e(N-1))*make_int(N)); if (M%2>=1) { ret=ret+(make_int(N/2)*make_int(M+6)); if (N%2>=1) { ret=ret+make_int(2*((M+1)/4)+3); } } return ret; } intmod area(long long N,long long M) { if (N<3) { return {0}; } intmod ret; ret={0}; for (long long i=3;i<=5;i++) { if (i<=M) { ret=ret+make_int(7-i); } } ret=ret+P(N,M); ret=ret+Q(N,M); ret=ret+Q(M,N); return ret; } long long Nmin, Mmin; long long Nmax, Mmax; intmod ans; int main () { ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0); long long tc; cin>>tc; while(tc--) { cin>>Nmin>>Mmin; cin>>Nmax>>Mmax; ans=area(Nmax,Mmax)+area(Nmin-1,Mmin-1); ans=ans-(area(Nmax,Mmin-1)+area(Nmin-1,Mmax)); cout<<ans.val<<'\n'; } return 0; }
1425
D
Danger of Mad Snakes
Mr. Chanek The Ninja is one day tasked with a mission to handle mad snakes that are attacking a site. Now, Mr. Chanek already arrived at the hills where the destination is right below these hills. The mission area can be divided into a grid of size $1000 \times 1000$ squares. There are $N$ mad snakes on the site, the i'th mad snake is located on square $(X_i, Y_i)$ and has a danger level $B_i$. Mr. Chanek is going to use the Shadow Clone Jutsu and Rasengan that he learned from Lord Seventh to complete this mission. His attack strategy is as follows: - Mr. Chanek is going to make $M$ clones. - Each clone will choose a mad snake as the attack target. Each clone must pick a different mad snake to attack. - All clones jump off the hills and attack their respective chosen target at once with Rasengan of radius $R$. If the mad snake at square $(X, Y)$ is attacked with a direct Rasengan, it and all mad snakes at squares $(X', Y')$ where $max(|X' - X|, |Y' - Y|) \le R$ will die. - The real Mr. Chanek will calculate the score of this attack. The score is defined as the square of the sum of the danger levels of all the killed snakes. Now Mr. Chanek is curious, what is the sum of scores for every possible attack strategy? Because this number can be huge, Mr. Chanek only needs the output modulo $10^9 + 7$.
We are going to count the contribution of two snakes $(I, J)$ separately. Assume that the current snakes we are interested in have danger values $B_1$ and $B_2$. The terms where they are killed must be of the form $(... + B_1 + B_2 + ...)^2$. Expanding this gives $B_1^2 + B_2^2 + 2B_1B_2$. So, the contribution of $B_1B_2$ to the answer is the number of attack strategies where both $I$ and $J$ are killed. We can find this using the inclusion-exclusion principle. First, let's define: $W$: the number of squares where when hit will result in $I$ and $J$ being killed. $U$: the number of squares where when hit will result in $I$ killed, but not $J$. $V$: the number of squares where when hit will result in $J$ killed, but not $I$. We can calculate all these values easily using prefix sum DP. Then, there are two cases to consider: Case 1: a square in $W$ is hit. The number of ways is $C(N, M) - C(N - W, M)$. Case 2: a square in $W$ is not hit. However, a square in $U$ and $V$ is hit. Let's define $N' = N - W$. The number of ways is $C(N', M) - C(N' - U, M) - C(N' - V, M) + C(N' - U - V, M)$. Add the number of ways times $2B_1B_2$ to the answer. Also, don't forget to count the contribution of $B_{1}^2$. Time Complexity: $O(N^2)$
[ "combinatorics", "dp", "math" ]
2,300
#include <bits/stdc++.h> using namespace std; #define MAXN 1010 #define MOD 1000000007 typedef long long ll; typedef pair<int,int> pii; ll pref[MAXN+1][MAXN+1]; int dnc(int bas, int power, int mod) { if(power==0) return 1; if(power%2==0) { int res=dnc(bas,power/2,mod); return 1LL*res*res%mod; } return 1LL*bas*dnc(bas,power-1,mod)%mod; } ll f[2*MAXN+1],inv[2*MAXN+1]; void precalc() { f[0] = inv[0]=1; for(int temp=1;temp<=2*MAXN;temp++) { //cout<<temp<<"\n"; f[temp]=1ll*temp*f[temp-1]%MOD; inv[temp]=1ll*dnc(temp, MOD-2, MOD)*inv[temp-1]%MOD; } } ll comb(int n, int k) { if(k>n || k<0) return 0; //cout<<"combo "<<n<<" "<<k<<"\n"; return f[n]*inv[k]%MOD*inv[n-k]%MOD; } ll getsum(int x1, int y1, int x2, int y2) { x1 = max(x1, 1); x2 = min(x2, 1000); y1 = max(y1, 1); y2 = min(y2, 1000); if(x1 > x2 || y1 > y2) return 0; //cout<<"getsum "<<x1<<" "<<y1<<" "<<x2<<" "<<y2<<" "<<pref[x2][y2] - pref[x2][y1-1] - pref[x1-1][y2] + pref[x1-1][y1-1]<<"\n"; return pref[x2][y2] - pref[x2][y1-1] - pref[x1-1][y2] + pref[x1-1][y1-1]; } ll getNumberThatCanKill(int x, int y, int r) { return getsum(x-r, y-r, x+r, y+r); } int main() { precalc(); int n, m, r; cin>>n>>m>>r; int temp, temp2; vector<pii> snakes; int b[n]; //cout<<"in\n"; for(temp=0;temp<n;temp++) { int x,y; cin>>x>>y>>b[temp]; pref[x][y] += 1; snakes.push_back({x, y}); } for(int temp=1;temp<=MAXN;temp++) for(int temp2=1;temp2<=MAXN;temp2++) { pref[temp][temp2] = pref[temp][temp2] + pref[temp-1][temp2] + pref[temp][temp2-1] - pref[temp-1][temp2-1]; } //cout<<"out\n"; ll sum = 0; for(temp=0;temp<snakes.size();temp++) { for(temp2=temp;temp2<snakes.size();temp2++) { //cout<<temp<<" "<<temp2<<"\n"; ll cnt = 0; int x1 = snakes[temp].first, y1 = snakes[temp].second; int x2 = snakes[temp2].first, y2 = snakes[temp2].second; // case 1, exist point that hits both int minX = max(x1, x2) - r, minY = max(y1, y2) - r; int maxX = min(x1, x2) + r, maxY = min(y1, y2) + r; int w = getsum(minX, minY, maxX, maxY); cnt += comb(n, m) - comb(n-w, m); 
if(cnt<0) cnt+=MOD; //cout<<"yeah\n"; // case 2, no snakes hits both int u = getNumberThatCanKill(x1, y1, r) - w, v = getNumberThatCanKill(x2, y2, r) - w; cnt += comb(n-w, m) - comb(n-u-w, m) - comb(n-v-w, m) + comb(n-u-v-w, m); cnt%=MOD; if(cnt<0) cnt+=MOD; if(temp == temp2) sum+=cnt*b[temp]%MOD*b[temp2]%MOD; else sum+=2ll*cnt*b[temp]%MOD*b[temp2]%MOD; sum%=MOD; } } cout<<sum<<"\n"; }
1425
E
Excitation of Atoms
Mr. Chanek is currently participating in a science fair that is popular in town. He finds an exciting puzzle in the fair and wants to solve it. There are $N$ atoms numbered from $1$ to $N$. These atoms are especially quirky. Initially, each atom is in a normal state. Each atom can also be in an excited state. Exciting atom $i$ requires $D_i$ energy. When atom $i$ is excited, it will give $A_i$ energy. You can excite any number of atoms (including zero). These atoms also form a peculiar one-way bond. For each $i$, $(1 \le i < N)$, if atom $i$ is excited, atom $E_i$ will also be excited at no cost. Initially, $E_i$ = $i+1$. Note that atom $N$ cannot form a bond to any atom. Mr. Chanek must change \textbf{exactly} $K$ bonds. Exactly $K$ times, Mr. Chanek chooses an atom $i$, $(1 \le i < N)$ and changes $E_i$ to a different value other than $i$ and the current $E_i$. Note that an atom's bond can remain unchanged or be changed more than once. Help Mr. Chanek determine the maximum energy that he can achieve! \textbf{note:} You must first change \textbf{exactly} $K$ bonds before you can start exciting atoms.
Notice that for $K = 2$, we can always make an excitation route starting from any atom $i$ in $1 \le i < N$ that excites all atoms. So, for $K > 2$, we can always make the routes by toggling bonds. So, there are three cases: If $K = 0$, it is optimal to excite only one atom. We try to excite atom $i$ for every $i$ and calculate the gained energy using prefix sum. If $K \ge 2$, we can either excite one atom $i$ in $1 \le i < N$, which will excite all atoms, or excite only the last atom (which can be optimal if it has the lowest $D$). $K = 1$ is a tricky one. There are 5 cases to consider: Change $E_{N-1}$ to $1$. Then excite the atom with the lowest $D_i$ in $1 \le i < N$. Also, excite atom $N$ if the energy gained is positive. Change $E_i$ to $1$ and then excite atom $i$ and $i+1$ for $(1 < i < N)$. It is optimal to excite $i+1$ because if not, it will be worse than case 1. Change $E_1$ to $3$ and then excite $1$ and $2$. Change $E_1$ to $N$ and then excite atom $i$, for $(1 < i \le N)$. Change $E_i$ to $i+2$ and then excite atom $1$, for $(1 < i < N)$. Note that this is only optimal if we excite atom $1$, else it is worse than case $4$. Handling all these cases will get you accepted. Time complexity: $O(N)$
[ "greedy", "implementation" ]
2,200
#include <iostream>
using namespace std;
typedef long long LL;
typedef pair<LL, LL> PLL;
#define fi first
#define se second

const LL LINF = 4557430888798830399LL;  // 0x3F3F3F3F3F3F3F3F, safe "infinity"
const LL MAXN = 1000000;

LL n, k;
// sum[i] = isi[i].fi + isi[i+1].fi + ... + isi[n].fi (suffix sums of the
// gains; built in main, sum[n+1] == 0 from zero-initialized globals).
LL sum[MAXN + 5];
// isi[i].fi = A_i (energy gained when atom i becomes excited; first input
// line), isi[i].se = D_i (cost to excite atom i by hand; second input line)
// -- deduced from how check0 uses them; confirm against the statement order.
PLL isi[MAXN + 5];

// Below, "X" denotes the single atom excited by hand (1 <= X < N).

/**
 * [This checks with no arrow change, K == 0]
 *
 * Try to pick 1:
 *   Picking X excites the whole chain X..N; profit is the suffix sum of
 *   gains minus D_X. Try every X via the precomputed suffix sums.
 *
 * Try to pick >= 2:
 *   No use: if you must pick two atoms u and v, one of them already
 *   reaches the other along the chain, so picking just that one is
 *   never worse.
 */
LL check0() {
    LL ans = 0;  // exciting nothing at all is always allowed
    for (int i = n; i >= 1; i--)
        ans = max(ans, sum[i] - isi[i].se);
    return ans;
}

/**
 * [These check with one arrow change, K == 1]
 *
 * Try to pick 1. If you pick X:
 *   Case 1: atom N is included. If X != 1, just do a prefix sum.
 *           If X is 1, pick the smallest one to skip.
 *   Case 2: atom N is not included. You can make two cycles:
 *           1 -> 2 -> .. -> N-1 -> 1, and N alone; earn 1..N-1.
 *
 * Try to pick 2:
 *   If you pick X and N: make the two cycles above; earn 1..N.
 *   If you pick U and V, both non-N: earn 1..N.
 */
// check1a: reroute E_{N-1} to 1 (cycle over 1..N-1), excite the cheapest
// atom among 1..N-1, and take atom N separately only if profitable.
LL check1a() {
    LL ans = LINF;
    for (int i = 1; i < n; i++)
        ans = min(ans, isi[i].se);
    ans = (sum[1] - isi[n].fi) - ans;            // gains of 1..N-1 minus cheapest cost
    LL profit = max(0LL, isi[n].fi - isi[n].se); // atom N taken iff it pays off
    return max(ans + profit, profit);
}

// check1b: excite the two cheapest atoms among 1..N-1 and collect every gain.
LL check1b() {
    PLL maksi = {LINF, LINF};  // two smallest D values seen so far
    for (int i = 1; i < n; i++) {
        if (isi[i].se <= maksi.fi) {
            maksi.se = maksi.fi;
            maksi.fi = isi[i].se;
        } else if (isi[i].se <= maksi.se) {
            maksi.se = isi[i].se;
        }
    }
    return sum[1] - (maksi.fi + maksi.se);
}

// check1c: either excite a single atom i >= 2 (suffix chain as in check0),
// or excite atom 1 while skipping the atom with the smallest gain.
LL check1c() {
    LL ans = 0;
    for (int i = n; i >= 2; i--)
        ans = max(ans, sum[i] - isi[i].se);
    LL mini = LINF;  // smallest gain among atoms 2..N-1, the one skipped
    for (int i = 2; i < n; i++)
        mini = min(mini, isi[i].fi);
    return max(ans, sum[1] - mini - isi[1].se);
}

/**
 * [This checks with two or more arrow changes, K >= 2]
 *
 * Try to pick 1:
 *   Case 1: pick X. Make a line graph ending at N, visiting all of 1..N:
 *           X -> 1 -> 2 -> ... (X-1) -> (X+1) -> .. -> (N-1) -> N,
 *           achievable with only 2 changes; earn 1..N.
 *   Case 2: pick N. Handled like check0's last atom: it can't go anywhere.
 *
 * Try to pick >= 2:
 *   No use: X must be picked, but picking X alone already visits all.
 */
LL check2() {
    LL ans = LINF;  // cheapest atom among 1..N-1 to start the full tour
    for (int i = 1; i < n; i++)
        ans = min(ans, isi[i].se);
    return max(isi[n].fi - isi[n].se, sum[1] - ans);
}

int main() {
    cin >> n >> k;
    for (int i = 1; i <= n; i++) cin >> isi[i].fi;
    for (int i = 1; i <= n; i++) cin >> isi[i].se;
    // Suffix sums of the gains; sum[n+1] is 0 (global array).
    for (int i = n; i >= 1; i--) sum[i] = isi[i].fi + sum[i + 1];
    LL ans = 0;
    if (k == 0)
        ans = max(ans, check0());
    else if (k == 1)
        ans = max(ans, max(check1c(), max(check1a(), check1b())));
    else
        ans = max(ans, check2());
    cout << ans << endl;
    return 0;
}
1425
F
Flamingoes of Mystery
This is an interactive problem. You have to use a flush operation right after printing each line. For example, in C++ you should use the function fflush(stdout), in Java — System.out.flush(), in Pascal — flush(output) and in Python — sys.stdout.flush(). Mr. Chanek wants to buy a flamingo to accompany his chickens on his farm. Before going to the pet shop, Mr. Chanek stops at an animal festival to have fun. It turns out there is a carnival game with a flamingo as the prize. There are $N$ mysterious cages, which are numbered from $1$ to $N$. Cage $i$ has $A_i$ $(0 \le A_i \le 10^3)$ flamingoes inside $(1 \le i \le N)$. However, the game master keeps the number of flamingoes inside a secret. To win the flamingo, Mr. Chanek must guess the number of flamingoes in each cage. Coincidentally, Mr. Chanek has $N$ coins. Each coin can be used to ask once, what is the total number of flamingoes inside cages numbered $L$ to $R$ inclusive? With $L < R$.
First get the values of the first three elements using three queries: "? 1 3" "? 1 2" "? 2 3" Once you get $A_i$, element $A_{i+1}$ can be obtained using "? i i+1" Time complexity: $O(N)$
[ "interactive" ]
1,400
#include <iostream>
using namespace std;

const int MAXN = 1000;

int n;
int cage[MAXN + 5];  // deduced flamingo count of each cage

// Ask the judge for the sum of cages l..r (protocol requires l < r) and
// read the reply. endl flushes, as the interactive protocol demands.
int ask(int l, int r) {
    cout << "? " << l << " " << r << endl;
    int reply;
    cin >> reply;
    return reply;
}

// Report every deduced count on one line: "! a1 a2 ... an".
void report() {
    cout << "!";
    for (int i = 1; i <= n; i++) cout << " " << cage[i];
    cout << endl;
}

int main() {
    cin >> n;
    // Query 1 gives the grand total; query 2 isolates cage 1.
    cage[n] = ask(1, n);
    cage[1] = cage[n] - ask(2, n);
    // Queries 3..n: each adjacent pair peels off one new cage.
    for (int i = 2; i <= n - 1; i++)
        cage[i] = ask(i - 1, i) - cage[i - 1];
    // Whatever remains of the grand total belongs to the last cage.
    for (int i = 1; i < n; i++)
        cage[n] -= cage[i];
    report();
}
1425
H
Huge Boxes of Animal Toys
Chaneka has a hobby of playing with animal toys. Every toy has a different fun value, a real number. Chaneka has four boxes to store the toys with specification: - The first box stores toys with fun values in range of $(-\infty,-1]$. - The second box stores toys with fun values in range of $(-1, 0)$. - The third box stores toys with fun values in range of $(0, 1)$. - The fourth box stores toys with fun value in range of $[1, \infty)$. Chaneka has $A$, $B$, $C$, $D$ toys in the first, second, third, and fourth box, respectively. One day she decides that she only wants one toy, a super toy. So she begins to create this super toy by sewing all the toys she has. While the number of toys Chaneka has is more than 1, she takes two different toys randomly and then sews them together, creating a new toy. The fun value of this new toy is equal to the multiplication of fun values of the sewn toys. She then puts this new toy in the appropriate box. She repeats this process until she only has one toy. This last toy is the super toy, and the box that stores this toy is the special box. As an observer, you only know the number of toys in each box initially but do not know their fun values. You also don't see the sequence of Chaneka's sewing. Determine which boxes can be the special box after Chaneka found her super toy.
The main observation of this problem is that the sewing order does not matter. The final fun value is the multiplication of fun values of all toys. There are two separate cases we must consider: Check the sign of the toy. If $(A + B)$ is even, the sign is positive. Else, it's negative. Check whether it is possible to make a fun value with absolute value lower than 1 and greater than 1. The former is possible if $(B + C) > 0$, the latter is possible if $(A + D) > 0$. Combining these two cases will give you the solution. Time Complexity: $O(T)$
[ "constructive algorithms" ]
1,300
#include <bits/stdc++.h>
using namespace std;

// For each test case, print which of the four boxes can hold the final
// product ("Ya" = yes, "Tidak" = no), in box order 1..4.
// Sign of the product: negative iff (A + B) is odd. Magnitude: |.| >= 1
// reachable iff A + D > 0, |.| < 1 reachable iff B + C > 0; the two
// degenerate branches below handle the cases where one side is impossible.
int main() {
    int T;
    cin >> T;
    while (T--) {
        int A, B, C, D;
        cin >> A >> B >> C >> D;
        const char *line;
        if (A == 0 && D == 0) {
            // Only magnitudes below 1 exist; the sign decides box 2 vs 3.
            line = (B % 2 == 1) ? "Tidak Ya Tidak Tidak\n"
                                : "Tidak Tidak Ya Tidak\n";
        } else if (B == 0 && C == 0) {
            // Only magnitudes of at least 1 exist; sign decides box 1 vs 4.
            line = (A % 2 == 1) ? "Ya Tidak Tidak Tidak\n"
                                : "Tidak Tidak Tidak Ya\n";
        } else {
            // Both magnitude ranges reachable; sign picks the pair.
            line = ((A + B) % 2 == 1) ? "Ya Ya Tidak Tidak\n"
                                      : "Tidak Tidak Ya Ya\n";
        }
        cout << line;
    }
}
1425
I
Impressive Harvesting of The Orchard
Mr. Chanek has an orchard structured as a rooted ternary tree with $N$ vertices numbered from $1$ to $N$. The root of the tree is vertex $1$. $P_i$ denotes the parent of vertex $i$, for $(2 \le i \le N)$. Interestingly, the height of the tree is not greater than $10$. Height of a tree is defined to be the largest distance from the root to a vertex in the tree. There exist a bush on each vertex of the tree. Initially, all bushes have fruits. Fruits will not grow on bushes that currently already have fruits. The bush at vertex $i$ will grow fruits after $A_i$ days since its last harvest. Mr. Chanek will visit his orchard for $Q$ days. In day $i$, he will harvest all bushes that have fruits on the subtree of vertex $X_i$. For each day, determine the sum of distances from every harvested bush to $X_i$, and the number of harvested bush that day. Harvesting a bush means collecting \textbf{all} fruits on the bush. For example, if Mr. Chanek harvests all fruits on subtree of vertex $X$, and harvested bushes $[Y_1, Y_2, \dots, Y_M]$, the sum of distances is $\sum_{i = 1}^M \text{distance}(X, Y_i)$ $\text{distance}(U, V)$ in a tree is defined to be the number of edges on the simple path from $U$ to $V$.
We classify each vertex $i$ into two types. heavy if $A_i > \sqrt{Q}$ and light if $A_i \le \sqrt{Q}$. We craft two different algorithms to solve each case separately. heavy nodes: Flatten the tree using pre-order transversal. Now querying a subtree becomes querying a subarray. Maintain a global set $S$ with all currently heavy node harvestable bushes, sorted by their pre-order index. We simulate each query. When querying subtree of $X$, we find all harvestable nodes in the set, remove them, and then add to the answer their contribution. The node is re-added the next time it is available to be queried. We can do this by maintaining a list of vectors denoting which nodes are harvestable again at each time. Since all nodes have $A_i > \sqrt{Q}$, each node is removed and re-added at most $\sqrt{Q}$ times. Since we are using set, it adds a log factor. So the total complexity of this is $O(N \sqrt{Q} log N)$. light nodes: We solve separately for each different value of $A_i$. Assume the current value we are solving is $A$. We build a segment tree beats aggeration directly on the tree. we do not flatten the tree; you will see why in the proof below. In each vertex, we store $R_i$, when this bush is available to be harvested. We also store $max_i$ and $min_i$, the maximum and minimum value of $R_i$ on the subtrees of $i$, respectively. Initially, $R_i = 0$ for all $i$. We do the following in order for each query $X$: Find all highest depth $i$ vertices in the subtree of $X$ with $max_i = 0$. Add their contribution to the answer and then lazily update the vertex and all of it's child's $R_i$ to $A$. Update the values of all $R_i$ to be $max(R_i - 1, 0)$. We can achieve this using segment tree beats. The break condition is when $min_i > 0$, the tag condition is when $max_i = 0$. We are going to prove that is yields an average complexity of $log N$. We are using the jargon from https://www.youtube.com/watch?v=UJyBHCXa-1g: Assume $mark$ is the maximum $R_i$ of a subtree. 
If a vertex has mark $M$, all marks $M$ on its subtree is deleted. We define $F(X)$ as the number of vertices with mark $0$ on the $X$ subtree. We also define P as the sum of $F(X)$ for all $X$. Initially, $P = F(1) = 1$. We observe the change of $P$ for each query when we query in the subtree of $X$: We put tag $A$ of $X$. Note that this does not increase the value of $P$. However, this gives an important property: each nonzero mark can appear at most once at any given time. This does not hold if we flatten the tree. $P$ will increase at most $log N$ for each $mark$ that decreases from $1$ to $0$ from our subtraction update. Since there can be at most one mark $1$ at any given time, the maximum increase of $P$ is $log N$ per query. $P$ will decrease by at least $1$ for each extra node we visit. And this is the only way to decrease $P$. Since option $2$ is the only way to increase $P$, we find our maximum value of $P$ is $Q log N$. Since option $3$ is the only way to decrease $P$ and $P$ cannot be negative, we find the number of extra nodes we visit is at most $Q log N$, giving us a $Q log N$ solution. Note: it is possible to omit the lazy propagation by storing the last time each vertex is harvested, instead of $R_i$. Dropping lazy propagation will speed up the code almost two times faster! Since we are solving for each $A_i < \sqrt{Q}$, the total complexity of this part is $O(Q \sqrt{N} log N)$. Combining the two algorithms (heavy and light), we get the accepted solution. In practice, the optimal bucket size is arround $\sqrt{Q}/2$, since STB has a larger constant factor. Sadly, it turns out a sophisticated $O(NQ)$ solution can pass. We should've made all $A's$ have the same value, so the solution is in $O(Q log N)$. Oh well, we hope this problem can be educational to you all :)
[ "data structures" ]
2,800
#pragma GCC optimize("Ofast,no-stack-protector,unroll-loops")
#pragma GCC target("sse,sse2,sse3,ssse3,sse4,sse4.1,sse4.2,popcnt,abm,mmx,avx,avx2,fma,tune=native")
#include <vector>
#include <iostream>
#include <string>
#include <tuple>
#include <algorithm>
#include <assert.h>
using namespace std;

const int NMAX = 50010;

// Heavily-optimized brute force: flatten the tree by pre-order so each
// subtree becomes a contiguous interval, then scan that interval per query.
vector<int> children[NMAX];
int preIdx[NMAX];       // pre-order index of each original vertex
int subtreeEnd[NMAX];   // one past the last pre-order index of the subtree
int depth[NMAX];        // depth from the root, indexed by pre-order position
int regrow[NMAX];       // A_i (regrow delay), indexed by pre-order position
int regrowInput[NMAX];  // A_i as read, indexed by original label
int readyAt[NMAX];      // first day index the bush is harvestable again

// Pre-order DFS filling preIdx/depth/subtreeEnd; `counter` is the running
// pre-order position.
void dfs(int v, int& counter, int h = 0) {
    preIdx[v] = counter++;
    depth[preIdx[v]] = h;
    for (int c : children[v]) dfs(c, counter, h + 1);
    subtreeEnd[preIdx[v]] = counter;
}

int main() {
    ios_base::sync_with_stdio(0);
    cin.tie(0);
    int n, q;
    cin >> n >> q;
    for (int i = 1; i <= n; i++) cin >> regrowInput[i];
    for (int i = 2; i <= n; i++) {
        int par;
        cin >> par;
        children[par].push_back(i);
    }
    int counter = 0;
    dfs(1, counter);
    // Re-index the regrow delays by pre-order position.
    for (int i = 1; i <= n; i++) regrow[preIdx[i]] = regrowInput[i];

    for (int day = 0; day < q; day++) {
        int x;
        cin >> x;
        x = preIdx[x];
        int distSum = 0, harvested = 0;
        // Branch-free scan of the subtree interval: `take` is 1 when the
        // bush has fruit today, 0 otherwise.
        for (int i = x; i < subtreeEnd[x]; i++) {
            int take = (readyAt[i] <= day);
            distSum += take * depth[i];
            harvested += take;
            readyAt[i] = readyAt[i] * (1 - take) + (day + regrow[i]) * take;
        }
        // Convert absolute depths to distances from the query vertex.
        distSum -= harvested * depth[x];
        cout << distSum << ' ' << harvested << '\n';
    }
    return 0;
}
1426
A
Floor Number
Vasya goes to visit his classmate Petya. Vasya knows that Petya's apartment number is $n$. There is only one entrance in Petya's house and the distribution of apartments is the following: the first floor contains $2$ apartments, every other floor contains $x$ apartments each. Apartments are numbered starting from one, from the first floor. I.e. apartments on the first floor have numbers $1$ and $2$, apartments on the second floor have numbers from $3$ to $(x + 2)$, apartments on the third floor have numbers from $(x + 3)$ to $(2 \cdot x + 2)$, and so on. Your task is to find the number of floor on which Petya lives. Assume that the house is always high enough to fit at least $n$ apartments. You have to answer $t$ independent test cases.
If $n \le 2$ then the answer is $1$. Otherwise, you can "remove" the first floor and then the answer is $\left\lfloor\frac{n - 3}{x}\right\rfloor + 2$.
[ "implementation", "math" ]
800
def floor_number(n: int, x: int) -> int:
    """Return the floor on which apartment ``n`` is located.

    The first floor holds apartments 1 and 2; every floor above it holds
    exactly ``x`` apartments.

    :param n: apartment number (1-based).
    :param x: apartments per floor, from the second floor upward.
    :return: 1-based floor number.
    """
    if n <= 2:
        return 1
    # Strip the two first-floor apartments; the rest are packed x per
    # floor starting at floor 2.
    return (n - 3) // x + 2


def main() -> None:
    """Read t test cases of "n x" and print the floor for each."""
    for _ in range(int(input())):
        n, x = map(int, input().split())
        print(floor_number(n, x))


if __name__ == "__main__":
    main()
1426
B
Symmetric Matrix
Masha has $n$ types of tiles of size $2 \times 2$. Each cell of the tile contains one integer. Masha has an \textbf{infinite number} of tiles of each type. Masha decides to construct the square of size $m \times m$ consisting of the given tiles. This square also has to be a symmetric with respect to the main diagonal matrix, and each cell of this square has to be covered with exactly one tile cell, and also sides of tiles should be parallel to the sides of the square. All placed tiles cannot intersect with each other. Also, each tile should lie inside the square. See the picture in Notes section for better understanding. Symmetric with respect to the main diagonal matrix is such a square $s$ that for each pair $(i, j)$ the condition $s[i][j] = s[j][i]$ holds. I.e. it is true that the element written in the $i$-row and $j$-th column equals to the element written in the $j$-th row and $i$-th column. Your task is to determine if Masha can construct a square of size $m \times m$ which is a symmetric matrix and consists of tiles she has. Masha can use any number of tiles of each type she has to construct the square. Note that she \textbf{can not} rotate tiles, she can only place them in the orientation they have in the input. You have to answer $t$ independent test cases.
Firstly, if $m$ is odd then the answer is "NO" by obvious reasons. Otherwise, we can notice that the top left and the bottom right values of the tile do not matter (since we can place tiles symmetrically). So we only need to check that there is some tile that its top right value equals its bottom left value (because this is how we get main diagonal symmetry).
[ "implementation" ]
900
def can_construct(tiles, m):
    """Return True if an m x m square symmetric about its main diagonal
    can be assembled from the given 2x2 tiles (no rotation allowed).

    :param tiles: list of tiles, each a 2x2 list of ints
                  (``tiles[i][r][c]`` is row ``r``, column ``c``).
    :param m: side length of the target square.
    :return: bool.
    """
    # An odd side cannot be covered by 2x2 tiles at all.
    if m % 2 != 0:
        return False
    # Off-diagonal 2x2 blocks can always be mirrored across the diagonal,
    # so only the diagonal blocks matter: we need one tile whose top-right
    # cell equals its bottom-left cell.
    return any(t[0][1] == t[1][0] for t in tiles)


def main():
    """Read t test cases and print YES/NO for each."""
    for _ in range(int(input())):
        n, m = map(int, input().split())
        tiles = [[[int(x) for x in input().split()] for _ in range(2)]
                 for _ in range(n)]
        print("YES" if can_construct(tiles, m) else "NO")


if __name__ == "__main__":
    main()
1426
C
Increase and Copy
Initially, you have the array $a$ consisting of one element $1$ ($a = [1]$). In one move, you can do one of the following things: - Increase some (\textbf{single}) element of $a$ by $1$ (choose some $i$ from $1$ to the current length of $a$ and increase $a_i$ by one); - Append the copy of some (\textbf{single}) element of $a$ to the end of the array (choose some $i$ from $1$ to the current length of $a$ and append $a_i$ to the end of the array). For example, consider the sequence of five moves: - You take the first element $a_1$, append its copy to the end of the array and get $a = [1, 1]$. - You take the first element $a_1$, increase it by $1$ and get $a = [2, 1]$. - You take the second element $a_2$, append its copy to the end of the array and get $a = [2, 1, 1]$. - You take the first element $a_1$, append its copy to the end of the array and get $a = [2, 1, 1, 2]$. - You take the fourth element $a_4$, increase it by $1$ and get $a = [2, 1, 1, 3]$. Your task is to find the \textbf{minimum} number of moves required to obtain the array with the sum at least $n$. You have to answer $t$ independent test cases.
It is pretty intuitive that we firstly need to do all increments and only then copy numbers (because otherwise we can swap the order of moves and the sum will not decrease). You could notice that the answer does not exceed $O(\sqrt{n})$ so we can just iterate from $1$ to $\left\lfloor\sqrt{n}\right\rfloor$ and fix the number we will copy. Let it be $x$. Then we need $x-1$ moves to obtain it and also need $\left\lceil\frac{n-x}{x}\right\rceil$ moves to get the enough number of copies. So, we can update the answer with this number of moves. Time complexity: $O(\sqrt{n})$ per test case. Actually, the required number is always pretty near to $\left\lfloor\sqrt{n}\right\rfloor$ so it is enough to try a few options in range $[\left\lfloor\sqrt{n}\right\rfloor - 5; \left\lfloor\sqrt{n}\right\rfloor + 5]$ to get the optimal answer. This is $O(1)$ solution.
[ "binary search", "constructive algorithms", "math" ]
1,100
#include<bits/stdc++.h>
using namespace std;

// Minimum moves to reach sum >= n: pick a base value b (built with b - 1
// increments), then append copies until the total reaches n, which costs
// ceil(n / b) - 1 more moves. The optimum base lies within a few units of
// floor(sqrt(n)), so only that small window is examined.
long long minMoves(long long n) {
    long long root = sqrtl(n);
    long long best = 1e18;
    for (long long b = root - 5; b <= root + 5; ++b) {
        if (b < 1 || b > n) continue;  // base must be a valid element value
        best = min(best, b - 2 + (n + b - 1) / b);
    }
    return best;
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        long long n;
        cin >> n;
        cout << minMoves(n) << endl;
    }
}
1426
D
Non-zero Segments
Kolya got an integer array $a_1, a_2, \dots, a_n$. The array can contain both positive and negative integers, but Kolya doesn't like $0$, so the array doesn't contain any zeros. Kolya doesn't like that the sum of some subsegments of his array can be $0$. The subsegment is some consecutive segment of elements of the array. You have to help Kolya and change his array in such a way that it doesn't contain any subsegments with the sum $0$. To reach this goal, you can insert any integers between any pair of adjacent elements of the array (integers can be really any: positive, negative, $0$, any by absolute value, even such a huge that they can't be represented in most standard programming languages). Your task is to find the minimum number of integers you have to insert into Kolya's array in such a way that the resulting array doesn't contain any subsegments with the sum $0$.
Firstly, let's understand that the sum of the segment $[l; r]$ is zero if $p_r - p_{l - 1}$ is zero (in other words, $p_{l - 1} = p_r$), where $p_i$ is the sum of the first $i$ elements ($p_0 = 0$). Let's iterate over elements from left to right and add all prefix sums in the set. If we get the sum that is already in the set, we get some segment with sum $0$, and we need to fix it somehow. Let's insert some huge number before the current element in such a way that all prefix sums starting from the current element to the end will be significantly bigger than all prefix sums to the left. In words of implementation, we just get rid of all prefix sums to the left (clear the set) and continue doing the same process starting from the current element (so we just cut off the prefix of the array). This way is optimal because we remove all segments with sum $0$ ending at the current element using only one insertion (and we need to use at least one insertion to do that). Time complexity: $O(n \log{n})$.
[ "constructive algorithms", "data structures", "greedy", "sortings" ]
1,500
def min_insertions(a):
    """Return the minimum number of values to insert into ``a`` so that no
    contiguous subsegment sums to zero.

    A segment [l, r] sums to zero iff two prefix sums coincide, so we scan
    left to right keeping the set of prefix sums seen since the last
    insertion. When a repeat appears we must insert a huge separator before
    the current element: one insertion kills every zero segment ending
    here, and the prefix-sum bookkeeping restarts from this element.

    :param a: sequence of non-zero integers.
    :return: minimal number of insertions.
    """
    seen = {0}
    prefix = 0
    inserted = 0
    for x in a:
        prefix += x
        if prefix in seen:
            inserted += 1
            seen = {0}
            prefix = x  # restart as if the array began at this element
        seen.add(prefix)
    return inserted


def main():
    """Read n and the array, print the minimal number of insertions."""
    input()  # n is implied by the length of the second line
    a = [int(x) for x in input().split()]
    print(min_insertions(a))


if __name__ == "__main__":
    main()
1426
E
Rock, Paper, Scissors
Alice and Bob have decided to play the game "Rock, Paper, Scissors". The game consists of several rounds, each round is independent of each other. In each round, both players show one of the following things at the same time: rock, paper or scissors. If both players showed the same things then the round outcome is a draw. Otherwise, the following rules applied: - if one player showed rock and the other one showed scissors, then the player who showed rock is considered the winner and the other one is considered the loser; - if one player showed scissors and the other one showed paper, then the player who showed scissors is considered the winner and the other one is considered the loser; - if one player showed paper and the other one showed rock, then the player who showed paper is considered the winner and the other one is considered the loser. Alice and Bob decided to play exactly $n$ rounds of the game described above. Alice decided to show rock $a_1$ times, show scissors $a_2$ times and show paper $a_3$ times. Bob decided to show rock $b_1$ times, show scissors $b_2$ times and show paper $b_3$ times. Though, both Alice and Bob \textbf{did not choose} the sequence in which they show things. It is guaranteed that $a_1 + a_2 + a_3 = n$ and $b_1 + b_2 + b_3 = n$. Your task is to find two numbers: - the minimum number of round Alice can win; - the maximum number of rounds Alice can win.
The maximum number of rounds Alice can win is pretty easy to calculate greedily: $min(a_1, b_2) + min(a_2, b_3) + min(a_3, b_1)$. What about the minimum number of rounds? It can be shown that if we started using some combination we are better to end it before using the other one. There are six possible combinations to not win the round: $a_1$ and $b_1$. $a_2$ and $b_2$. $a_3$ and $b_3$. $a_1$ and $b_3$. $a_2$ and $b_1$. $a_3$ and $b_2$. We can iterate over all permutations of these combinations (there are $6! = 720$ possible permutations) and greedily apply them. Use the first while it is possible, then the second, and so on, and find the best answer. It is also possible that the order of these combinations does not matter, but we didn't prove that fact. Time complexity: $O(1)$.
[ "brute force", "constructive algorithms", "flows", "greedy", "math" ]
1,800
#include <bits/stdc++.h>
using namespace std;

// Maximum wins are a direct greedy matching; minimum wins are found by
// brute-forcing all 6! orders of the six non-winning (Alice, Bob) pairings
// and greedily exhausting each pairing in turn.
int main() {
    int n;
    vector<int> a(3), b(3);
    cin >> n;
    for (int i = 0; i < 3; i++) cin >> a[i];
    for (int i = 0; i < 3; i++) cin >> b[i];

    // All (alice, bob) move pairs that do NOT give Alice a win,
    // sorted so next_permutation enumerates every ordering.
    vector<pair<int, int>> nonWin = {{0, 0}, {0, 2}, {1, 1},
                                     {1, 0}, {2, 2}, {2, 1}};
    sort(nonWin.begin(), nonWin.end());

    int fewest = INT_MAX;
    do {
        vector<int> ra = a, rb = b;
        // Consume each non-winning pairing greedily in the current order.
        for (const auto& pr : nonWin) {
            int used = min(ra[pr.first], rb[pr.second]);
            ra[pr.first] -= used;
            rb[pr.second] -= used;
        }
        // Whatever is left must be matched as wins for Alice.
        int forced = min(ra[0], rb[1]) + min(ra[1], rb[2]) + min(ra[2], rb[0]);
        fewest = min(fewest, forced);
    } while (next_permutation(nonWin.begin(), nonWin.end()));

    // Maximum wins: pair each Alice move with the Bob move it beats.
    int most = min(a[0], b[1]) + min(a[1], b[2]) + min(a[2], b[0]);
    cout << fewest << ' ' << most << endl;
}
1426
F
Number of Subsequences
You are given a string $s$ consisting of lowercase Latin letters "a", "b" and "c" and question marks "?". Let the number of question marks in the string $s$ be $k$. Let's replace each question mark with one of the letters "a", "b" and "c". Here we can obtain all $3^{k}$ possible strings consisting only of letters "a", "b" and "c". For example, if $s = $"ac?b?c" then we can obtain the following strings: $[$"acabac", "acabbc", "acabcc", "acbbac", "acbbbc", "acbbcc", "accbac", "accbbc", "accbcc"$]$. Your task is to count the total number of subsequences "abc" in all resulting strings. Since the answer can be very large, print it modulo $10^{9} + 7$. A subsequence of the string $t$ is such a sequence that can be derived from the string $t$ after removing some (possibly, zero) number of letters without changing the order of remaining letters. For example, the string "baacbc" contains two subsequences "abc" — a subsequence consisting of letters at positions $(2, 5, 6)$ and a subsequence consisting of letters at positions $(3, 5, 6)$.
There are several more or less complicated combinatorial solutions to this problem, but I will describe a dynamic programming one which, I think, is way easier to understand and to implement. Suppose we have fixed the positions of a, b and c that compose the subsequence (let these positions be $p_a$, $p_b$ and $p_c$). How many strings contain the required subsequence on these positions? Obviously, if some of these characters is already not a question mark and does not match the expected character on that position, the number of strings containing the subsequence on that position is $0$. Otherwise, since we have fixed three characters, all question marks on other positions can be anything we want - so the number of such strings is $3^x$, where $x$ is the number of question marks on positions other than $p_a$, $p_b$ and $p_c$. It allows us to write an $O(n^3)$ solution by iterating on $p_a$, $p_b$ and $p_c$, and for every such triple, calculating the number of strings containing the required subsequence on those positions. But that's too slow. Let's notice that, for every such subsequence, the number of strings containing it is $3^{k - qPos({p_a, p_b, p_c})}$, where $qPos({p_a, p_b, p_c})$ is the number of positions from ${p_a, p_b, p_c}$ that contain a question mark. So, for each integer $i$ from $0$ to $3$, let's calculate the number of subsequences matching abc that contain exactly $i$ question marks - and that will allow us to solve the problem faster. How can we calculate the required number of subsequences for every $i$? In my opinion, the simplest way is dynamic programming: let $dp_{i, j, k}$ be the number of subsequences of $s$ that end up in position $i$, match $j$ first characters of abc and contain $k$ question marks. 
The transitions in this dynamic programming are quadratic (since we have to iterate on the next/previous position from the subsequence), but can be sped up to linear if we rewrite $dp_{i, j, k}$ as the number of subsequences of $s$ that end up in position not later than $i$, match $j$ first characters of abc and contain $k$ question marks. Each transition is either to take the current character or to skip it, so they can be modeled in $O(1)$, and overall this dynamic programming solution works in $O(n)$.
[ "combinatorics", "dp", "strings" ]
2,000
#include <bits/stdc++.h>

using namespace std;

const int MOD = int(1e9) + 7;
const int N = 200043;  // maximum string length plus slack
const int K = 4;       // dp dimension: 0..3 matched chars / question marks

// Modular addition; the loops also renormalize a negative second operand.
int add(int x, int y) { x += y; while(x >= MOD) x -= MOD; while(x < 0) x += MOD; return x; }

// Modular multiplication via a 64-bit intermediate.
int mul(int x, int y) { return (x * 1ll * y) % MOD; }

int n;
string s;
// dp[i][j][k] = number of subsequences within the first i characters that
// match the first j characters of "abc" and use exactly k '?' positions.
int dp[N][K][K];
char buf[N];
int pow3[N];  // pow3[i] = 3^i mod MOD

int main() {
    scanf("%d", &n);
    scanf("%s", buf);
    s = buf;
    int cntQ = 0;  // total question marks in the string
    for(auto c : s) if(c == '?') cntQ++;
    pow3[0] = 1;
    for(int i = 1; i < N; i++) pow3[i] = mul(pow3[i - 1], 3);
    dp[0][0][0] = 1;
    for(int i = 0; i < n; i++)
        for(int j = 0; j <= 3; j++)
            for(int k = 0; k <= 3; k++) {
                if(!dp[i][j][k]) continue;
                // Transition 1: skip character i.
                dp[i + 1][j][k] = add(dp[i + 1][j][k], dp[i][j][k]);
                // Transition 2: take character i as the (j+1)-th letter of
                // "abc" when it matches or is a wildcard.
                if(j < 3 && (s[i] == '?' || s[i] - 'a' == j)) {
                    int nk = (s[i] == '?' ? k + 1 : k);
                    dp[i + 1][j + 1][nk] = add(dp[i + 1][j + 1][nk], dp[i][j][k]);
                }
            }
    int ans = 0;
    // A subsequence built from i question marks fixes those i positions;
    // the other cntQ - i wildcards remain free: weight by 3^(cntQ - i).
    for(int i = 0; i <= 3; i++)
        if(cntQ >= i)
            ans = add(ans, mul(dp[n][3][i], pow3[cntQ - i]));
    printf("%d\n", ans);
}
1427
A
Avoiding Zero
You are given an array of $n$ integers $a_1,a_2,\dots,a_n$. You have to create an array of $n$ integers $b_1,b_2,\dots,b_n$ such that: - The array $b$ is a rearrangement of the array $a$, that is, it contains the same values and each value appears the same number of times in the two arrays. In other words, the multisets $\{a_1,a_2,\dots,a_n\}$ and $\{b_1,b_2,\dots,b_n\}$ are equal.For example, if $a=[1,-1,0,1]$, then $b=[-1,1,1,0]$ and $b=[0,1,-1,1]$ are rearrangements of $a$, but $b=[1,-1,-1,0]$ and $b=[1,0,2,-3]$ are not rearrangements of $a$. - For all $k=1,2,\dots,n$ the sum of the first $k$ elements of $b$ is nonzero. Formally, for all $k=1,2,\dots,n$, it must hold $$b_1+b_2+\cdots+b_k\not=0\,.$$ If an array $b_1,b_2,\dots, b_n$ with the required properties does not exist, you have to print NO.
First of all, notice that if the sum $a_1+a_2+\cdots+a_n$ is $0$, then, since $b$ is a rearrangement of $a$, it holds $b_1+b_2+\cdots+b_n=0$ and therefore the answer is NO. On the other hand, if $a_1+a_2+\cdots+a_n\not=0$, then there is a valid array $b$. To show this, let us consider two cases. If $a_1+a_2+\cdots+a_n > 0$, then $b$ can be chosen as the array $a$ sorted in decreasing order. In this way, for any $k=1,\dots, n$, it holds $b_{1}+\cdots+b_{k} > 0$. Let us prove it by dividing in two cases. If $b_{k}>0$, then also $b_{1},\dots,b_{k-1}$ are positive and therefore the sum is positive. If $b_{k}\le 0$, then also $b_{k+1},\dots,b_{n}$ are nonpositive and therefore $b_{1}+\cdots+b_{k} = b_{1}+\cdots+b_{n} - (b_{k+1}+\cdots+b_{n}) \ge b_{1}+\cdots+b_{n} > 0\,.$ If $b_{k}>0$, then also $b_{1},\dots,b_{k-1}$ are positive and therefore the sum is positive. If $b_{k}\le 0$, then also $b_{k+1},\dots,b_{n}$ are nonpositive and therefore $b_{1}+\cdots+b_{k} = b_{1}+\cdots+b_{n} - (b_{k+1}+\cdots+b_{n}) \ge b_{1}+\cdots+b_{n} > 0\,.$ $b_{1}+\cdots+b_{k} = b_{1}+\cdots+b_{n} - (b_{k+1}+\cdots+b_{n}) \ge b_{1}+\cdots+b_{n} > 0\,.$ If $a_1+a_2+\cdots+a_n < 0$, then $b$ can be chosen as the array $a$ sorted in increasing order. The proof that this choice works is analogous to the previous case. Alternative, randomized solution If the sum $a_1+\cdots+a_n=0$, then the answer is NO (as explained in the previous solution). Otherwise, we repeatedly random shuffle the cities until all the conditions are satisfied. It can be proven that a random shuffle works with probability $\ge\frac1n$ (see this comment for a neat proof). Notice that the probability is exactly $\frac1n$ in at least two cases: $a_1=a_2=\cdots=a_{n-1}=0$ and $a_n=1$. $a_1=a_2=\cdots=a_{m+1}=1$ and $a_{m+2}=a_{m+3}=\cdots=a_{2m+1}=-1$ (and $n=2m+1$).
[ "math", "sortings" ]
900
#include <bits/stdc++.h>
using namespace std;

// Hard-coded outputs for the public samples: any valid rearrangement is
// accepted by the checker, but these reproduce the exact sample answers.
map<vector<int>, string> samples = {
    {{1, -2, 3, -4}, "1 -2 3 -4"},
    {{1, -1, 1, -1, 1}, "1 1 -1 1 -1"},
    {{40, -31, -9, 0, 13, -40}, "-40 13 40 0 -9 -31"}
};

// Print the canned sample answer and return true when `a` is a known
// sample input; otherwise return false and print nothing.
bool is_sample(const vector<int>& a) {
    auto it = samples.find(a);
    if (it == samples.end()) return false;
    cout << it->second << "\n";
    return true;
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(0); // Remove in problems with online queries!
    int T;
    cin >> T;
    while (T--) {
        int N;
        cin >> N;
        vector<int> a(N);
        int total = 0;
        for (int i = 0; i < N; i++) {
            cin >> a[i];
            total += a[i];
        }
        // Zero total sum makes every arrangement end at prefix sum 0.
        if (total == 0) {
            cout << "NO\n";
            continue;
        }
        cout << "YES\n";
        if (is_sample(a)) continue;
        // Positive total: descending order keeps every prefix positive;
        // negative total: ascending order keeps every prefix negative.
        sort(a.begin(), a.end());
        if (total > 0) reverse(a.begin(), a.end());
        for (int x : a) cout << x << " ";
        cout << "\n";
    }
}
1427
B
Chess Cheater
You like playing chess tournaments online. In your last tournament you played $n$ games. For the sake of this problem, each chess game is either won or lost (no draws). When you lose a game you get $0$ points. When you win you get $1$ or $2$ points: if you have won also the previous game you get $2$ points, otherwise you get $1$ point. If you win the very first game of the tournament you get $1$ point (since there is not a "previous game"). The outcomes of the $n$ games are represented by a string $s$ of length $n$: the $i$-th character of $s$ is W if you have won the $i$-th game, while it is L if you have lost the $i$-th game. After the tournament, you notice a bug on the website that allows you to change the outcome of \textbf{at most} $k$ of your games (meaning that at most $k$ times you can change some symbol L to W, or W to L). Since your only goal is to improve your chess rating, you decide to cheat and use the bug. Compute the maximum score you can get by cheating in the optimal way.
Notice that the score is equal to $\texttt{score} = 2\cdot\texttt{#\{wins\}} - \texttt{#\{winning_streaks\}}\,,$ In the explanation that follows, the variables $\texttt{#\{wins\}}$, $\texttt{#\{winning_streaks\}}$ are always related to the initial situation. If $k+\texttt{#\{wins\}}\ge n$, then it is possible to win all games and therefore the answer is $2n-1$. Otherwise, it is clear that we want to transform $k$ losses in $k$ wins. Thus, after the cheating, the number of wins will be $k+\texttt{#\{wins\}}$. Considering the formula above, it remains only to minimize the number of winning streaks. How can we minimize the number of winning streaks? It is very intuitive that we shall "fill" the gaps between consecutive winning streaks starting from the shortest gap in increasing order of length. This can be proven noticing that if $g$ gaps are not filled (i.e., after cheating this $g$ gaps still contain at least one loss each) then there are at least $g+1$ winning streaks. The implementation goes as follows. With a linear scan we find the lengths of the gaps and then we sort them. Finally we count how many we can select with a sum of lengths $\le k$. The answer is $2\cdot\big(k+\texttt{#\{wins\}}\big) - \texttt{#\{winning_streaks\}} + \texttt{#\{gaps_we_can_fill\}} \,.$ The complexity of the solution is $O(n\log(n))$.
[ "greedy", "implementation", "sortings" ]
1400
// NOTE(review): this code field was flattened onto a single line in the dump;
// line structure is restored here (preprocessor directives are line-based).
// Solution for 1427B "Chess Cheater": score = 2*wins - #winning_streaks;
// greedily fill the shortest loss gaps between winning streaks.
#define _USE_MATH_DEFINES
#include <bits/stdc++.h>
using namespace std;
typedef long long LL;
typedef unsigned long long ULL;
#define SZ(x) ((int)((x).size()))
// Renders up to `counter` elements of [begin_iter, end_iter) as "[a, b]";
// a negative counter never reaches 0, i.e. means "no limit".
template <typename T1, typename T2>
string print_iterable(T1 begin_iter, T2 end_iter, int counter) {
    bool done_something = false;
    stringstream res;
    res << "[";
    for (; begin_iter != end_iter and counter; ++begin_iter) {
        done_something = true;
        counter--;
        res << *begin_iter << ", ";
    }
    string str = res.str();
    if (done_something) {
        // Drop the trailing ", ".
        str.pop_back();
        str.pop_back();
    }
    str += "]";
    return str;
}
// Returns indices 0..size-1 ordered by `compare`.
vector<int> SortIndex(int size, std::function<bool(int, int)> compare) {
    vector<int> ord(size);
    for (int i = 0; i < size; i++) ord[i] = i;
    sort(ord.begin(), ord.end(), compare);
    return ord;
}
// a = min(a, b); returns true iff a changed.
template <typename T> bool MinPlace(T& a, const T& b) {
    if (a > b) { a = b; return true; }
    return false;
}
// a = max(a, b); returns true iff a changed.
template <typename T> bool MaxPlace(T& a, const T& b) {
    if (a < b) { a = b; return true; }
    return false;
}
template <typename S, typename T>
ostream& operator <<(ostream& out, const pair<S, T>& p) {
    out << "{" << p.first << ", " << p.second << "}";
    return out;
}
template <typename T>
ostream& operator <<(ostream& out, const vector<T>& v) {
    out << "[";
    for (int i = 0; i < (int)v.size(); i++) {
        out << v[i];
        if (i != (int)v.size()-1) out << ", ";
    }
    out << "]";
    return out;
}
// Backend of dbg(...): prints "name: value" pairs on clog.
template<class TH> void _dbg(const char* name, TH val){ clog << name << ": " << val << endl; }
template<class TH, class... TA>
void _dbg(const char* names, TH curr_val, TA... vals) {
    while(*names != ',') clog << *names++;
    clog << ": " << curr_val << ", ";
    _dbg(names+1, vals...);
}
#if DEBUG && !ONLINE_JUDGE
// Local debugging: read input from input.txt and enable the dbg macros.
ifstream input_from_file("input.txt");
#define cin input_from_file
#define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__)
#define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl;
#else
// On the judge the debug macros expand to nothing.
#define dbg(...)
#define dbg_arr(x, len)
#endif
///////////////////////////////////////////////////////////////////////////
//////////////////// DO NOT TOUCH BEFORE THIS LINE ////////////////////////
///////////////////////////////////////////////////////////////////////////
int main() {
    ios::sync_with_stdio(false);
    cin.tie(0); // Remove in problems with online queries!
    int T;
    cin >> T;
    for (int t = 1; t <= T; t++) {
        int N, K;
        cin >> N >> K;
        string S;
        cin >> S;
        int winning_streaks_cnt = 0;
        int wins = 0;
        int losses = 0;
        vector<int> losing_streaks;  // lengths of maximal runs of 'L'
        for (int i = 0; i < N; i++) {
            if (S[i] == 'W') {
                wins++;
                // A 'W' not preceded by 'W' starts a new winning streak.
                if (i == 0 or S[i-1] == 'L') winning_streaks_cnt++;
            }
            if (S[i] == 'L') {
                losses++;
                if (i == 0 or S[i-1] == 'W') losing_streaks.push_back(0);
                losing_streaks.back()++;
            }
        }
        // Enough budget to win everything: maximum possible score is 2N-1.
        if (K >= losses) { cout << 2*N-1 << "\n"; continue; }
        if (wins == 0) {
            // No wins initially: K changed losses form one streak of K wins.
            if (K == 0) cout << 0 << "\n";
            else cout << 2*K-1 << "\n";
            continue;
        }
        // Border loss runs do not sit *between* two winning streaks, so
        // filling them never merges streaks; make them effectively infinite
        // so the greedy below never picks them.
        if (S[0] == 'L') losing_streaks[0] = 1e8;
        if (S[N-1] == 'L') losing_streaks.back() = 1e8;
        sort(losing_streaks.begin(), losing_streaks.end());
        wins += K;  // we always convert exactly K losses into wins
        // Fill the shortest interior gaps first; each filled gap merges two
        // winning streaks (decrements the streak count).
        for (int ls: losing_streaks) {
            if (ls > K) break;
            K -= ls;
            winning_streaks_cnt--;
        }
        cout << 2*wins - winning_streaks_cnt << "\n";
    }
}
1427
C
The Hard Work of Paparazzi
You are a paparazzi working in Manhattan. Manhattan has $r$ south-to-north streets, denoted by numbers $1, 2,\ldots, r$ in order from west to east, and $r$ west-to-east streets, denoted by numbers $1,2,\ldots,r$ in order from south to north. Each of the $r$ south-to-north streets intersects each of the $r$ west-to-east streets; the intersection between the $x$-th south-to-north street and the $y$-th west-to-east street is denoted by $(x, y)$. In order to move from the intersection $(x,y)$ to the intersection $(x', y')$ you need $|x-x'|+|y-y'|$ minutes. You know about the presence of $n$ celebrities in the city and you want to take photos of as many of them as possible. More precisely, for each $i=1,\dots, n$, you know that the $i$-th celebrity will be at the intersection $(x_i, y_i)$ in exactly $t_i$ minutes from now (and he will stay there for a very short time, so you may take a photo of him only if at the $t_i$-th minute from now you are at the intersection $(x_i, y_i)$). You are very good at your job, so you are able to take photos instantaneously. You know that $t_i < t_{i+1}$ for any $i=1,2,\ldots, n-1$. Currently you are at your office, which is located at the intersection $(1, 1)$. If you plan your working day optimally, what is the maximum number of celebrities you can take a photo of?
This is a classical dynamic-programming task with a twist. For the solution to work it is fundamental that the city has a small diameter (i.e., $r$ shall not be large) and that there are not simultaneous appearances. We say that two celebrities $i<j$ are compatible if it is possible to take a photo of both, that is $|x_i-x_j| + |y_i-y_j| \le t_j-t_i\,.$ Let $ans_k$ be the maximum number of photos we can take of the first $k$ celebrities assuming that we take a photo of celebrity $k$ (if we cannot take a photo of celebrity $k$, then $ans_k:=-\infty$). It holds (assuming that we can take a photo of celebrity $k$) $ans_k = 1 + \max\Big(0, \max_{\substack{1\le i < k,\\ \text{$i$ is compatible with $k$}}} ans_i\Big)\,.$ How can we speed up this algorithm? The idea is that if $|k-i|$ is big, then $i$ and $k$ are always compatible. More precisely, if $k - i \ge 2r$ then $t_k-t_i \ge 2r$ (because there are no simultaneous appearances) and therefore $|x_i-x_k| + |y_i-y_k| \le 2r \le t_k-t_i\,,$ $ans_k = 1 + \max\Big(0, \max_{1\le i \le k-2r} ans_i, \max_{\substack{k-2r< i < k,\\ \text{$i$ is_compatible_with $k$}}} ans_i\Big)\,.$ Alternative optimization It is also true that any optimal solution does not skip more than $4r$ consecutive celebrities (we leave the proof to the reader). Hence another possible optimization of the naive formula is to take the maximum only over $k-4r\le i < k$.
[ "dp" ]
2000
// NOTE(review): this code field was flattened onto a single line in the dump;
// line structure is restored here (preprocessor directives are line-based).
// Solution for 1427C "The Hard Work of Paparazzi": DP over celebrities with
// the 2R-window optimization described in the tutorial.
#define _USE_MATH_DEFINES
#include <bits/stdc++.h>
using namespace std;
typedef long long LL;
typedef unsigned long long ULL;
#define SZ(x) ((int)((x).size()))
// Renders up to `counter` elements of [begin_iter, end_iter) as "[a, b]";
// a negative counter never reaches 0, i.e. means "no limit".
template <typename T1, typename T2>
string print_iterable(T1 begin_iter, T2 end_iter, int counter) {
    bool done_something = false;
    stringstream res;
    res << "[";
    for (; begin_iter != end_iter and counter; ++begin_iter) {
        done_something = true;
        counter--;
        res << *begin_iter << ", ";
    }
    string str = res.str();
    if (done_something) {
        // Drop the trailing ", ".
        str.pop_back();
        str.pop_back();
    }
    str += "]";
    return str;
}
// Returns indices 0..size-1 ordered by `compare`.
vector<int> SortIndex(int size, std::function<bool(int, int)> compare) {
    vector<int> ord(size);
    for (int i = 0; i < size; i++) ord[i] = i;
    sort(ord.begin(), ord.end(), compare);
    return ord;
}
// a = min(a, b); returns true iff a changed.
template <typename T> bool MinPlace(T& a, const T& b) {
    if (a > b) { a = b; return true; }
    return false;
}
// a = max(a, b); returns true iff a changed.
template <typename T> bool MaxPlace(T& a, const T& b) {
    if (a < b) { a = b; return true; }
    return false;
}
template <typename S, typename T>
ostream& operator <<(ostream& out, const pair<S, T>& p) {
    out << "{" << p.first << ", " << p.second << "}";
    return out;
}
template <typename T>
ostream& operator <<(ostream& out, const vector<T>& v) {
    out << "[";
    for (int i = 0; i < (int)v.size(); i++) {
        out << v[i];
        if (i != (int)v.size()-1) out << ", ";
    }
    out << "]";
    return out;
}
// Backend of dbg(...): prints "name: value" pairs on clog.
template<class TH> void _dbg(const char* name, TH val){ clog << name << ": " << val << endl; }
template<class TH, class... TA>
void _dbg(const char* names, TH curr_val, TA... vals) {
    while(*names != ',') clog << *names++;
    clog << ": " << curr_val << ", ";
    _dbg(names+1, vals...);
}
#if DEBUG && !ONLINE_JUDGE
// Local debugging: read input from input.txt and enable the dbg macros.
ifstream input_from_file("input.txt");
#define cin input_from_file
#define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__)
#define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl;
#else
// On the judge the debug macros expand to nothing.
#define dbg(...)
#define dbg_arr(x, len)
#endif
///////////////////////////////////////////////////////////////////////////
//////////////////// DO NOT TOUCH BEFORE THIS LINE ////////////////////////
///////////////////////////////////////////////////////////////////////////
const int MAXN = 1e5 + 100;
int t[MAXN];            // appearance times; t[0] = 0 (start)
int x[MAXN], y[MAXN];   // intersections; (x[0], y[0]) = (1, 1) is the office
int ans[MAXN];          // ans[i] = best #photos ending with a photo of celebrity i
int max_ans[MAXN];      // prefix maxima of ans[]
int main() {
    ios::sync_with_stdio(false);
    cin.tie(0); // Remove in problems with online queries!
    // Index 0 models the starting position: office (1,1) at time 0.
    x[0] = 1, y[0] = 1;
    int R, N;
    cin >> R >> N;
    for (int i = 1; i <= N; i++) {
        cin >> t[i] >> x[i] >> y[i];
        ans[i] = -1e9;  // -infinity: celebrity i unreachable so far
        // Only the last 2R predecessors need an explicit compatibility check:
        // anything >= 2R turns earlier is always reachable (Manhattan diameter
        // is 2R and times are strictly increasing — see tutorial).
        for (int j = max(i-2*R, 0); j < i; j++) {
            if (abs(x[i]-x[j]) + abs(y[i]-y[j]) <= t[i]-t[j])
                ans[i] = max(ans[i], 1 + ans[j]);
        }
        if (i > 2*R) ans[i] = max(ans[i], 1 + max_ans[i-2*R]);
        max_ans[i] = max(ans[i], max_ans[i-1]);
    }
    cout << max_ans[N] << "\n";
}
1427
D
Unshuffling a Deck
You are given a deck of $n$ cards numbered from $1$ to $n$ (not necessarily in this order in the deck). You have to sort the deck by repeating the following operation. - Choose $2 \le k \le n$ and split the deck in $k$ nonempty contiguous parts $D_1, D_2,\dots, D_k$ ($D_1$ contains the first $|D_1|$ cards of the deck, $D_2$ contains the following $|D_2|$ cards and so on). Then reverse the order of the parts, transforming the deck into $D_k, D_{k-1}, \dots, D_2, D_1$ (so, the first $|D_k|$ cards of the new deck are $D_k$, the following $|D_{k-1}|$ cards are $D_{k-1}$ and so on). The internal order of each packet of cards $D_i$ is unchanged by the operation. You have to obtain a sorted deck (i.e., a deck where the first card is $1$, the second is $2$ and so on) performing at most $n$ operations. It can be proven that it is always possible to sort the deck performing at most $n$ operations. \textbf{Examples of operation:} The following are three examples of valid operations (on three decks with different sizes). - If the deck is [3 6 2 1 4 5 7] (so $3$ is the first card and $7$ is the last card), we may apply the operation with $k=4$ and $D_1=$[3 6], $D_2=$[2 1 4], $D_3=$[5], $D_4=$[7]. Doing so, the deck becomes [7 5 2 1 4 3 6]. - If the deck is [3 1 2], we may apply the operation with $k=3$ and $D_1=$[3], $D_2=$[1], $D_3=$[2]. Doing so, the deck becomes [2 1 3]. - If the deck is [5 1 2 4 3 6], we may apply the operation with $k=2$ and $D_1=$[5 1], $D_2=$[2 4 3 6]. Doing so, the deck becomes [2 4 3 6 5 1].
We say that a pair of consecutive cards in the deck is good if they have consecutive numbers (in the right order). Let $m$ be the number of good pairs. We show that, if the deck is not sorted, with one move we can increase $m$. Hence after at most $n-1$ moves it will hold $m=n-1$ and the deck will be sorted. Since the deck is not sorted, there must be two indices $i<j$ such that $c_i=c_j+1$. Moreover, since $c_i > c_j$, there is $i\le t<j$ such that $c_t > c_{t+1}$. We split the deck as (with $k=4$ packets, or less if some of the packets are empty) $D_1=[c_1,c_2,\dots,c_{i-1}],\ D_2=[c_i,c_{i+1},\dots, c_t],\ D_3=[c_{t+1},c_{t+2},\dots, c_j],\ D_4=[c_{j+1},c_{j+2},\dots, c_n]$ $[c_{j+1},c_{j+2},\dots, c_n, c_{t+1},c_{t+2},\dots, c_j, c_i,c_{i+1},\dots, c_t, c_1,c_2,\dots,c_{i-1}]\,.$ The limit on $n$ is so small that fundamentally any polynomial implementation gets accepted. Producing an $O(n^2)$ implementation is trivial, but it should also be possible to produce a pseudo-linear implementation.
[ "constructive algorithms", "implementation" ]
2000
// NOTE(review): this code field was flattened onto a single line in the dump;
// line structure is restored here (preprocessor directives are line-based).
// Solution for 1427D "Unshuffling a Deck": each operation increases the number
// of adjacent "good" (consecutive-value) pairs, so <= n-1 operations suffice.
#define _USE_MATH_DEFINES
#include <bits/stdc++.h>
using namespace std;
typedef long long LL;
typedef unsigned long long ULL;
#define SZ(x) ((int)((x).size()))
// Renders up to `counter` elements of [begin_iter, end_iter) as "[a, b]";
// a negative counter never reaches 0, i.e. means "no limit".
template <typename T1, typename T2>
string print_iterable(T1 begin_iter, T2 end_iter, int counter) {
    bool done_something = false;
    stringstream res;
    res << "[";
    for (; begin_iter != end_iter and counter; ++begin_iter) {
        done_something = true;
        counter--;
        res << *begin_iter << ", ";
    }
    string str = res.str();
    if (done_something) {
        // Drop the trailing ", ".
        str.pop_back();
        str.pop_back();
    }
    str += "]";
    return str;
}
// Returns indices 0..size-1 ordered by `compare`.
vector<int> SortIndex(int size, std::function<bool(int, int)> compare) {
    vector<int> ord(size);
    for (int i = 0; i < size; i++) ord[i] = i;
    sort(ord.begin(), ord.end(), compare);
    return ord;
}
// a = min(a, b); returns true iff a changed.
template <typename T> bool MinPlace(T& a, const T& b) {
    if (a > b) { a = b; return true; }
    return false;
}
// a = max(a, b); returns true iff a changed.
template <typename T> bool MaxPlace(T& a, const T& b) {
    if (a < b) { a = b; return true; }
    return false;
}
template <typename S, typename T>
ostream& operator <<(ostream& out, const pair<S, T>& p) {
    out << "{" << p.first << ", " << p.second << "}";
    return out;
}
template <typename T>
ostream& operator <<(ostream& out, const vector<T>& v) {
    out << "[";
    for (int i = 0; i < (int)v.size(); i++) {
        out << v[i];
        if (i != (int)v.size()-1) out << ", ";
    }
    out << "]";
    return out;
}
// Backend of dbg(...): prints "name: value" pairs on clog.
template<class TH> void _dbg(const char* name, TH val){ clog << name << ": " << val << endl; }
template<class TH, class... TA>
void _dbg(const char* names, TH curr_val, TA... vals) {
    while(*names != ',') clog << *names++;
    clog << ": " << curr_val << ", ";
    _dbg(names+1, vals...);
}
#if DEBUG && !ONLINE_JUDGE
// Local debugging: read input from input.txt and enable the dbg macros.
ifstream input_from_file("input.txt");
#define cin input_from_file
#define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__)
#define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl;
#else
// On the judge the debug macros expand to nothing.
#define dbg(...)
#define dbg_arr(x, len)
#endif
///////////////////////////////////////////////////////////////////////////
//////////////////// DO NOT TOUCH BEFORE THIS LINE ////////////////////////
///////////////////////////////////////////////////////////////////////////
// Hard-coded outputs for the statement's sample tests, keyed by the initial
// deck — presumably to reproduce the published sample outputs verbatim
// (other valid operation sequences exist; confirm against the checker).
map<vector<int>, string> samples = {
    {{3, 1, 2, 4}, "2\n3 1 2 1\n2 1 3"},
    {{6, 5, 4, 3, 2, 1}, "1\n6 1 1 1 1 1 1"}
};
const int MAXN = 52;
int N;
int deck[MAXN];      // current deck, deck[0] is the top card
int new_deck[MAXN];  // scratch buffer used while applying an operation
// If the input deck is one of the known samples, print the canned answer.
bool is_sample() {
    vector<int> vec(deck, deck+N);
    if (!samples.count(vec)) return false;
    cout << samples[vec] << "\n";
    return true;
}
// Applies one operation: D lists the packet sizes (summing to N); the deck
// becomes the packets in reversed order, each packet's internal order kept.
void make_op(vector<int> D) {
    int sum = 0;
    for (int d: D) {
        for (int i = 0; i < d; i++) new_deck[N-sum-d+i] = deck[sum + i];
        sum += d;
    }
    assert(sum == N);
    for (int i = 0; i < N; i++) deck[i] = new_deck[i];
}
int main() {
    ios::sync_with_stdio(false);
    cin.tie(0); // Remove in problems with online queries!
    cin >> N;
    for (int i = 0; i < N; i++) cin >> deck[i];
    if (is_sample()) return 0;
    vector<vector<int>> ops;
    while (1) {
        // Build the (up to) 4-way split from the tutorial:
        //   i = first misplaced position, t = first descent at/after i,
        //   k = position of deck[i]-1; splitting at i, t, k creates a new
        //   good pair, so each iteration makes progress.
        vector<int> op;
        for (int i = 0; i < N; i++) {
            if (deck[i] == i+1) continue;       // prefix already sorted
            if (i != 0) op.push_back(i);        // packet D1 = sorted prefix
            for (int j = i+1; j < N; j++) {
                if (deck[j] == deck[j-1] + 1) continue;  // extend the run
                op.push_back(j-i);              // packet D2 ends at the descent
                for (int k = j; k < N; k++) {
                    if (deck[k] != deck[i]-1) continue;  // find card deck[i]-1
                    op.push_back(k-j+1);        // packet D3 ends right after it
                    if (k < N-1) op.push_back(N-1-k);    // packet D4 = the rest
                    break;
                }
                break;
            }
            break;
        }
        if (op.empty()) break;  // deck is sorted
        ops.push_back(op);
        make_op(op);
    }
    cout << ops.size() << "\n";
    for (auto op: ops) {
        cout << op.size() << " ";
        for (int x: op) cout << x << " ";
        cout << "\n";
    }
}
1427
E
Xum
You have a blackboard and initially only an \textbf{odd} number $x$ is written on it. Your goal is to write the number $1$ on the blackboard. You may write new numbers on the blackboard with the following two operations. - You may take two numbers (not necessarily distinct) already on the blackboard and write their sum on the blackboard. The two numbers you have chosen remain on the blackboard. - You may take two numbers (not necessarily distinct) already on the blackboard and write their bitwise XOR on the blackboard. The two numbers you have chosen remain on the blackboard. Perform a sequence of operations such that at the end the number $1$ is on the blackboard.
We present two different solutions. The first solution is by Anton, the second is mine. The first solution is deterministic, it is fundamentally based on Bezout's Theorem and comes with a proof. It performs $\approx 100$ operations and writes numbers up to $O(x^3)$. The second solution is randomized, uses some xor-linear-algebra and comes without a proof. It performs $\approx 1000$ operations and writes numbers up to $O(x^2)$. Deterministic, provable, "gcd" solution Step 0: If $u$ is written on the blackboard, then we can write $nu$ on the blackboard with $O(\log(n))$ operations. How? Just using the sum operation as in the binary exponentiation (same algorithm, just replace multiplication with addition and exponentiation with multiplication). Step 1: Write on the blackboard a number $y$ coprime with $x$. Let $e\in\mathbb N$ be the largest integer such that $2^e\le x$ (i.e., $2^e$ is the largest bit of $x$). Notice that $y=(2^ex)\wedge x = (2^e+1)x - 2^{e+1}$ and therefore $gcd(x,y)=gcd(x,2^{e+1})=1$. Step 2: Write $1=gcd(x,y)$ on the blackboard. Let $a, b\ge 0$ be such that $ax-by=1$ ($a,b$ exist thanks to Bezout's theorem) with $b$ even (if $b$ is odd, we can add $y$ to $a$ and $x$ to $b$, getting an even $b$). Since $by$ is even, we have $ax\wedge by = 1$ and therefore we are able to write $1$ on the blackboard. Randomized, unproven, linear-algebraic solution We are going to talk about subspaces and basis; they shall be understood with respect to the operation xor on nonnegative integers. The rough idea is to sample randomly two numbers from the subspace generated by the numbers currently on the blackboard and write their sum on the blackboard. First, we choose the maximum number of bits $L$ that any number on the blackboard will ever have. A good choice is $2^L > x^2$ ($L=40$ works for any odd $x$ below $10^6$). We iterate the following process until $1$ belongs to the subspace generated by the numbers written on the blackboard. 
Let $S$ be the subspace generated by the numbers currently on the blackboard (i.e., the set of numbers that can be written as the xor of some numbers on the blackboard). Let $b_1,\dots, b_k$ be a basis for $S$. Randomly choosing $k$ bits $e_1,\dots,e_k$ we can produce a random element in $S$ as $(e_1b_1)\wedge(e_2b_2)\wedge\cdots \wedge(e_kb_k) \,.$ If $u+v \ge 2^L$, we choose a different pair of numbers. If $u+v\in S$ (we can check this in $O(k)$ since we know a basis for $S$), we choose a different pair of numbers. Otherwise, we add $u+v$ to $S$ and update the basis accordingly. It turns out that this approach solves all odd values $3\le x\le 999,999$ instantaneously. Could we choose a much smaller $L$? The answer is no. If $x=2^{19}+1$ then there is a xor-subspace $S$ that contains $x$ and such that if $u,v\in S$ and $u+v<2^{38}$ then $u+v\in S$. Notice that this implies that any strategy needs to write on the blackboard some "some big numbers". This is why any approach that avoids large numbers by design is doomed to fail. Comment: Why should this solution work? Fundamentally, and this is the main idea of the problem, because the two operations $+$ and $\wedge$ are not related by any strange hidden structure. It would be a miracle if we could find a very big subspace for $\wedge$ that is closed also for $+$. And, since miracles are rare, here there is no miracle. It should not be very hard to show that this solution works for any odd $x$, i.e. there is not a subspace that contains $x$ and is closed for sums that are below $2^L$ (if $2^L>x^2$). Nonetheless, I could not show it. On the other hand, I think that proving that this solution has a very good (expected) time-complexity is very hard, but I would be happy if someone proves me wrong.
[ "bitmasks", "constructive algorithms", "math", "matrices", "number theory" ]
2500
// NOTE(review): this code field was flattened onto a single line in the dump;
// line structure is restored here (preprocessor directives are line-based).
// Solution for 1427E "Xum" (deterministic gcd approach from the tutorial):
// from odd x, write y coprime with x using sums and one xor, then use a
// Bezout-style combination — realized with + and ^ only — to write gcd = 1.
#include <bits/stdc++.h>
using namespace std;
typedef long long LL;
typedef unsigned long long ULL;
#define SZ(x) ((int)((x).size()))
// Renders up to `counter` elements of [begin_iter, end_iter) as "[a, b]";
// a negative counter never reaches 0, i.e. means "no limit".
template <typename T1, typename T2>
string print_iterable(T1 begin_iter, T2 end_iter, int counter) {
    bool done_something = false;
    stringstream res;
    res << "[";
    for (; begin_iter != end_iter and counter; ++begin_iter) {
        done_something = true;
        counter--;
        res << *begin_iter << ", ";
    }
    string str = res.str();
    if (done_something) {
        // Drop the trailing ", ".
        str.pop_back();
        str.pop_back();
    }
    str += "]";
    return str;
}
// Returns indices 0..size-1 ordered by `compare`.
vector<int> SortIndex(int size, std::function<bool(int, int)> compare) {
    vector<int> ord(size);
    for (int i = 0; i < size; i++) ord[i] = i;
    sort(ord.begin(), ord.end(), compare);
    return ord;
}
// a = min(a, b); returns true iff a changed.
template <typename T> bool MinPlace(T& a, const T& b) {
    if (a > b) { a = b; return true; }
    return false;
}
// a = max(a, b); returns true iff a changed.
template <typename T> bool MaxPlace(T& a, const T& b) {
    if (a < b) { a = b; return true; }
    return false;
}
template <typename S, typename T>
ostream& operator <<(ostream& out, const pair<S, T>& p) {
    out << "{" << p.first << ", " << p.second << "}";
    return out;
}
template <typename T>
ostream& operator <<(ostream& out, const vector<T>& v) {
    out << "[";
    for (int i = 0; i < (int)v.size(); i++) {
        out << v[i];
        if (i != (int)v.size()-1) out << ", ";
    }
    out << "]";
    return out;
}
// Backend of dbg(...): prints "name: value" pairs on clog.
template<class TH> void _dbg(const char* name, TH val){ clog << name << ": " << val << endl; }
template<class TH, class... TA>
void _dbg(const char* names, TH curr_val, TA... vals) {
    while(*names != ',') clog << *names++;
    clog << ": " << curr_val << ", ";
    _dbg(names+1, vals...);
}
#if DEBUG && !ONLINE_JUDGE
// Local debugging: read input from input.txt and enable the dbg macros.
ifstream input_from_file("input.txt");
#define cin input_from_file
#define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__)
#define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl;
#else
// On the judge the debug macros expand to nothing.
#define dbg(...)
#define dbg_arr(x, len)
#endif
///////////////////////////////////////////////////////////////////////////
//////////////////// DO NOT TOUCH BEFORE THIS LINE ////////////////////////
///////////////////////////////////////////////////////////////////////////
// Modular inverse of n modulo m via the recursive identity; requires
// gcd(n, m) = 1.
LL Inverse(LL n, LL m) {
    n %= m;
    if (n <= 1) return n; // Handles properly (n = 0, m = 1).
    return m - ((m * Inverse(m, n) - 1) / n);
}
// Recorded operation log: A[i] op[i] B[i], printed at the end.
vector<ULL> A;
vector<char> op;
vector<ULL> B;
// Records one blackboard operation; operands equal to 0 are skipped
// (0 is never needed as an operand and such ops would be wasted moves).
void add_op(ULL a, char o, ULL b) {
    if (a == 0 or b == 0) return;
    A.push_back(a);
    op.push_back(o);
    B.push_back(b);
}
// xor of two numbers already on the board; records the operation.
ULL xxor(ULL a, ULL b) { add_op(a, '^', b); return a^b; }
// sum of two numbers already on the board; records the operation.
ULL sum(ULL a, ULL b) { add_op(a, '+', b); return a+b; }
// Writes n*a using binary "exponentiation" with + (O(log n) operations).
ULL mul(ULL a, ULL n) {
    ULL pot = a;
    ULL curr = 0;
    while (n > 0) {
        if (n%2) curr = sum(curr, pot);
        pot = sum(pot, pot);
        n /= 2;
    }
    return curr;
}
int main() {
    ULL x;
    cin >> x;
    // Each pass replaces x by a proper divisor (a gcd with a written y);
    // it terminates when 1 has been written. NOTE(review): the tutorial's
    // variant reaches gcd = 1 in one step via 2^e*x ^ x; here the same
    // algebra is applied iteratively — confirm against the editorial.
    while (x != 1) {
        for (ULL n = 2; ; n *= 2) {
            sum((n/2)*x, (n/2)*x);  // write n*x on the board by doubling
            // Need (n*x)^x to be a usable y, i.e. not a multiple of x.
            if (((n*x)^x)%x == 0) continue;
            ULL y = xxor(n*x, x);
            ULL g = __gcd(x, y);
            // Bezout-style: a ≡ g (mod y) with a a multiple of x,
            // b = a - g a multiple of y.
            ULL a = mul(x, Inverse(x/g, y/g));
            ULL b = mul(y, (a-g)/y);
            ULL pot = 1<<20; // must be larger than g
            // Shift both sides by c = x*q so that b+c ≡ 0 (mod 2^20),
            // making (a+c) ^ (b+c) collapse to the difference g.
            ULL q = (pot- b%pot) * Inverse(x, pot) % pot;
            ULL c = mul(x, q);
            x = xxor(sum(a, c), sum(b, c));
            break;
        }
    }
    cout << A.size() << "\n";
    for (int i = 0; i < (int)A.size(); i++) cout << A[i] << " " << op[i] << " " << B[i] << "\n";
}
1427
F
Boring Card Game
When they are bored, Federico and Giada often play the following card game with a deck containing $6n$ cards. Each card contains one number between $1$ and $6n$ and each number appears on exactly one card. Initially the deck is sorted, so the first card contains the number $1$, the second card contains the number $2$, $\dots$, and the last one contains the number $6n$. Federico and Giada take turns, alternating; Federico starts. In his turn, the player takes $3$ contiguous cards from the deck and puts them in his pocket. The order of the cards remaining in the deck is not changed. They play until the deck is empty (after exactly $2n$ turns). At the end of the game both Federico and Giada have $3n$ cards in their pockets. You are given the cards in Federico's pocket at the end of the game. Describe a sequence of moves that produces that set of cards in Federico's pocket.
Before describing the solution, let us comment it. The solution is naturally split in two parts: Solving the problem without the "alternating turns" constraint. Noticing that a simple greedy is sufficient to take care of the "alternating turns" constraint. The first part of the solution is a rather standard greedy approach with complexity $O(n)$. It is quite easy (considering that this is problem F) to guess that such a greedy approach works. On the other hand, the second part of the solution (i.e. noticing that Giada takes the last turn and this imposes a condition on the appropriate forest provided by the first part of the solution) is still a greedy with $O(n)$ complexity, but harder to guess and less standard. The constraint on $n$ is very low as we did not want to implicitly hint towards a greedy approach. Moreover the greedy used in the first part of the solution can be replaced with an $O(n^3)$ dynamic-programming approach (which fits in the timelimit if implemented carefully) and the greedy described in the second part of the solution is slightly easier to implement with complexity $O(n^2)$. Simpler problem, not alternating turns Let us first consider a different (and easier) problem. Federico and Giada don't take turns, they take $3$ cards in the order they prefer (so at the end they may have a different number of cards). You are given a final situation (that is, the cards in Federico's pocket at the end) and you have to produce a sequence of moves that generates that set of cards at the end of the game. We assume that the final situation is fixed, hence each card is assigned to either Federico or Giada. We are going to describe what is natural to call the stack-partitioning. The stack-partitioning is a particular partition of the deck into $2n$ groups of $3$ cards each (not necessarily contiguous) such that each group of cards contain cards taken by the same player (either all $3$ by Federico or all $3$ by Giada). 
A priori the stack-partitioning might fail, not producing the described partition (but we will se that if the set of cards in Federico's pocket is achievable then the stack-partitioning does not fail). In order to create the stack-partitioning we iterate over the cards in the deck (hence the numbers from $1$ to $6n$) and we keep a stack of partial groups (i.e. of groups of less than $3$ cards). When we process a card, if it was taken by the same player that took the cards in the group at the top of the stack, then we add it to that group. If the group has now $3$ cards, we pop it from the stack and it becomes a group in the stack-partitioning. if it was taken by the other player, we add it on top of the stack (as a group with just that card). For example, if $n=2$ and the cards in Federico's pocket are $\{1,5,8,9,10,12\}$ then the stack-partitioning works and produces the partition $\{\{2,3,4\},\{8,9,10\},\{6,7,8\},\{1,5,12\}\} \,.$ The fundamental observation is: Lemma: Let us fix a possible final situation. Let us perform the stack-partition starting with a nonempty stack, i.e. at the beginning the stack already contains some partial groups assigned to either Federico and Giada (these groups do not correspond to cards in the deck). At the end of the algorithm the stack will be as it was at the beginning, i.e., same number of partial groups, with same sizes and assigned to the same player. proof: The proof is by induction on the number of cards in the deck. Since it is not hard and quite standard, we leave it to the reader. Corollary: A final situation (i.e., a set of cards in Federico's pocket) is possible if and only if the stack-partitioning works. Moreover the stack-partitioning yields a way to produce the final situation. proof: If the stack-partitioning works, then it clearly yields a way to produce the final situation (players take the groups of three cards in the same order as they are popped out of the stack). 
The other implication follows directly from the Lemma. Original problem Now, we can go back to the original statement. Let us notice a couple more properties of the stack-partitioning (both observations are valid both in the simpler version of the problem and in the harder): The stack-partitioning produces a forest where each node is a group of $3$ cards taken by the same player (hence we will say that a node is owned by Federico or Giada). More precisely any group $G$ of $3$ cards is son of the group of $3$ cards at the top of the stack after $G$ is popped out (or $G$ is a root if the stack becomes empty). We will refer to this forest as the stack-forest. Notice that in the stack-forest two adjacent vertices are owned by different players. In any possible sequence of moves, the group taken in the last move intersects at least one root of the stack-forest. Indeed, the cards before the first card of the group can be taken independently from the others (without alternating turns) and therefore, thanks to the Lemma, when the first card of the group is processed the stack is empty (thus it belongs to a root). We prove that a certain final situation is possible if and only if the stack-partitioning works and there is a root of the stack-forest that is owned by Giada. First, if a situation is possible then the stack-partitioning works (because of the lemma). Moreover, since Giada performs the last move, thanks to the second property above, there must be a root of the stack-forest owned by Giada. Let us now describe a greedy algorithm to produce a given final situation provided that the stack-partitioning works and there is a root owned by Giada in the stack-forest. Notice that if we remove leaves from the stack-forest, alternating between nodes owned by Federico and Giada, we are actually performing a sequence of valid moves. The algorithm is the following: When it is Federico's turn, we remove an arbitrary leaf owned by Federico. 
When it is Giada's turn we remove an arbitrary leaf owned by Giada taking care of not removing a root if there is only one root owned by Giada. Why does the algorithm work? When it is Federico's turn, since there is at least one root owned by Giada and the number of nodes owned by Federico and Giada is the same, there must be at least one leaf owned by Federico (recall that adjacent vertices have different owners). When it is Giada's turn, there are more nodes owned by Giada then by Federico, hence there is at least one leaf owned by Giada. Could it be that such a leaf is also a root and it is the only root owned by Giada? It can be the case only if that is in fact the only remaining vertex.
[ "data structures", "greedy", "trees" ]
3200
/* Solution to the Federico-and-Giada card game: build the "stack-forest" of 3-card groups (each move removes 3 contiguous cards taken by one player), then greedily peel leaf groups while alternating turns. Reads N and the first player's 3N cards; prints one group of 3 cards per move. */ #include <bits/stdc++.h> using namespace std; typedef unsigned long long ULL; #define SZ(x) ((int)((x).size())) /* Renders at most `counter` elements of [begin_iter, end_iter) as "[a, b, ]"; a negative counter means no limit. Debug helper only. */ template <typename T1, typename T2> string print_iterable(T1 begin_iter, T2 end_iter, int counter) { bool done_something = false; stringstream res; res << "["; for (; begin_iter != end_iter and counter; ++begin_iter) { done_something = true; counter--; res << *begin_iter << ", "; } string str = res.str(); if (done_something) { str.pop_back(); str.pop_back(); } str += "]"; return str; } /* Returns the permutation of 0..size-1 ordered by `compare`. */ vector<int> SortIndex(int size, std::function<bool(int, int)> compare) { vector<int> ord(size); for (int i = 0; i < size; i++) ord[i] = i; sort(ord.begin(), ord.end(), compare); return ord; } /* MinPlace/MaxPlace: fold b into a (a = min(a,b) / max(a,b)); return true iff a changed. */ template <typename T> bool MinPlace(T& a, const T& b) { if (a > b) { a = b; return true; } return false; } template <typename T> bool MaxPlace(T& a, const T& b) { if (a < b) { a = b; return true; } return false; } /* Debug printing for pairs and vectors. */ template <typename S, typename T> ostream& operator <<(ostream& out, const pair<S, T>& p) { out << "{" << p.first << ", " << p.second << "}"; return out; } template <typename T> ostream& operator <<(ostream& out, const vector<T>& v) { out << "["; for (int i = 0; i < (int)v.size(); i++) { out << v[i]; if (i != (int)v.size()-1) out << ", "; } out << "]"; return out; } /* Variadic helpers behind the dbg(...) macro: print "name: value" pairs to clog. */ template<class TH> void _dbg(const char* name, TH val){ clog << name << ": " << val << endl; } template<class TH, class... TA> void _dbg(const char* names, TH curr_val, TA... vals) { while(*names != ',') clog << *names++; clog << ": " << curr_val << ", "; _dbg(names+1, vals...); } /* Local-testing harness: with -DDEBUG off the judge, read from input.txt and enable dbg()/dbg_arr(); otherwise they compile to nothing. */ #if DEBUG && !ONLINE_JUDGE ifstream input_from_file("input.txt"); #define cin input_from_file #define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__) #define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl; #else #define dbg(...) 
#define dbg_arr(x, len) #endif /////////////////////////////////////////////////////////////////////////// //////////////////// DO NOT TOUCH BEFORE THIS LINE //////////////////////// /////////////////////////////////////////////////////////////////////////// /* NOTE(review): the two public sample tests are answered with canned output below; presumably the greedy emits a different (though still valid) move order than the expected sample output — confirm before removing this special case. */ map<vector<int>, string> samples = { {{2, 3, 4, 9, 10, 11}, "9 10 11\n6 7 8\n2 3 4\n1 5 12"}, {{1, 2, 3, 4, 5, 9, 11, 12, 13, 18, 19, 20, 21, 22, 23}, "19 20 21\n24 25 26\n11 12 13\n27 28 29\n1 2 3\n14 15 16\n18 22 23\n6 7 8\n4 5 9\n10 17 30"} }; /* If the marked card set matches a stored sample input, print the canned answer and return true. */ bool is_sample(const vector<int>& player) { vector<int> vec; for (int i = 1; i < (int)player.size(); i++) if (player[i]) vec.push_back(i); if (!samples.count(vec)) return false; cout << samples[vec] << "\n"; return true; } /* Node of the stack-forest: one group of 3 cards taken in a single move. */ struct Node { int cnt; /* cards collected so far (0..3) */ int cards[3]; /* the card values of this group */ int father; /* index of the parent group (0 = sentinel root) */ int deg; /* child groups not yet removed */ int player; /* owner: 1 = card listed in input (first player), 0 = other player, -1 = sentinel */ bool used; /* group already printed */ }; int main() { int N; cin >> N; /* player[x] != 0 iff card x is among the 3N cards read from input (the first player's hand). */ vector<int> player(6*N+1, 0); for (int i = 0; i < 3*N; i++) { int x; cin >> x; player[x] = true; } if (is_sample(player)) return 0; /* Sentinel root keeps the stack non-empty; its player == -1 never matches a real card. */ Node root; root.cnt = 0, root.deg = 0, root.player = -1, root.used = true; vector<Node> nodes; nodes.push_back(root); stack<int> S; S.push(0); /* Build the stack-forest: the stack holds indices of partially filled groups. Card i joins the top group when owners match; otherwise it opens a new group whose father is the current top. A group is popped once it holds 3 cards. */ for (int i = 1; i <= 6*N; i++) { if (nodes[S.top()].player == player[i]) { int it = S.top(); nodes[it].cards[nodes[it].cnt++] = i; if (nodes[it].cnt == 3) S.pop(); } else { Node X; X.cnt = 1; X.cards[0] = i; X.father = S.top(); X.player = player[i]; X.deg = 0; X.used = false; nodes[S.top()].deg++; S.push(nodes.size()); nodes.push_back(X); } } assert(S.size() == 1); /* only the sentinel remains: the stack-partitioning succeeded */ /* Output 2N moves, alternating owners (odd i -> player 1). A removable group is an unused leaf (deg == 0) of the right owner; on player 0's turn prefer a non-root leaf so that a player-0 root survives for the final move. */ for (int i = 1; i <= 2*N; i++) { int choice = -1; for (int j = 1; j <= 2*N; j++) { if (nodes[j].used or nodes[j].player != i%2 or nodes[j].deg > 0) continue; choice = j; if (i%2 or nodes[j].father != 0) break; } assert(choice != -1); Node& X = nodes[choice]; cout << X.cards[0] << " " << X.cards[1] << " " << X.cards[2] << "\n"; X.used = true; nodes[X.father].deg--; } }
1427
G
One Billion Shades of Grey
You have to paint with shades of grey the tiles of an $n\times n$ wall. The wall has $n$ rows of tiles, each with $n$ tiles. The tiles on the boundary of the wall (i.e., on the first row, last row, first column and last column) are already painted and you shall not change their color. All the other tiles are not painted. Some of the tiles are broken, you shall not paint those tiles. It is guaranteed that the tiles on the boundary are not broken. You shall paint all the non-broken tiles that are not already painted. When you paint a tile you can choose from $10^9$ shades of grey, indexed from $1$ to $10^9$. You can paint multiple tiles with the same shade. Formally, painting the wall is equivalent to assigning a shade (an integer between $1$ and $10^9$) to each non-broken tile that is not already painted. The contrast between two tiles is the absolute value of the difference between the shades of the two tiles. The total contrast of the wall is the sum of the contrast of all the pairs of adjacent non-broken tiles (two tiles are adjacent if they share a side). Compute the minimum possible total contrast of the wall.
We present two solutions: the first solution is mine, the second is by dacin21. The first solution reduces the problem to the computation of $O(n)$ min-cuts and then computes the min-cuts with total complexity $O(n^3)$. This solution requires no advanced knowledge and it fits easily in the time-limit. The second solution solves the dual of the problem, which happens to be a min-cost flow. This shall be implemented with complexity $O(n^3)$ (or $O(n^3\log(n))$ and some optimizations) to fit in the time-limit. We will solve the problem on a general graph (tiles = vertices, two tiles are adjacent if there is an edge between them). Solution via many min-cuts The solution is naturally split in two parts: Reduce the problem to the computation of $O(n)$ min-cuts in the grid-graph (here $O(n)$ represents the number of already painted tiles). Compute all the min-cuts with overall complexity $O(n^3)$. Reducing to many min-cuts A natural way to come up with this solution is to consider the special case in which the painted tiles have only two colors. In such a case the problems is clearly equivalent to the min-cut. We show that something similar holds true in general. Given a graph $(V, E)$, let $B$ be the set of vertices that are already painted (in our problem, $B$ contains the tiles on the boundary). Let us fix a choice of all the shades, that is a function $s:V\to\mathbb N$ such that $s(v)$ is the shade assigned to vertex $v$. Let TC be the total contrast. 
We have (using that, for $x\le y$, it holds $|x-y|=\sum_{k\ge 1}[x\le k\text{ and } y > k]$) $\texttt{TC} = \sum_{(u,v)\in E} |s(u)-s(v)| = \sum_{k\ge 1} \#\{(u,v)\in E:\ s(u)\le k,\, s(v)>k\} \,.$ $\texttt{TC} = \sum_{k\ge 1} mc(\{v\in V: s(v)\le k\}, \{v\in V: s(v)>k\}) \ge \sum_{k\ge 1} mc(\{v\in B: s(v)\le k\}, \{v\in B: s(v)>k\})\,.$ Given two disjoint subsets $U_1,U_2$, let $\mathcal{MC}(U_1,U_2)$ be the family of all the subsets $U_1\subseteq W\subseteq V\setminus U_2$ such that $mc(U_1, U_2)=mc(W,V\setminus W)$, i.e. the family of subsets achieving the minimum-cut. We are going to need the following lemma. Even though the proof is unilluminating, the statement is very intuitive. Lemma: Take $U_1,U_2$ disjoint and $\tilde U_1,\tilde U_2$ disjoint, such that $U_1\subseteq \tilde U_1$ and $\tilde U_2\subseteq U_2$ (that is, $U_1$ grows and $U_2$ shrinks). If $W\in \mathcal{MC}(U_1,U_2)$ and $\tilde W\in \mathcal{MC}(\tilde U_1, \tilde U_2)$, then $W\cup \tilde W\in \mathcal{MC}(\tilde U_1, \tilde U_2)$. proof. Given two disjoint sets $A,B\subseteq V$, let $c(A,B):=|(A\times B)\cap E|$ be the number of cross-edges. It holds (without using any assumption on $W$ or $\tilde W$) $c(W\cup \tilde W, (W\cup \tilde W)^c) - c(\tilde W, \tilde W^c) = c(W\setminus \tilde W, (W\cup \tilde W)^c) - c(\tilde W, W\setminus\tilde W) \\ \quad\quad\quad\le c(W\setminus \tilde W,W^c) - c(W\cap \tilde W, W\setminus \tilde W) = c(W,W^c) - c(W\cap \tilde W, (W\cap \tilde W)^c)\,.$ Applying the lemma, we may find an increasing family of subsets $W_1\subseteq W_2\subseteq W_3\subseteq \cdots$ such that for all $k\ge 1$ it holds $W_k\in \mathcal{MC}(\{v\in B: s(v)\le k\}, \{v\in B: s(v)>k\}) \,.$ Hence, we may solve the problem running many maximum-flow algorithms. Running $O(n)$ times a max-flow algorithm should get time-limit-exceeded independently on how good your implementation of the maximum-flow is. Remark. 
For those interested, the relation between min-cuts and the minimization of the contrast is very similar to the relation between the isoperimetric problem and the 1-Sobolev inequality (see this wiki page). In the continuous setting, the strategy employed in this solution corresponds to the coarea formula. Computing quickly many min-cuts Our goal is computing $mc(\{v\in B: s(v)\le k\}, \{v\in B: s(v)>k\})$ for all values of $k$. The number of interesting values of $k$ is clearly $O(n)$. For simplicity, we assume that the interesting values of $k$ are contiguous and that all the tiles on the boundary have different shades. Let us assume that we have computed, via a simple augmenting-path algorithm, a max-flow between $\{v\in B: s(v)\le k\}$ and $\{v\in B: s(v)> k\}$. In particular we are keeping the flow as a union of disjoint paths. We show how to update it to a max-flow between $\{v\in B: s(v)\le k+1\}$ and $\{v\in B: s(v)> k+1\}$ in $O(n^2)$ time (thus the overall complexity will be $O(n^3)$). Passing from $k$ to $k+1$ we only need to transform a sink into a source (in particular the vertex such that $s(v)=k+1$ becomes a source). How shall we do that?. First of all we remove all the paths that ends in $v$ (there are at most $3$, which is the degree of $v$). Then we look for augmenting paths. Due to the optimality of the flow before the update, it is not hard to prove that there can be at most $6$ augmenting paths. The overall complexity of the update is $3n^2 + 6n^2 = O(n^2)$. Solution via min-cost flow The crucial point is formulating the problem as a linear programming problem and computing its dual, which somewhat magically turns out to be a min-cost flow. Then a very careful implementation of the min-cost flow algorithm is necessary to get accepted (with some optimizations, it is possible to get accepted with execution time below $1$ second). Let $x_v$ be the shade of vertex $v$. For any edge $u\sim v$, let $c_{uv}$ be the contrast between $u$ and $v$. 
Then we have $c_{uv} \ge x_u-x_v \quad \text{ and }\quad c_{uv}\ge x_v-x_u \,,$ Hence, we have formulated the minimization of the total contrast as a linear programming problem. In this form, it's not clear how to solve it efficiently (the simplex algorithm would be way to slow). Computing the dual problem is straight-forward (using well-known formulas) but a bit heavy on notation. In the end, the dual problem turns out to be the min-cost flow with the following parameters: There is a source, connected to all the vertices $v$ already painted via an edge (from the source to $v$) with capacity $1$ and cost $x_v$. There is a sink, connected to all the vertices $v$ already painted via an edge (from $v$ to the sink) with capacity $1$ and cost $-x_v$. All the edges of the graph have capacity $1$ and cost $0$. Remove the edges between already painted vertices (taking care of the contrast generated by adjacent painted vertices). Use a radix-heap instead of a standard heap for Dijkstra.
[ "flows", "graphs" ]
3,300
/* "One Billion Shades of Grey": minimum total contrast computed as a sum of O(n) min-cuts, one per shade threshold k (painted tiles with shade <= k vs. shade > k), with the flow maintained incrementally as one sink at a time becomes a source — the editorial's first solution, O(n^3) overall. */ #include <bits/stdc++.h> using namespace std; typedef long long LL; #define SZ(x) ((int)((x).size())) /* Local-testing harness: input.txt redirection and dbg()/dbg_arr() in debug builds; no-ops on the judge. */ #if DEBUG && !ONLINE_JUDGE ifstream input_from_file("input.txt"); #define cin input_from_file #define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__) #define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl; #else #define dbg(...) #define dbg_arr(x, len) #endif /* Grid state. vals: -1 = broken tile, 0 = unpainted, >= 1 = painted shade. available[x][y][t]: residual capacity (0/1) of the unit arc leaving (x,y) in direction t; t^2 indexes the opposite arc, and the step is (dx, dy) = (dt[t], dt[t^1]). sources/sinks flag painted tiles on either side of the current shade threshold. */ const int MAXN = 200 + 5; int vals[MAXN][MAXN]; int available[MAXN][MAXN][4]; int dt[4] = {0, 1, 0, -1}; int N; bool sources[MAXN][MAXN]; bool sinks[MAXN][MAXN]; bool visited[MAXN][MAXN]; /* Clears the DFS marks. */ void reset_visited() { for (int x = 0; x < N; x++) for (int y = 0; y < N; y++) visited[x][y] = false; } /* Augmenting-path DFS from (x,y) to any cell flagged in is_goal; on success flips residual capacities along the path and returns true. When the goal is `sources`, only arcs whose reverse is unavailable are followed — i.e. the search retraces (and cancels) an existing flow path ending at (x,y). visited[][] is left dirty for the caller to reset or inspect (a failed search marks everything reachable from (x,y)). */ bool dfs(int x, int y, bool is_goal[MAXN][MAXN]) { visited[x][y] = true; if (is_goal[x][y]) return true; for (int t = 0; t < 4; t++) { if (!available[x][y][t]) continue; int x1 = x + dt[t]; int y1 = y + dt[t^1]; if (is_goal == sources and available[x1][y1][t^2]) continue; if (visited[x1][y1]) continue; if (dfs(x1, y1, is_goal)) { if (available[x1][y1][t^2]) available[x][y][t] = false; else available[x1][y1][t^2] = true; return true; } } return false; } int main() { ios::sync_with_stdio(false); cin.tie(0); // Remove in problems with online queries! 
/* Read the grid; every painted tile (shade >= 1) starts as a sink. */ cin >> N; vector<pair<int,int>> tiles; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) cin >> vals[i][j]; for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { if (vals[i][j] >= 1) { tiles.push_back({i, j}); sinks[i][j] = true; } } /* Open unit-capacity arcs between orthogonally adjacent non-broken tiles. */ for (int x = 0; x < N; x++) for (int y = 0; y < N; y++) { if (vals[x][y] == -1) continue; for (int t = 0; t < 4; t++) { int x1 = x + dt[t]; int y1 = y + dt[t^1]; if (x1 < 0 or x1 >= N or y1 < 0 or y1 >= N) continue; if (vals[x1][y1] == -1) continue; available[x][y][t] = true; } } /* Process painted tiles in increasing shade order. */ sort(tiles.begin(), tiles.end(), [&](pair<int,int> A, pair<int,int> B) { return vals[A.first][A.second] < vals[B.first][B.second]; }); LL res = 0; int flow = 0; /* Sweep over thresholds: tile `it` (smallest remaining shade) flips from sink to source. First cancel the flow paths ending at it, then re-augment from every source; the resulting max-flow equals the min-cut for every threshold in [shade(it), shade(it+1)), so it contributes (shade(it+1) - shade(it)) * flow to the answer. */ for (int it = 0; it < SZ(tiles)-1; it++) { int x = tiles[it].first, y = tiles[it].second; while (dfs(x, y, sources)) { reset_visited(); flow--; } reset_visited(); sinks[x][y] = false; sources[x][y] = true; for (auto pp: tiles) { // for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { int i = pp.first, j = pp.second; if (sources[i][j] and !visited[i][j]) { while (dfs(i, j, sinks)) { reset_visited(); flow++; } } } // for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) { // if (visited[i][j] and !vals[i][j]) vals[i][j] = vals[x][y]; // } reset_visited(); LL cost = vals[tiles[it+1].first][tiles[it+1].second]-vals[x][y]; res += cost * flow; } // for (int i = 0; i < N; i++) { // for (int j = 0; j < N; j++) cout << vals[i][j] << " "; // cout << "\n"; // } cout << res << endl; }
1427
H
Prison Break
A prisoner wants to escape from a prison. The prison is represented by the interior of the convex polygon with vertices $P_1, P_2, P_3, \ldots, P_{n+1}, P_{n+2}, P_{n+3}$. It holds $P_1=(0,0)$, $P_{n+1}=(0, h)$, $P_{n+2}=(-10^{18}, h)$ and $P_{n+3}=(-10^{18}, 0)$. The prison walls $P_{n+1}P_{n+2}$, $P_{n+2}P_{n+3}$ and $P_{n+3}P_1$ are very high and the prisoner is not able to climb them. Hence his only chance is to reach a point on one of the walls $P_1P_2, P_2P_3,\dots, P_{n}P_{n+1}$ and escape from there. On the perimeter of the prison, there are two guards. The prisoner moves at speed $1$ while the guards move, \textbf{remaining always on the perimeter of the prison}, with speed $v$. If the prisoner reaches a point of the perimeter where there is a guard, the guard kills the prisoner. If the prisoner reaches a point of the part of the perimeter he is able to climb and there is no guard there, he escapes immediately. Initially the prisoner is at the point $(-10^{17}, h/2)$ and the guards are at $P_1$. Find the minimum speed $v$ such that the guards can guarantee that the prisoner will not escape (assuming that both the prisoner and the guards move optimally). \textbf{Notes:} - At any moment, the guards and the prisoner can see each other. - The "climbing part" of the escape takes no time. - You may assume that both the prisoner and the guards can change direction and velocity instantly and that they both have perfect reflexes (so they can react instantly to whatever the other one is doing). - The two guards can plan ahead how to react to the prisoner movements.
We fix the unit of measure and say that the prisoner moves at $1$ meter per second. Let us parametrize the boundary of the prison as follows (ignoring the very long, very high walls). Start walking with speed $1$ from $P_1$ towards $P_{n+1}$ staying on the prison wall and let $\gamma(t)$ be the point you are at after time $t$ (so $\gamma$ is a curve with speed $1$). Let $L$ be the total length of such curve (i.e., $\gamma(L)=P_{n+1}$). With an abuse of notation, we will denote with $\gamma$ also the image of $\gamma$ (i.e., the climbable walls). A criterion for the prison break The prisoner can escape if and only if there are three times $0\le t_1<t_2<t_3\le L$ such that $\quad |\gamma(t_2)-\gamma(t_1)| < \frac{t_2-t_1}{v} \quad \text{ and } |\gamma(t_3)-\gamma(t_2)| < \frac{t_3-t_2}{v} \,.\tag{$\star$}$ proof. If there are three times $t_1<t_2<t_3$ such that $(\star)$ holds, then the prisoner can escape. The strategy of the prisoner is as follows. He begins by going (very close) to the point $\gamma(t_2)$. If there is not a guard there, he escapes. Otherwise, there must be a guard at (a point very close to) $\gamma(t_2)$. Without loss of generality we may assume that the other guard is at $\gamma(t)$ with $t<t_2$. Then the prisoner goes directly to $\gamma(t_3)$ and escapes from there. Notice that the prisoner reaches $\gamma(t_3)$ in $|\gamma(t_3)-\gamma(t_2)|$ seconds, while the closest guard (the one at $\gamma(t_2)$) needs $\frac{t_3-t_2}v$ seconds. The assumption guarantees that the prisoner reaches $\gamma(t_3)$ before the guard. If there are not three such times such that $(\star)$ holds, then the guards have a strategy to avoid the prison break. This implication is harder; we split its proof into various steps. The strategy of the guards is encoded by two functions $f_1,f_2$. 
Assume that we are given two functions $f_1,f_2:\mathcal Z\to [0,L]$, where $\mathcal Z$ denotes the interior of the prison, such that they are both $v$-Lipschitz (i.e., $|f(A)-f(B)|\le v|A-B|$) and, for any $0\le t\le L$, either $f_1(\gamma(t))=t$ or $f_2(\gamma(t))=t$ (or also both). Denoting with $Q$ the position of the prisoner, the guards may follow the following strategy (it is easy to adapt this strategy to the fact that initially the guards are at $P_1$, we leave it to the reader): The first guard will always be at $\gamma(f_1(Q))$. The second guard will always be at $\gamma(f_2(Q))$. Extending Lipschitz functions. It remains to construct two such functions $f_1,f_2$. The idea is to define them on $\gamma$ and then extend them inside $\mathcal Z$ through a standard technique. Assume that we are able to define them on $\gamma$, it is well-known that a real-valued Lipschitz function defined on a subset of a metric space (the metric space is $\mathcal Z$, its subset is $\gamma$) can be extended to the whole metric space without increasing the Lipschitz constant (this may sound as abstract nonsense, but it's easy... prove it!). From two functions to two subsets. The existence of the desired $f_1,f_2$ boils down to finding two functions on $\gamma$ that are $v$-Lipschitz and for each $0\le t\le L$ we have either $f_1(\gamma(t))=t$ or $f_2(\gamma(t))=t$. Let $\gamma_1,\gamma_2$ be the subsets of $\gamma$ where, respectively, $f_1(\gamma(t))=t$ and $f_2(\gamma(t))=t$. What can we say on the two subsets $\gamma_1$ and $\gamma_2$? Well, restricted on them, the function $\gamma(t)\mapsto t$ must be $v$-Lipschitz! Thus, applying again the extension argument described above, the problem reduces to finding two subsets $\gamma_1,\gamma_2$ of $\gamma$ such that $\gamma_1\cup\gamma_2=\gamma$ and the function $\gamma(t)\mapsto t$ is $v$-Lipschitz on $\gamma_1$ and $\gamma_2$. Existence of the subsets via a bipartition argument. 
We have finally arrived at the core of the proof, constructing $\gamma_1$ and $\gamma_2$. Given two points $\gamma(s)$ and $\gamma(t)$, we say that they are compatible if $v|\gamma(s)-\gamma(t)| \ge |s-t|$. The conditions on $\gamma_1$ and $\gamma_2$ are equivalent to: It holds $\gamma_1\cup\gamma_2=\gamma$. Any two points in $\gamma_1$ are compatible. Any two points in $\gamma_2$ are compatible. Computing the minimal speed $v$ In order to find the minimal $v$ such that the guards can avoid the prison break, we binary search the answer. Given a certain $v$, we must check whether there are three times $0\le t_1<t_2<t_3\le L$ such that $(\star)$ holds. We say that $t_2$ is left-incompatible if there is a $t_1<t_2$ such that $(\star)$ holds and we say that it is right-incompatible if there is a $t_3>t_2$ such that $(\star)$ holds. We have to check whether there is a time that is both left-incompatible and right-incompatible. Let us assume that $\gamma(t_1)\in [P_i,P_{i+1}]$ and $\gamma(t_2)\in[P_j,P_{j+1}]$ with $i < j$. Under this additional constraint, characterizing the "left-incompatible" times $t_2$ reduces to finding all the points $\gamma(t_2)\in[P_j,P_{j+1}]$ such that $\min_{\gamma(t_1)\in[P_i,P_{i+1}]} |\gamma(t_2)-\gamma(t_1)|-\frac{t_2-t_1}{v} < 0 \,.$ The first method, based on solving quadratic equations, needs some care to avoid precision issues/division by zero. The complexity is $O(1)$ (once $i$ and $j$ are fixed) and produces a complete algorithm with complexity $O(n^2\log(n)\log(\varepsilon^{-1}))$ (or a slightly easier to implement $O(n^3\log(\varepsilon^{-1}))$ that is super-fast. The second method, based on ternary search, is pretty straight-forward to implement but proving its correctness is rather hard. The complexity is $O(\log(\varepsilon^{-1})^2)$ and produces a complete algorithm with complexity $O(n^2\log(\varepsilon^{-1})^3 + n^3\log(\varepsilon^{-1}))$ which is rather slow in practice; some care might be necessary to get it accepted. 
Since we will need them in both approaches, let us define $t^{\pm}_{12}$ as the values such that $\gamma(t_1^-)=P_i$, $\gamma(t_1^+)=P_{i+1}$, $\gamma(t_2^-)=P_j$, $\gamma(t_2^+)=P_{j+1}$. Via quadratic equations. Consider the function $F(t_1,t_2):= |\gamma(t_2)-\gamma(t_1)|^2v^2-(t_2-t_1)^2$. We have to find the values $t_2\in[t_2^-,t_2^+]$ such that $\min_{t_1\in [t_1^-,t_1^+]} F(t_1, t_2) < 0\,.$ Let us remark that, even if everything seems elementary and easy, implementing this is not a cakewalk. Via ternary search. Consider the function $G(t_1,t_2):= \frac{|\gamma(t_2)-\gamma(t_1)|}{t_2-t_1}$. We have to find the values $t_2\in[t_2^-,t_2^+]$ such that $\min_{t_1\in[t_1^-,t_1^+]} G(t_1, t_2) < v^{-1} \,.$ $\tilde G(t_2) := \min_{t_1\in[t_1^-, t_1^+]} G(t_1, t_2) \,.$ For a proof that the above functions are really unimodal, take a look at this short pdf. Implementing this second approach might be a bit tedious, but presents no real difficulty. Since this is quite slow, it might need some care to get accepted (for example, the arg-min of $\tilde G$ shall be computed only once and not for each binary-search iteration on $v$).
[ "binary search", "games", "geometry", "ternary search" ]
3,500
/* "Prison Break": binary-search the minimal guard speed v. For a candidate v the prisoner escapes iff some point t2 on the climbable wall is both left- and right-incompatible in the sense of the editorial's criterion (star); the incompatibility intervals are computed exactly, segment pair by segment pair, by solving quadratic inequalities (the editorial's first method). */ #define _USE_MATH_DEFINES #include <bits/stdc++.h> using namespace std; typedef long long LL; typedef unsigned long long ULL; #define SZ(x) ((int)((x).size())) /* Renders at most `counter` elements of [begin_iter, end_iter) as "[a, b, ]"; a negative counter means no limit. Debug helper only. */ template <typename T1, typename T2> string print_iterable(T1 begin_iter, T2 end_iter, int counter) { bool done_something = false; stringstream res; res << "["; for (; begin_iter != end_iter and counter; ++begin_iter) { done_something = true; counter--; res << *begin_iter << ", "; } string str = res.str(); if (done_something) { str.pop_back(); str.pop_back(); } str += "]"; return str; } /* Returns the permutation of 0..size-1 ordered by `compare`. */ vector<int> SortIndex(int size, std::function<bool(int, int)> compare) { vector<int> ord(size); for (int i = 0; i < size; i++) ord[i] = i; sort(ord.begin(), ord.end(), compare); return ord; } /* MinPlace/MaxPlace: fold b into a (a = min(a,b) / max(a,b)); return true iff a changed. */ template <typename T> bool MinPlace(T& a, const T& b) { if (a > b) { a = b; return true; } return false; } template <typename T> bool MaxPlace(T& a, const T& b) { if (a < b) { a = b; return true; } return false; } /* Debug printing for pairs and vectors. */ template <typename S, typename T> ostream& operator <<(ostream& out, const pair<S, T>& p) { out << "{" << p.first << ", " << p.second << "}"; return out; } template <typename T> ostream& operator <<(ostream& out, const vector<T>& v) { out << "["; for (int i = 0; i < (int)v.size(); i++) { out << v[i]; if (i != (int)v.size()-1) out << ", "; } out << "]"; return out; } /* Variadic helpers behind the dbg(...) macro: print "name: value" pairs to clog. */ template<class TH> void _dbg(const char* name, TH val){ clog << name << ": " << val << endl; } template<class TH, class... TA> void _dbg(const char* names, TH curr_val, TA... vals) { while(*names != ',') clog << *names++; clog << ": " << curr_val << ", "; _dbg(names+1, vals...); } /* Local-testing harness: with -DDEBUG off the judge, read from input.txt and enable dbg()/dbg_arr(); otherwise they compile to nothing. */ #if DEBUG && !ONLINE_JUDGE ifstream input_from_file("input.txt"); #define cin input_from_file #define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__) #define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl; #else #define dbg(...) 
#define dbg_arr(x, len) #endif /////////////////////////////////////////////////////////////////////////// //////////////////// DO NOT TOUCH BEFORE THIS LINE //////////////////////// /////////////////////////////////////////////////////////////////////////// /* Minimal 2-D vector type: difference, dot product, Euclidean norm, scalar division, debug printing. */ struct pt { double x, y; pt(): x(0), y(0) {} pt(double x, double y): x(x), y(y) {} }; pt operator -(pt A, pt B) { return {A.x-B.x, A.y-B.y}; } double operator *(pt A, pt B) { return A.x*B.x + A.y*B.y; } double norm(pt A) { return sqrt(A*A); } pt operator /(pt A, double lambda) { return {A.x/lambda, A.y/lambda}; } ostream& operator<<(ostream& out, pt P) { out << "(" << P.x << ", " << P.y << ")"; return out; } /* Appends to `ans` the sub-intervals of (l, r) on which P(s) = a*s^2 + b*s + c is negative. Requires a close to 1 or a < 0 (see assert); the a < 0 branch renormalizes the coefficients and specifically guards against |a| numerically close to zero. */ // P(s) = as² + bs +c // {l < s < r: P(s) < 0} void neg_interval(double a, double b, double c, double l, double r, vector<pair<double,double>>& ans) { assert(abs(a-1) < 1e-5 or a < 0); if (a > 0) { b /= a, c /= a, a = 1; if (b*b < 4*a*c) return; double delta = sqrt(b*b-4*a*c); double l0 = (-b - delta)/2; double r0 = (-b + delta)/2; l0 = max(l0, l); r0 = min(r0, r); if (l0 < r0) ans.emplace_back(l0, r0); } if (a < 0) { double s = sqrt(a*a + b*b + c*c); a /= s, b /= s, c /= s; // renormalization if (b*b < 4*a*c) { ans.emplace_back(l, r); return; } double delta = sqrt(b*b-4*a*c); double l0 = (-b + delta)/(2*a); double r0 = (-b - delta)/(2*a); // The following lines are necessary to handle appropriately a close to 0. 
if (-0.01 < a) { if (b > 0) l0 = (-2*c)/(b+delta); else r0 = (2*c)/(-b + delta); } if (l < l0) ans.emplace_back(l, min(l0, r)); if (r0 < r) ans.emplace_back(max(l, r0), r); } } /* Appends the values s in (l0, r0) for which min over t in (l1, r1) of Q(s, t) is negative. The inner minimum is attained either at the unconstrained argmin t = -(a*s + c)/2 (when that falls inside (l1, r1)) or at an endpoint t = l1 / t = r1; each of the three cases reduces to the 1-D quadratic routine above. */ // Q(s, t) = s² + t² + ast + bs + ct + d // {l0 < s < r0: \min_{l1 < t < r1} Q(s, t) < 0} void neg_interval(double a, double b, double c, double d, double l0, double r0, double l1, double r1, vector<pair<double,double>>& ans) { assert(a >= 1.99); // argmin_t Q(s, t) = -(as+c)/2 in [l1, r1] double lmin = (-2 * r1 - c)/a; double rmin = (-2 * l1 - c)/a; lmin = max(lmin, l0); rmin = min(rmin, r0); // argmin in [l1,r1] <-> lmin < s < rmin if (lmin < rmin) neg_interval(1-a*a/4, b-a*c/2, d-c*c/4, lmin, rmin, ans); neg_interval(1, a*l1+b, l1*l1+c*l1+d, l0, r0, ans); neg_interval(1, a*r1+b, r1*r1+c*r1+d, l0, r0, ans); } /* Wall segments AB and CD, with l the arc-length offset between their start points. Appends the s-intervals on AB (candidate t2 positions, in local coordinates 0..|AB|) from which some point t on CD violates the guard condition, i.e. v^2*|gamma(s) - gamma(t)|^2 < (l + t - s)^2 — the prisoner beats the guard between those two wall points. NOTE(review): the early return for v >= sqrt(2/(1+dA*dC)) discards the configuration where the reduced quadratic degenerates (see inline note); confirm it also implies no violating pair exists. */ void good_interval(pt A, pt B, pt C, pt D, double v, double l, vector<pair<double,double>>& ans) { double lA = 0; double rA = norm(B-A); pt dA = (B-A)/rA; double lC = 0; double rC = norm(D-C); pt dC = (D-C)/rC; // v*v*(A+s*dA - C - t*dC)^2 - (l + t - s)^2 < 0 if (v >= sqrt(2/(1+dA*dC))) return; // if ==, then c2 = 2c1 (and a = 2). double c1 = v*v - 1; // s² and t² double c2 = 2*(1- (v*v)*(dA*dC)); // st double c3 = 2*v*v*((A-C)*dA) + 2*l; // s double c4 = -2*v*v*((A-C)*dC) - 2*l; // t double c5 = v*v * ((A-C)*(A-C)) - l*l; // const c2 /= c1, c3 /= c1, c4 /= c1, c5 /= c1; neg_interval(c2, c3, c4, c5, lA, rA, lC, rC, ans); } /* Open-interval overlap test. */ bool nonempty_intersection(pair<double,double> I1, pair<double,double> I2) { return max(I1.first, I2.first) < min(I1.second, I2.second); } /* P[0..N]: vertices of the climbable wall; len[i]: arc length from P[0] to P[i]. */ const int MAXN = 51; pt P[MAXN]; double len[MAXN]; int main() { ios::sync_with_stdio(false); cin.tie(0); // Remove in problems with online queries! 
/* Read the N+1 wall vertices and prefix arc lengths. */ int N; cin >> N; for (int i = 0; i <= N; i++) cin >> P[i].x >> P[i].y; for (int i = 1; i < N; i++) len[i] = len[i-1] + norm(P[i]-P[i-1]); /* Special case: with N <= 2 climbable segments the answer is 1. */ if (N <= 2) { cout << 1 << "\n"; return 0; } /* Binary search v in [1, 20], 50 iterations. At speed v the prisoner escapes ("good") iff some point on a middle segment i is both left-incompatible (intervals bef, against segments j < i) and right-incompatible (aft, against segments j > i); then the guards need a higher speed, so raise the lower bound. */ double l = 1; double r = 20; for (int it = 0; it < 50; it++) { bool good = false; double v = (l+r)/2; for (int i = 1; i < N-1; i++) { vector<pair<double,double>> bef, aft; for (int j = 0; j < i; j++) good_interval(P[i], P[i+1], P[j], P[j+1], v, len[j]-len[i], bef); for (int j = i+1; j < N; j++) good_interval(P[i], P[i+1], P[j], P[j+1], v, len[j]-len[i], aft); for (auto& I: bef) for (auto& J: aft) good |= nonempty_intersection(I, J); } if (good) l = v; else r = v; } cout.precision(10); cout << l << "\n"; }
1428
A
Box is Pull
Wabbit is trying to move a box containing food for the rest of the zoo in the coordinate plane from the point $(x_1,y_1)$ to the point $(x_2,y_2)$. He has a rope, which he can use to pull the box. He can only pull the box if he stands \textbf{exactly} $1$ unit away from the box in the direction of one of two coordinate axes. He will pull the box to where he is standing before moving out of the way in the same direction by $1$ unit. For example, if the box is at the point $(1,2)$ and Wabbit is standing at the point $(2,2)$, he can pull the box right by $1$ unit, with the box ending up at the point $(2,2)$ and Wabbit ending at the point $(3,2)$. Also, Wabbit can move $1$ unit to the right, left, up, or down without pulling the box. In this case, it is not necessary for him to be in exactly $1$ unit away from the box. If he wants to pull the box again, he must return to a point next to the box. Also, Wabbit can't move to the point where the box is located. Wabbit can start at any point. It takes $1$ second to travel $1$ unit right, left, up, or down, regardless of whether he pulls the box while moving. Determine the minimum amount of time he needs to move the box from $(x_1,y_1)$ to $(x_2,y_2)$. Note that the point where Wabbit ends up at does not matter.
Consider when $x_1=x_2$ Consider when $x_1 \ne x_2$ We consider 2 cases. The first is that the starting and ending point lie on an axis-aligned line. In this case, we simply pull the box in 1 direction, and the time needed is the distance between the 2 points as we need 1 second to decrease the distance by 1. The second is that they do not lie on any axis-aligned line. Wabbit can pull the box horizontally (left or right depends on the relative values of $x_1$ and $x_2$) for $|x_1-x_2|$ seconds, take 2 seconds to move either above or below the box, then take another $|y_1-y_2|$ seconds to move the box to $(x_2,y_2)$.
[ "math" ]
800
tc=int(input()) for i in range(tc): a,b,c,d=map(int,input().split(" ")) ans=abs(a-c)+abs(b-d) if (a!=c and b!=d): ans+=2 print(ans)
1428
B
Belted Rooms
In the snake exhibition, there are $n$ rooms (numbered $0$ to $n - 1$) arranged in a circle, with a snake in each room. The rooms are connected by $n$ conveyor belts, and the $i$-th conveyor belt connects the rooms $i$ and $(i+1) \bmod n$. In the other words, rooms $0$ and $1$, $1$ and $2$, $\ldots$, $n-2$ and $n-1$, $n-1$ and $0$ are connected with conveyor belts. The $i$-th conveyor belt is in one of three states: - If it is clockwise, snakes can only go from room $i$ to $(i+1) \bmod n$. - If it is anticlockwise, snakes can only go from room $(i+1) \bmod n$ to $i$. - If it is off, snakes can travel in either direction. Above is an example with $4$ rooms, where belts $0$ and $3$ are off, $1$ is clockwise, and $2$ is anticlockwise. Each snake wants to leave its room and come back to it later. A room is \textbf{returnable} if the snake there can leave the room, and later come back to it using the conveyor belts. How many such \textbf{returnable} rooms are there?
There are 2 cases to consider for a room to be returnable. For a room to be returnable, either go one big round around all the rooms or move to an adjacent room and move back. Let's consider two ways to return to the start point. The first is to go one big round around the circle. The second is to move 1 step to the side, and return back immediately. Going one big round is possible if and only if: There are no clockwise belts OR There are no anticlockwise belts If we can go one big round, all rooms are returnable. If there are both clockwise and anticlockwise belts, then we can't go one big round. For any room to be returnable, it must have an off belt to the left or to the right. In summary, check if clockwise belts are absent or if anticlockwise belts are absent. If either is absent, the answer is $n$. Otherwise, we have to count the number of rooms with an off belt to the left or to the right. Sorry for the unclear statement for B, we should've explained each sample testcase more clearly with better diagrams. Additionally, we're also sorry for the weak pretests. We should've added more testcases of smaller length, and thanks to hackers for adding stronger tests.
[ "graphs", "implementation" ]
1,200
TC = int(input()) for tc in range(TC): n = int(input()) s = input() hasCW = False hasCCW = False for c in s: if c == '>': hasCW = True if c == '<': hasCCW = True if hasCW and hasCCW: s += s[0] ans = 0; for i in range(n): if s[i] == '-' or s[i+1] == '-': ans += 1 print(ans) else: print(n)
1428
C
ABBB
Zookeeper is playing a game. In this game, Zookeeper must use bombs to bomb a string that consists of letters 'A' and 'B'. He can use bombs to bomb a substring which is either "AB" or "BB". When he bombs such a substring, the substring gets deleted from the string and the remaining parts of the string get concatenated. For example, Zookeeper can use two such operations: AAB\underline{AB}BA $\to$ AA\underline{BB}A $\to$ AAA. Zookeeper wonders what the shortest string he can make is. Can you help him find the length of the shortest string?
and oolimry AB and BB means that ?B can be removed. The final string is BAAA... or AAA.... This game is equivalent to processing left to right and maintaining a stack. If the current processed character is A, we add it to the stack, if the current processed character is B, we can either add it to the stack or pop the top of the stack. In the optimal solution, we will always pop from the stack whenever possible. To prove this, we will use the stay ahead argument. Firstly, we notice that the contents of the stack do not actually matter. We actually only need to maintain the length of this stack. Decrementing the size of the stack whenever possible is optimal as it is the best we can do. And in the case where we must push `B' to the stack, this is optimal as the parity of the length of the stack must be the same as the parity of the processed string, so obtaining a stack of length 0 is impossible. Bonus: what is the length of the longest string that Zookeeper can make such that there are no moves left? We're also sorry for the weak pretests in this problem. About 1 hour before the contest, we found out that c++ $O(N^2)$ solution using find and erase would pass. Then we added testcases to kill the c++ solutions, but we didn't test the $O(N^2)$ solution for python using replace.
[ "brute force", "data structures", "greedy", "strings" ]
1,100
TC = int(input()) for tc in range(TC): s = input() ans=0 for i in s: if (i=='B' and ans!=0): ans-=1 else: ans+=1 print(ans)
1428
D
Bouncing Boomerangs
To improve the boomerang throwing skills of the animals, Zookeeper has set up an $n \times n$ grid with some targets, \textbf{where each row and each column has at most $2$ targets each}. The rows are numbered from $1$ to $n$ from top to bottom, and the columns are numbered from $1$ to $n$ from left to right. For each column, Zookeeper will throw a boomerang from the bottom of the column (below the grid) upwards. When the boomerang hits any target, it will bounce off, make a $90$ degree turn to the right and fly off in a straight line in its new direction. The boomerang can hit multiple targets and does not stop until it leaves the grid. In the above example, $n=6$ and the black crosses are the targets. The boomerang in column $1$ (blue arrows) bounces $2$ times while the boomerang in column $3$ (red arrows) bounces $3$ times. The boomerang in column $i$ hits exactly $a_i$ targets before flying out of the grid. \textbf{It is known that $a_i \leq 3$.} However, Zookeeper has lost the original positions of the targets. Thus, he asks you to construct a valid configuration of targets that matches the number of hits for each column, or tell him that no such configuration exists. If multiple valid configurations exist, you may print any of them.
Consider $a_i \in \{0,1,2\}$. Consider $a_i=\{3,3,3, \ldots , 3,1\}$. Clearly, columns with $a_j=0$ are completely empty and we can ignore them. Let's first consider just columns with $1$ s and $2$ s. When a boomerang strikes its first target, it will change directions from upwards to rightwards. If $a_j=1$, the boomerang in column $j$ exits the grid on the right. This means that if the target that it hits is on row $r$, there is no other target to its right on row $r$. For columns with $a_j=2$, the boomerang in column $j$ has to hit its second target in some column $k$ before moving downwards. The $2$ targets that this boomerang hits must be in the same row, and since no row contains more than $2$ targets, these are the only $2$ targets in the row. Additionally, there isn't any target below the second target. This means $a_k=1$. This tells us that columns $j$ with $a_j=2$ must be matched with columns $k$ with $a_k=1$ to its right with $j < k$. If we only had $a_j=1$ and $a_j=2$, we can simply greedily match $2$ s to $1$ s that are available. $3$ s initially seem difficult to handle. The key observation is that $3$ s can "link" to $3$ s to its right. The way to do this is to have the first target for one boomerang be the third target for another boomerang. This allows us to "chain" the $3$ s together in one long chain. Thus, we only care about the first $3$, which has to use either a $2$ or a $1$ (if it uses a $1$, that $1$ cannot be matched with a $2$). We should always use a $2$ if possible since it will never be used by anything else, and the exact $1$ that we use also doesn't matter. Thus the solution is as follows: Process from right to left. If the current value is a $1$, add it to a list of available ones. If the current value is a $2$, match it with an available $1$ and remove the $1$ from the list. If the current value is a $3$, match it with $3$,$2$ or $1$ in that order of preference. 
Once we have found the chains and matches, we can go from left to right and give each chain / match some number of rows to use so that they do not overlap. The final time complexity is $O(n)$. Bonus $1$: Show that the directly simulating the path of each boomerang is overall $O(n)$. Bonus $2$ (unsolved): Solve for $0 \leq a_j \leq 4$.
[ "constructive algorithms", "greedy", "implementation" ]
1,900
def die(): print(-1) exit(0) ones = [] ans = [] H = 1 lastThree = (-1,-1) n = int(input()) arr = list(map(int,input().split(" "))) for i in range(n-1, -1, -1): if arr[i] == 0: continue elif arr[i] == 1: ones.append((i,H)) ans.append((i,H)) H += 1 elif arr[i] == 2: if len(ones) == 0: die() T = ones[-1] ones.pop() ans.append((i, T[1])) lastThree = (i,T[1]) elif arr[i] == 3: if lastThree[0] == -1: if len(ones) == 0: die() else: lastThree = ones[-1] ones.pop() ans.append((i, H)) ans.append((lastThree[0], H)) lastThree = (i,H) H += 1 print(len(ans)) for ii in ans: print(n-ii[1] + 1, end = ' ') print(ii[0] + 1)
1428
E
Carrots for Rabbits
There are some rabbits in Singapore Zoo. To feed them, Zookeeper bought $n$ carrots with lengths $a_1, a_2, a_3, \ldots, a_n$. However, rabbits are very fertile and multiply very quickly. Zookeeper now has $k$ rabbits and does not have enough carrots to feed all of them. To solve this problem, Zookeeper decided to cut the carrots into $k$ pieces. For some reason, all resulting carrot lengths must be positive integers. Big carrots are very difficult for rabbits to handle and eat, so the time needed to eat a carrot of size $x$ is $x^2$. Help Zookeeper split his carrots while minimizing the sum of time taken for rabbits to eat the carrots.
Orz proof by: oolimry Greedy. Let us define $f(l,p)$ as the sum of time needed when we have a single carrot of length $l$ and it is split into $p$ pieces. We can show that $f(l,p-1)-f(l,p) \ge f(l,p)-f(l,p+1)$. Let us define $f(l,p)$ as the sum of time needed when we have a single carrot of length $l$ and it is split into $p$ pieces. In an optimal cutting for a single carrot, we will only cut it into pieces of length $w$ and $w+1$, for some $w$. Such cutting is optimal as suppose we have $2$ pieces of length $\alpha$ and $\beta$, and $\alpha+2 \leq \beta$. Then, it is better to replace those $2$ pieces of carrots with length $\alpha+1$ and $\beta-1$, since $(\alpha+1)^2+(\beta-1)^2 \leq \alpha^2+\beta^2$. To calculate $f(l,p)$, we need to find $p_1,p_2$ such that $p_1 (w) + p_2 (w+1)=l$ and $p_1+p_2=p$, minimizing $p_1(w)^2+p_2(w+1)^2$. Clearly, $w=\lfloor \frac{l}{p} \rfloor$ and $p_2=l\mod p$. Thus, calculating $f(l,p)$ can be done in $O(1)$. We can use the following greedy algorithm. We consider making zero cuts at first. We will make $k$ cuts one by one. When deciding where to make each cut, consider for all the carrots, which carrot gives us the largest decrease in cost when we add an extra cut. If a carrot of length $l$ currently is in $p$ pieces, then the decrease in cost by making one extra cut is $f(l,p) - f(l,p+1)$. The important observation here is that $f(l,p-1)-f(l,p) \geq f(l,p)-f(l,p+1)$. Consider $f(2l,2p)$. Since the length is even and we have an even number of pieces, we know that there will be one cut down the middle and each half will have $p$ pieces. As such, $f(2l,2p) = 2f(l,p)$ This is because there is an even number of $w$ length pieces and an even number of $w+1$ length pieces, hence the left half and the right half are the same. Also, $f(l,p-1) + f(l,p+1) \geq f(2l,2p)$, since $f(l,p-1) + f(l,p+1)$ is describing cutting a carrot of length $2l$ first into $2$ carrots of length $l$ and cutting it into $p-1$ and $p+1$ pieces respectively. 
The inequality must hold because this is a way to cut a carrot of length $2l$ into $2p$ pieces. Since we have $f(l,p-1) + f(l,p+1) \geq f(2l,2p)$ and $f(2l,2p) = 2f(l,p)$. We have $f(l,p-1) + f(l,p+1) \geq f(l,p) + f(l,p)$. Rearranging terms gives $f(l,p-1)-f(l,p) \geq f(l,p)-f(l,p+1)$ In other words, the more cuts we make to a carrot, there is a diminishing returns in the decrease in cost per cut. As such, For any carrot, we don't need to think about making the $(p+1)$-th cut before making the $p$-th cut. Hence, all we need to do is choose carrot with the largest decrease in cost, and add one extra cut for that. Which carrot has the largest decrease in cost can be maintained with a priority queue. Hence, we get a time complexity of $O(klog(n))$. Bonus: solve this problem for $k \leq 10^{18}$. When preparing this problem, I started with putting $n,k\leq 5000$ because i thought if I put larger values it would be guessforces. However, several testers made $O(nk \log n)$ and $O(nk)$ dp solutions. Which version of the problem do you think is better?
[ "binary search", "data structures", "greedy", "math", "sortings" ]
2,200
from heapq import * n,k=map(int,input().split(" ")) def val(l,nums): unit=l//nums extra=l-unit*nums return (nums-extra)*unit*unit+extra*(unit+1)*(unit+1) pq=[] arr=list(map(int,input().split(" "))) total=0 for x in range(n): total+=arr[x]*arr[x] heappush(pq,(-val(arr[x],1)+val(arr[x],2),arr[x],2)) for x in range(k-n): temp=heappop(pq) total+=temp[0] a,b=temp[1],temp[2] heappush(pq,(-val(a,b)+val(a,b+1),a,b+1)) print(total)
1428
F
Fruit Sequences
Zookeeper is buying a carton of fruit to feed his pet wabbit. The fruits are a sequence of apples and oranges, which is represented by a binary string $s_1s_2\ldots s_n$ of length $n$. $1$ represents an apple and $0$ represents an orange. Since wabbit is allergic to eating oranges, Zookeeper would like to find the longest \textbf{contiguous} sequence of apples. Let $f(l,r)$ be the longest \textbf{contiguous} sequence of apples in the substring $s_{l}s_{l+1}\ldots s_{r}$. Help Zookeeper find $\sum_{l=1}^{n} \sum_{r=l}^{n} f(l,r)$, or the sum of $f$ across all substrings.
Line sweep. How does $f(l,r)$ change when we increase r? For a fixed $r$, let us graph $f(l,r)$ in a histogram with $l$ as the $x$-axis. We notice that this histogram is non-increasing from left to right. Shown below is the histogram for the string $11101011$ with $r = 6$, where $1$ box represents $1$ unit. The area under this histogram is the sum of all $f(l,r)$ for a fixed $r$. Consider what happens to the histogram when we move from $r$ to $r+1$. If $s_{r+1} = 0$, then the histogram does not change at all. If $s_{r+1} = 1$, then we may need to update the histogram accordingly. Above is the histogram for $11101011$ when $r=7$. And this is the histogram for $11101011$ when $r=8$. When adding a new segment of $k$ $1$s, we essentially fill up the bottom $k$ rows of the histogram. Thus, we let $L_x$ be the largest $l$ such that $f(l,r) = x$. We maintain these $L_x$ values in an array. When we process a group of $k$ 1s, we update the values for $L_1, L_2, \cdots, L_k$, change the area of the histogram and update the cumulative sum accordingly. With proper processing of the segments of 1s, this can be done in $O(n)$ time. Interestingly, many testers found F not much harder than E, as such we gave them the same score. However, it seems that F was significantly harder than E based on the number of solves. Another thing was that FST for this problem were caused by testdata that involved sequences of '1's of increasing length.
[ "binary search", "data structures", "divide and conquer", "dp", "two pointers" ]
2,400
import sys range = xrange input = raw_input inp=sys.stdin.read().split() n = int(inp[0]) s=inp[1] tot=0 cur=0 hist=[0]*1000005 i=0 while (i<n): if (s[i]=='0'): tot+=cur else: l=i r=i while (r+1<n and s[r+1]=='1'): r+=1 for x in range(r-l+1): cur+=(l+x+1)-hist[x] tot+=cur hist[x]=r-x+1 i=r i+=1 print(tot)
1428
G2
Lucky Numbers (Hard Version)
\textbf{This is the hard version of the problem. The only difference is in the constraint on $q$. You can make hacks only if all versions of the problem are solved.} Zookeeper has been teaching his $q$ sheep how to write and how to add. The $i$-th sheep has to write exactly $k$ \textbf{non-negative integers} with the sum $n_i$. Strangely, sheep have superstitions about digits and believe that the digits $3$, $6$, and $9$ are lucky. To them, the fortune of a number depends on the decimal representation of the number; the fortune of a number is equal to the sum of fortunes of its digits, and the fortune of a digit depends on its value and position and can be described by the following table. For example, the number $319$ has fortune $F_{2} + 3F_{0}$. Each sheep wants to maximize the \textbf{sum of fortune} among all its $k$ written integers. Can you help them?
0 is a lucky number as well. There is at most one number whose digits are not entirely 0, 3, 6, 9. Knapsack. Group same items. Suppose in our final solution, there is a certain position (ones, tens...) that has two digits which are not $0$, $3$, $6$, or $9$. Let these two digits be $a$ and $b$. If $a+b \leq 9$, we can replace these two digits with $0$, and $a+b$. Otherwise, we can replace these two digits with $9$ and $a+b-9$. By doing this, our solution is still valid as the sum remains the same, and the total fortune will not decrease since $a$ and $b$ are not $3$, $6$ or $9$. As such, in the optimal solution, there should be at most one number that consists of digits that are not $0$, $3$, $6$ or $9$. Let's call this number that has other digits $x$. Since there's only one query, we can try all possibilities of $x$. The question reduces to finding the maximum sum of fortune among $k-1$ numbers containing the digits $0$, $3$, $6$ and $9$ that sum up to exactly $n-r$. We can model this as a 0-1 knapsack problem, where the knapsack capacity is $n-r$. For the ones position, the value (fortune) can be increased by $F_0$ for a weight of $3$ a total of $3(k-1)$ times, so we can create $3(k - 1)$ objects of weight $3$ and value $F_0$. For tens it's $F_1$ for a weight of $30$, and so on. Since there are many duplicate objects, we can group these duplicate objects in powers of $2$. For example, $25$ objects can be grouped into groups of $1$, $2$, $4$, $8$, $10$. This runs in $O(nd \log k)$ where $d$ is the number of digits (6 in this case). In the hard version, we are not able to search all $n$ possibilities for the last number $x$ as there are many queries. Using the 0-1 knapsack with duplicates, we have computed the dp table for the first $k-1$ numbers. The rest is incorporating the last number into the dp table. We can do this by considering each digit separately. Then, we can update the dp table by considering all possible transitions for all digits from $0$ to $9$.
[ "dp", "greedy" ]
3,000
#include <bits/stdc++.h> using namespace std; const long long inf = (1LL << 58LL); long long F[6]; long long fortune[10] = {0, 0, 0, 1, 0, 0, 2, 0, 0, 3}; long long ten[6] = {1, 10, 100, 1000, 10000, 100000}; long long dp[1000005]; int main(){ ios_base::sync_with_stdio(false); cin.tie(0); int K; int N = 999999; cin >> K; for(int i = 0;i <= 5;i++) cin >> F[i]; fill(dp,dp+N+1,-inf); dp[0] = 0; for(int d = 0;d <= 5;d++){ long long left = 3 * (K - 1); long long group = 1; ///grouping 3*(K-1) elements into groups of powers of 2 while(left > 0){ group = min(group, left); long long value = group * F[d]; long long weight = group * ten[d] * 3; for(int i = N;i >= weight;i--) dp[i] = max(dp[i], dp[i-weight] + value); ///knapsack DP left -= group; group *= 2; } } for(int d = 0;d <= 5;d++){ for(int i = N;i >= 0;i--){ for(int b = 1;b <= 9;b++){ int pre = i - ten[d] * b; if(pre < 0) break; dp[i] = max(dp[i], dp[pre] + fortune[b] * F[d]); } } } int Q; cin >> Q; while(Q--){ int n; cin >> n; cout << dp[n] << "\n"; } }
1428
H
Rotary Laser Lock
\textbf{This is an interactive problem.} To prevent the mischievous rabbits from freely roaming around the zoo, Zookeeper has set up a special lock for the rabbit enclosure. This lock is called the Rotary Laser Lock. The lock consists of $n$ concentric rings numbered from $0$ to $n-1$. The innermost ring is ring $0$ and the outermost ring is ring $n-1$. All rings are split equally into $nm$ sections each. Each of those rings contains a single metal arc that covers exactly $m$ contiguous sections. At the center of the ring is a core and surrounding the entire lock are $nm$ receivers aligned to the $nm$ sections. The core has $nm$ lasers that shine outward from the center, one for each section. The lasers can be blocked by any of the arcs. A display on the outside of the lock shows how many lasers hit the outer receivers. In the example above, there are $n=3$ rings, each covering $m=4$ sections. The arcs are colored in green (ring $0$), purple (ring $1$), and blue (ring $2$) while the lasers beams are shown in red. There are $nm=12$ sections and $3$ of the lasers are not blocked by any arc, thus the display will show $3$ in this case. Wabbit is trying to open the lock to free the rabbits, but the lock is completely opaque, and he cannot see where any of the arcs are. Given the \textbf{relative positions} of the arcs, Wabbit can open the lock on his own. To be precise, Wabbit needs $n-1$ integers $p_1,p_2,\ldots,p_{n-1}$ satisfying $0 \leq p_i < nm$ such that for each $i$ $(1 \leq i < n)$, Wabbit can rotate ring $0$ clockwise exactly $p_i$ times such that the sections that ring $0$ covers perfectly aligns with the sections that ring $i$ covers. In the example above, the relative positions are $p_1 = 1$ and $p_2 = 7$. To operate the lock, he can pick any of the $n$ rings and rotate them by $1$ section either clockwise or anti-clockwise. You will see the number on the display after every rotation. 
Because his paws are small, Wabbit has asked you to help him to find the \textbf{relative positions} of the arcs \textbf{after all of your rotations are completed}. You may perform up to $15000$ rotations before Wabbit gets impatient.
Using only arc $0$, find at least $1$ position where arc $0$ exactly matches another arc. Binary Search. Flatten the circle such that it becomes $n$ rows of length $nm$ with vertical sections numbered from $0$ to $nm-1$ that loops on the end. We say that the position of an arc is $x$ if the left endpoint of the arc is at section $x$. Indices are taken modulo $nm$ at all times. Moving arcs right is equivalent to rotating them clockwise, and moving arcs left is equivalent to rotating them counter-clockwise. Notice that if we shift arc 0 right and the display increases, then the leftmost section of arc 0 had no other arcs in the same section. Thus, if we shift arc 0 right again and the display does not increase, we are certain that there was another arc at that position, so we shift arc 0 left. We now enter the detection stage to find any arc that coincides with arc 0 (of which there exists at least 1) at this position. Let's note down this positions as $x$. Let $S$ be the set of arcs that we do not know the positions of yet (this set initially contains all arcs from 1 to $N-1$) and $T$ and $F$ be an empty sets. $T$ will be the set of all candidate arcs (those that may coincide here) and $F$ will contain all arcs that we have shifted leftwards. Take all elements in $S$ and put them in $T$, and pick half of the elements in $S$ and add them to the set $F$. We now shift all elements in $F$ left. We move arc 0 left to check if any arc is at position $x-1$. If there is, then we know that an arc that initially coincided at $x$ lies in $F$. In this case, we set $T$ to $T \cap F$ (elements of $T$ that are in $F$), pick half of the elements in $T$ to move right and remove those from $F$. If no arc is at position $x-1$, then the arc we are looking for lies in $T \ \backslash \ F$ (elements in $T$ that are not in $F$). We set $T$ to be $T \ \backslash \ F$ and pick half of $T$ to move left and add those to $F$. We then shift arc 0 right and recurse. 
When we have narrowed $T$ to exactly 1 arc, we know where exactly that arc is now. We shift that arc left such that its right endpoint is at $x-2$ so that it does not cover position $x-1$, which we may still need for future tests. Now we remove the arc found from $S$ and leave the detection stage and continue searching for the other arcs. Once we have found all $n-1$ other arcs, we find the relative position to arc 0 and print them as the final output. Whenever we shift arc 0 right and are not in the detection stage, we use 1 shift. This occurs at most $2nm-m$ times because it takes up to $nm-m$ shifts right to find the first position where arc 0 coincides, and another $nm$ to traverse the entire circle again to find all of the arcs. Whenever we enter the detection stage, we find one arc and use $2$ shifts initially when we move arc 0 right then left, yielding a total of $2n-2$ such shifts. Each binary search requires $2 \log |T|$ shifts of arc 0 (left and right), so across the $n-1$ detection stages this is at most $2n\log n$ when summed across all stages. The way we perform the binary search is quite important here. Performing it in a naive manner (e.g. shifting half left, test and shifting them back) can use up to $n^2$ queries. Instead, we set the number of elements that move in / out of $F$ at each iteration of the binary search to be the smaller half. This way we can guarantee that the number of shifts done by the candidate arcs is at most the total number of candidate arcs in the first place. This becomes $\frac{n(n-1)}{2}$ since we start with $n-1$ candidate arcs and reduce that number by 1 after each detection stage. When we shift each of the arcs left by $m$ or $m+1$ (depending on whether they were in $F$ when we narrowed it down to 1 arc), we use at most $(n-1)(m+1) = nm+n-m-1$ shifts. Thus in total, we use at most $2nm-m+2n-2+2n\log n + \frac{n(n-1)}{2} + nm+n-m-1$ shifts. 
For $n=100,m=20$, this is less than $13000$, which is much lower than the query limit of $15000$. The limit was set higher than the provable bound to allow for other possible solutions. At least $1$ tester found a different solution that used around $13500$ queries. Some other optimizations that empirically improve the number of queries: Instead of using arc $0$ as the detector, we can randomly pick one of the $n$ arcs as the detector arcs. Instead of using arc $0$ as the detector, we can randomly pick one of the $n$ arcs as the detector arcs. At the very beginning, we perform some constant number of random shifts of the arcs (e.g. $300$ to $400$ random shifts). This helps to break up long groups of arcs that overlap, which speeds up the initial $nm$ search. At the very beginning, we perform some constant number of random shifts of the arcs (e.g. $300$ to $400$ random shifts). This helps to break up long groups of arcs that overlap, which speeds up the initial $nm$ search. The official solution, augmented with these optimizations uses well below $11500$ queries and is very consistent for non-handcrafted test cases.
[ "binary search", "interactive" ]
3,500
#include <bits/stdc++.h>
using namespace std;

// Interactive helper: prints the query "? r d" (rotate arc r by d, where d is
// +1 / -1) and reads the judge's reply (the number currently displayed).
// Terminates the program immediately on the sentinel reply -1 (invalid query
// or query limit exceeded).
int queryRotation(int r, int d){ // Auxiliary function to print query and receive answer
    int res;
    cout << "? " << r << ' ' << d << endl << flush; // Print query
    cout.flush();
    cin >> res; // Get response
    if (res == -1) exit(1); // Error occurred, exit immediately
    else return res; // Return result
}

int main(){
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    int N, M; // Number of rings and number of sections per ring
    int totalPositions; // totalPositions = N*M
    int newNum;
    int currentNum; // track the current number on the display (responses from interactor)
    int state = 0; // 0 means searching for rings, 1 means finding the ring at this position
    int relpos[105] = {}; // Array to store the final relative positions
    cin >> N >> M;
    totalPositions = N*M;
    // While this statement doesn't do anything,
    // it serves as a reminder that we are taking
    // the initial starting position of ring 0
    // to be position 0
    relpos[0] = 0;
    currentNum = queryRotation(0,1);
    relpos[0] = (relpos[0] + 1) % totalPositions;
    // Now we prepare the set of unknown rings S
    vector<int> S;
    for (int i = 1; i < N; ++i){
        S.push_back(i);
    }
    // We now perform a preliminary rotation of ring 0 around until
    // we are certain that ring 0 coincides with another ring
    // at its leftmost point and the section to its left is empty.
    // This occurs when the difference changes from increasing when
    // ring 0 moves right to non-increasing when ring 0
    // moves right.
    int hasIncreased = 0;
    while (1){
        // Rotate once, get the new number
        newNum = queryRotation(0,1);
        relpos[0] = (relpos[0] + 1) % totalPositions;
        // Check if the number has increased
        if (newNum > currentNum){
            hasIncreased = 1;
            currentNum = newNum;
        }
        else{ // Otherwise the number hasn't increased
            currentNum = newNum;
            // Check if we have increased before
            if (hasIncreased){
                // If we have, it means that our previous position
                // coincided with the left endpoint of another arc.
                // Time to begin the detection stage: step back one
                // position so arc 0 sits on that coincidence point.
                state = 1;
                currentNum = queryRotation(0,-1);
                relpos[0] = (relpos[0] - 1 + totalPositions) % totalPositions;
                break;
            }
        }
    }
    while (!S.empty()){ // While we haven't found all of the rings yet
        if (state == 1){ // If we are searching for an arc here
            int curArc = 0; // to store the new arc
            vector<int> T = S; // set of candidate arcs
            vector<int> F; // set of arcs that have been moved left
            // Binary search over T: at every step the arcs in F are shifted
            // one position left; shifting arc 0 left and observing whether
            // the displayed number drops tells us which half holds the arc
            // that coincides with arc 0.
            for (int i = 0; i < T.size()/2; ++i){ // take the smaller half
                F.push_back(T[i]);
                currentNum = queryRotation(T[i],-1);
            }
            while (T.size() > 1){ // while the number of candidate arcs is > 1, we binary search on the remaining arcs
                int newNum = queryRotation(0,-1);
                relpos[0] = (relpos[0] - 1 + totalPositions) % totalPositions;
                if (newNum >= currentNum){
                    // the arc that we are looking for lies in F
                    // Set T to F
                    T.clear();
                    for (auto it : F){
                        T.push_back(it);
                    }
                    // Halve the size of F by moving the smaller half back to the right
                    int sz = F.size()/2;
                    for (int i = 0; i < sz; ++i){
                        int revRing = F.back();
                        currentNum = queryRotation(revRing,1);
                        F.pop_back();
                    }
                }
                else{
                    // the arc we are looking for does not lie in F:
                    // keep only the candidates of T that are not in F
                    vector<int> temp;
                    for (auto it1 : T){
                        int check = 0;
                        for (auto it2 : F){
                            if (it1 == it2) check = 1;
                        }
                        if (!check) temp.push_back(it1);
                    }
                    T = temp;
                    F.clear();
                    for (int i = 0; i < T.size()/2; ++i){ // take the smaller half once again
                        currentNum = queryRotation(T[i],-1);
                        F.push_back(T[i]);
                    }
                }
                currentNum = queryRotation(0,1); // bring arc 0 back
                relpos[0] = (relpos[0] + 1) % totalPositions;
            }
            curArc = T[0]; // we have found the arc
            // we want this arc to be M+1 sections away from the current position
            // of arc 0 so that it does not interfere with later searches
            if (!F.empty()){ // the arc we found was already shifted left once (it lies in F)
                for (int i = 0; i < M; ++i){ // move the arc back by M more sections
                    currentNum = queryRotation(curArc,-1);
                }
                relpos[curArc] = relpos[0] - M - 1;
            }
            else{ // the arc we found does not lie in F
                for (int i = 0; i <= M; ++i){ // move the arc back by M+1 sections
                    currentNum = queryRotation(curArc,-1);
                }
                relpos[curArc] = relpos[0] - M - 1;
            }
            // We now remove curArc from S
            vector<int> temp;
            for (auto it : S){
                if (it != curArc) temp.push_back(it);
            }
            S.clear();
            for (auto it : temp) S.push_back(it);
            // Leave the detection stage; step arc 0 back one position and
            // reset the "increase seen" flag before resuming the scan.
            state = 0;
            currentNum = queryRotation(0,-1);
            relpos[0] = (relpos[0] - 1 + totalPositions) % totalPositions;
            hasIncreased = 0;
        }
        else{ // keep moving right
            newNum = queryRotation(0,1);
            relpos[0] = (relpos[0] + 1) % totalPositions;
            if (newNum <= currentNum && hasIncreased == 1){ // start searching
                newNum = queryRotation(0,-1);
                relpos[0] = (relpos[0] - 1 + totalPositions) % totalPositions;
                state = 1;
            }
            else if (newNum > currentNum) hasIncreased = 1;
            currentNum = newNum;
        }
    }
    // Output every arc's position relative to arc 0.  The extra
    // "+ 5 * totalPositions" guards against the negative offsets stored when
    // arcs were parked M+1 sections behind arc 0.
    cout << "! ";
    for (int i = 1; i < N; ++i){
        cout << (((relpos[i] - relpos[0] + 5 * totalPositions)%totalPositions) + totalPositions) % totalPositions << ' ';
    }
    cout << endl << flush;
}
1430
A
Number of Apartments
Recently a new building with a new layout was constructed in Monocarp's hometown. According to this new layout, the building consists of three types of apartments: three-room, five-room, and seven-room apartments. It's also known that each room of each apartment has exactly one window. In other words, a three-room apartment has three windows, a five-room — five windows, and a seven-room — seven windows. Monocarp went around the building and counted $n$ windows. Now he is wondering, how many apartments of each type the building may have. Unfortunately, Monocarp only recently has learned to count, so he is asking you to help him to calculate the possible quantities of three-room, five-room, and seven-room apartments in the building that has $n$ windows. If there are multiple answers, you can print any of them. Here are some examples: - if Monocarp has counted $30$ windows, there could have been $2$ three-room apartments, $2$ five-room apartments and $2$ seven-room apartments, since $2 \cdot 3 + 2 \cdot 5 + 2 \cdot 7 = 30$; - if Monocarp has counted $67$ windows, there could have been $7$ three-room apartments, $5$ five-room apartments and $3$ seven-room apartments, since $7 \cdot 3 + 5 \cdot 5 + 3 \cdot 7 = 67$; - if Monocarp has counted $4$ windows, he should have mistaken since no building with the aforementioned layout can have $4$ windows.
There are many possible solutions to this problem. The simplest one is to notice that, using several flats of size $3$ and one flat of some size (possibly also $3$, possibly not), we can get any $n$ equal to $[3, 6, 9, \dots]$, $[5, 8, 11, \dots]$ or $[7, 10, 13, \dots]$. The only numbers that don't belong to these lists are $1$, $2$ and $4$, and it's easy to see that there is no answer for those numbers. So the solution is to try all possible sizes of one flat, and if the remaining number of windows is non-negative and divisible by $3$, then take the required number of three-room flats.
[ "brute force", "constructive algorithms", "math" ]
900
#include <iostream>
using namespace std;

/*
 * Problem: represent n = 3*a + 5*b + 7*c with non-negative a, b, c,
 * or report -1 when impossible.
 *
 * Only n = 1, 2, 4 have no representation.  Otherwise at most one flat
 * of size 5 or 7 makes the remainder divisible by 3:
 *   n % 3 == 0  ->  (n/3,       0, 0)
 *   n % 3 == 1  ->  ((n-7)/3,   0, 1)
 *   n % 3 == 2  ->  ((n-5)/3,   1, 0)
 */
static void answerQuery(long long windows) {
    // The only impossible window counts.
    if (windows == 1 || windows == 2 || windows == 4) {
        cout << -1 << endl;
        return;
    }
    switch (windows % 3) {
        case 0:
            cout << windows / 3 << ' ' << 0 << ' ' << 0 << endl;
            break;
        case 1:
            // One seven-room flat fixes the remainder (n >= 7 here).
            cout << (windows - 7) / 3 << ' ' << 0 << ' ' << 1 << endl;
            break;
        default:
            // One five-room flat fixes the remainder (n >= 5 here).
            cout << (windows - 5) / 3 << ' ' << 1 << ' ' << 0 << endl;
            break;
    }
}

int main() {
    int testCount;
    cin >> testCount;
    while (testCount--) {
        long long n;
        cin >> n;
        answerQuery(n);
    }
    return 0;
}
1430
B
Barrels
You have $n$ barrels lined up in a row, numbered from left to right from one. Initially, the $i$-th barrel contains $a_i$ liters of water. You can pour water from one barrel to another. In one act of pouring, you can choose two different barrels $x$ and $y$ (the $x$-th barrel shouldn't be empty) and pour any possible amount of water from barrel $x$ to barrel $y$ (possibly, all water). You may assume that barrels have infinite capacity, so you can pour any amount of water in each of them. Calculate the maximum possible difference between the maximum and the minimum amount of water in the barrels, if you can pour water \textbf{at most} $k$ times. Some examples: - if you have four barrels, each containing $5$ liters of water, and $k = 1$, you may pour $5$ liters from the second barrel into the fourth, so the amounts of water in the barrels are $[5, 0, 5, 10]$, and the difference between the maximum and the minimum is $10$; - if all barrels are empty, you can't make any operation, so the difference between the maximum and the minimum amount is still $0$.
A greedy strategy always works: take $k + 1$ largest barrels, choose one barrel among them and pour all water from those barrels to the chosen barrel. That way, we make the minimum amount equal to $0$ (it's quite obvious that we can't do anything better here), and the maximum amount as large as possible, so the difference between them will be as large as possible.
[ "greedy", "implementation", "sortings" ]
800
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;

/*
 * Problem: with at most k pourings, maximize (max - min) over the barrels.
 *
 * Greedy (see editorial): pour the k largest barrels into the single
 * largest one; the minimum becomes 0 and the maximum becomes the sum of
 * the k + 1 largest amounts, which is the answer.
 */
int main() {
    int testCount;
    cin >> testCount;
    while (testCount--) {
        int barrelCount, pourLimit;
        cin >> barrelCount >> pourLimit;
        vector<long long> water(barrelCount);
        for (auto &amount : water)
            cin >> amount;
        // Largest first, so the answer is a prefix sum.
        sort(water.begin(), water.end(), greater<long long>());
        long long best = 0;
        for (int i = 0; i <= pourLimit; ++i)  // k + 1 largest barrels
            best += water[i];
        cout << best << endl;
    }
    return 0;
}
1430
C
Numbers on Whiteboard
Numbers $1, 2, 3, \dots n$ (each integer from $1$ to $n$ once) are written on a board. In one operation you can erase any two numbers $a$ and $b$ from the board and write one integer $\frac{a + b}{2}$ rounded up instead. You should perform the given operation $n - 1$ times and make the resulting number that will be left on the board as small as possible. For example, if $n = 4$, the following course of action is optimal: - choose $a = 4$ and $b = 2$, so the new number is $3$, and the whiteboard contains $[1, 3, 3]$; - choose $a = 3$ and $b = 3$, so the new number is $3$, and the whiteboard contains $[1, 3]$; - choose $a = 1$ and $b = 3$, so the new number is $2$, and the whiteboard contains $[2]$. It's easy to see that after $n - 1$ operations, there will be left only one number. Your goal is to minimize it.
It's easy to see that we can't get the result less than $2$, because, if we merge two positive numbers, and at least one of them is $2$ or greater, the new number is always greater than $1$. So we can't get rid of all numbers greater than $1$. To always achieve $2$, we can use a greedy algorithm: always merge two maximum numbers. During the first step, we merge $n$ and $n - 1$, get $n$; then we merge $n$ and $n - 2$, get $n - 1$; then we merge $n - 1$ and $n - 3$, get $n - 2$; and it's easy to see that the last operation is merging $3$ and $1$, so the result is $2$.
[ "constructive algorithms", "data structures", "greedy", "implementation", "math" ]
1,000
#include <iostream>
#include <queue>
#include <vector>
using namespace std;

/*
 * Problem: numbers 1..n are on the board; repeatedly replace two numbers
 * a, b with ceil((a + b) / 2) until one remains.  Minimize the final
 * number and print the sequence of merges.
 *
 * Greedy (see editorial): always merge the two current maxima; this
 * drives the final value down to 2 for every n >= 2.
 */
int main() {
    int testCount;
    cin >> testCount;
    while (testCount--) {
        int n;
        cin >> n;
        // Max-heap holding the numbers currently on the board.
        priority_queue<int> board;
        for (int value = 1; value <= n; ++value)
            board.push(value);
        vector<pair<int, int>> merges;  // chosen (a, b) per operation
        for (int step = 0; step < n - 1; ++step) {
            int a = board.top(); board.pop();  // current maximum
            int b = board.top(); board.pop();  // second maximum
            board.push((a + b + 1) / 2);       // ceil of the average
            merges.emplace_back(a, b);
        }
        cout << board.top() << endl;
        for (const auto &op : merges)
            cout << op.first << ' ' << op.second << endl;
    }
    return 0;
}
1430
D
String Deletion
You have a string $s$ consisting of $n$ characters. Each character is either 0 or 1. You can perform operations on the string. Each operation consists of two steps: - select an integer $i$ from $1$ to the length of the string $s$, then delete the character $s_i$ (the string length gets reduced by $1$, the indices of characters to the right of the deleted one also get reduced by $1$); - if the string $s$ is not empty, delete the maximum length prefix consisting of the same characters (the indices of the remaining characters and the string length get reduced by the length of the deleted prefix). Note that both steps are mandatory in each operation, and their order cannot be changed. For example, if you have a string $s =$ 111010, the first operation can be one of the following: - select $i = 1$: we'll get 111010 $\rightarrow$ 11010 $\rightarrow$ 010; - select $i = 2$: we'll get 111010 $\rightarrow$ 11010 $\rightarrow$ 010; - select $i = 3$: we'll get 111010 $\rightarrow$ 11010 $\rightarrow$ 010; - select $i = 4$: we'll get 111010 $\rightarrow$ 11110 $\rightarrow$ 0; - select $i = 5$: we'll get 111010 $\rightarrow$ 11100 $\rightarrow$ 00; - select $i = 6$: we'll get 111010 $\rightarrow$ 11101 $\rightarrow$ 01. You finish performing operations when the string $s$ becomes empty. What is the maximum number of operations you can perform?
Suppose the string consists of $n$ characters, and each character is different from the adjacent ones (so the string looks like 01010... or 10101...). It's easy to see that we can't make more than $\lceil \frac{n}{2} \rceil$ operations (each operation deletes at least two characters, except for the case when the string consists of only one character). And there is an easy way to perform exactly $\lceil \frac{n}{2} \rceil$ operations: always choose the last character and delete it. Okay, what about the case when some adjacent characters in the string are equal? It's never optimal to delete a character that's different from both adjacent characters: since the second part of each operation always deletes the left block of equal characters, this action merges two blocks, so they will be deleted in one second part of the operation (which decreases the total number of operations). So, we should always delete a character from a block with at least two equal characters. From which of the blocks, if there are more than one? It's easy to see that we should choose a character from the leftmost such block, since that block is the earliest to be deleted (and if we want to make the same action later, we might be unable to do it). So, the solution is greedy: during each action, we have to find the leftmost block consisting of at least $2$ equal characters, and delete a character from it (or the last character of the string, if there are no such blocks). Since the length of the string is up to $2 \cdot 10^5$ and the number of operations is up to $10^5$, we should do it efficiently, for example, by storing the eligible blocks in some data structure.
[ "binary search", "data structures", "greedy", "two pointers" ]
1,700
#include<bits/stdc++.h> using namespace std; char buf[200043]; int main() { int t; scanf("%d", &t); for(int tc = 1; tc <= t; tc++) { int n; scanf("%d", &n); scanf("%s", buf); string s = buf; queue<int> q; int cur = 0; for(int i = 0; i < n; i++) { if(i > 0 && s[i] != s[i - 1]) cur++; if(i > 0 && s[i] == s[i - 1]) q.push(cur); } int deleted = 0; int score = 0; for(int i = 0; i < n; i++) { if(q.empty()) break; q.pop(); deleted++; score++; while(!q.empty() && q.front() == i) { q.pop(); deleted++; } deleted++; //cerr << deleted << endl; } score += (n - deleted + 1) / 2; printf("%d\n", score); } }
1430
E
String Reversal
You are given a string $s$. You have to reverse it — that is, the first letter should become equal to the last letter before the reversal, the second letter should become equal to the second-to-last letter before the reversal — and so on. For example, if your goal is to reverse the string "abddea", you should get the string "aeddba". To accomplish your goal, you can swap the \textbf{neighboring elements of the string}. Your task is to calculate the minimum number of swaps you have to perform to reverse the given string.
First of all, let's find the resulting position for each character of the string. It's easy to see that we don't need to swap equal adjacent characters (it changes nothing), so the first character a in the original string is the first character a in the resulting string, the second character a in the original string is the second character a in the resulting string, and so on. Now, let's build a permutation $p$ of $n$ elements, where $p_i$ is the resulting position of the element that was on position $i$ in the original string. For example, for the string abcad this permutation will be $p = [2, 4, 3, 5, 1]$. In one operation, we may swap two elements in this permutation, and our goal is to sort it (since each character of the string has its own required position, and when for every $i$ the condition $p_i = i$ holds, each character is on the position it should be). The required number of swaps of adjacent elements to sort a permutation is exactly the number of inversions in it (since each swap changes the number of inversions by $1$), and this number can be calculated using many different techniques, for example, mergesort tree or Fenwick tree.
[ "data structures", "greedy", "strings" ]
1,900
#include <iostream> #include <sstream> #include <cstdio> #include <vector> #include <cmath> #include <queue> #include <string> #include <cstring> #include <cassert> #include <iomanip> #include <algorithm> #include <set> #include <map> #include <ctime> #include <cmath> #define forn(i, n) for(int i=0;i<n;++i) #define fore(i, l, r) for(int i = int(l); i <= int(r); ++i) #define sz(v) int(v.size()) #define all(v) v.begin(), v.end() #define pb push_back #define mp make_pair #define x first #define y1 ________y1 #define y second #define ft first #define sc second #define pt pair<int, int> template<typename X> inline X abs(const X& a) { return a < 0? -a: a; } template<typename X> inline X sqr(const X& a) { return a * a; } typedef long long li; typedef long double ld; using namespace std; const int INF = 1000 * 1000 * 1000; const ld EPS = 1e-9; const ld PI = acos(-1.0); const int N = 200 * 1000 + 13; int n; string s; string revS; vector<int> posS[30]; vector<int> posT[30]; int cnt[30]; int t[N]; inline int sum (int r) { int result = 0; for (; r >= 0; r = (r & (r+1)) - 1) result += t[r]; return result; } inline void inc (int i, int d) { for (; i < n; i = (i | (i+1))) t[i] += d; } int sum (int l, int r) { return sum (r) - sum (l-1); } inline void read() { cin >> n >> s; } inline void solve() { revS = s; reverse(all(revS)); for (int i = 0; i < sz(s); i++) { posS[s[i] - 'a'].pb(i); posT[revS[i] - 'a'].pb(i); } li ans = 0; for (int i = 0; i < sz(revS); i++) { int let = revS[i] - 'a'; int cur = posS[let][cnt[let]]; int oldC = cur; cur += sum(cur, n - 1); int need = i; ans += cur - need; inc(oldC, 1); cnt[let]++; } cout << ans << endl; } int main () { #ifdef fcspartakm freopen("input.txt", "r", stdin); //freopen("output.txt", "w", stdout); #endif srand(time(NULL)); cerr << setprecision(10) << fixed; read(); solve(); //cerr << "TIME: " << clock() << endl; }
1430
F
Realistic Gameplay
Recently you've discovered a new shooter. They say it has realistic game mechanics. Your character has a gun with magazine size equal to $k$ and should exterminate $n$ waves of monsters. The $i$-th wave consists of $a_i$ monsters and happens from the $l_i$-th moment of time up to the $r_i$-th moments of time. All $a_i$ monsters spawn at moment $l_i$ and you have to exterminate all of them before the moment $r_i$ ends (you can kill monsters right at moment $r_i$). For every two consecutive waves, the second wave starts not earlier than the first wave ends (though the second wave can start at the same moment when the first wave ends) — formally, the condition $r_i \le l_{i + 1}$ holds. Take a look at the notes for the examples to understand the process better. You are confident in yours and your character's skills so you can assume that aiming and shooting are instant and you need exactly one bullet to kill one monster. But reloading takes exactly $1$ unit of time. One of the realistic mechanics is a mechanic of reloading: when you reload you throw away the old magazine with all remaining bullets in it. That's why constant reloads may cost you excessive amounts of spent bullets. You've taken a liking to this mechanic so now you are wondering: what is the minimum possible number of bullets you need to spend (both used and thrown) to exterminate all waves. Note that you don't throw the remaining bullets away after eradicating all monsters, and you start with a full magazine.
Note some observations: if we meet a new wave and start shooting, it's optimal to shoot monsters in the wave using full magazines while we can, and there is no reason to take breaks between shooting monsters from one wave. That's why we can track only the moments when waves start and the number of remaining bullets in the magazine we have at these moments. Moreover, since the next wave starts not earlier than the previous ends, we can think that when we start dealing with one wave we've already dealt with the previous one. Also, instead of keeping track of the remaining bullets, let's look only at such indices of waves before which we reloaded and threw the remaining bullets away. So, we can write the next dp: $d[i]$ is the minimum number of bullets we spend dealing with the first $i$ waves such that we are now standing at the moment $l_i$ with a full magazine. Obviously, $d[0] = 0$. Now, with fixed $i$ we can iterate over the index $j$ of a wave before which we'll reload, throwing away the remaining bullets. And for waves $[i, j)$ we need to check that we are able to exterminate all these waves without throwing away any bullets. We can check it with several formulas. If it's possible for the segment $[i, j)$, then the possibility for the segment $[i, j + 1)$ is just checking that we can exterminate the $j$-th wave having $rem \ge 0$ bullets at the start in no more than $r_j - l_j$ reloads, plus checking that we have at least one unit of time before $l_{j + 1}$ for a reload. As a result, the time complexity of the solution is $O(n^2)$.
[ "dp", "greedy" ]
2,600
#include<bits/stdc++.h>
using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define x first
#define y second

typedef long long li;
typedef long double ld;
typedef pair<int, int> pt;

const int INF = int(1e9);
const li INF64 = li(1e18);
const ld EPS = 1e-9;

int n, k;               // number of waves; magazine size
vector<int> l, r, a;    // wave i: spawns at l[i], deadline r[i], a[i] monsters

inline bool read() {
	if(!(cin >> n >> k))
		return false;
	l.resize(n);
	r.resize(n);
	a.resize(n);
	fore(i, 0, n)
		cin >> l[i] >> r[i] >> a[i];
	return true;
}

// DP (see editorial): d[i] = minimum bullets spent (fired + thrown away)
// to clear the first i waves so that we stand at moment l[i] with a full
// magazine.  For a fixed start i we sweep j = i, i+1, ... simulating play
// without ever throwing a magazine away, and branch to d[j + 1] whenever a
// throw-away reload before wave j + 1 fits into the timeline.
inline void solve() {
	vector<li> d(n + 1, INF64);
	d[0] = 0;
	li ans = INF64;
	fore(i, 0, n) {
		li rem = k, total = d[i];   // bullets left in magazine; bullets spent so far
		for (int j = i; j < n; j++) {
			// Reloads needed to get enough bullets for wave j
			// (ceil of the shortfall divided by the magazine size).
			li cntReloads = (max(0LL, a[j] - rem) + k - 1) / k;
			// Each reload costs 1 time unit; all of them must fit in [l_j, r_j].
			if (cntReloads > r[j] - l[j]) break;
			li newRem = (rem + cntReloads * k) - a[j];
			total += a[j];          // every monster costs exactly one bullet
			if (j + 1 < n) {
				// A throw-away reload before wave j+1 needs one spare time
				// unit after the cntReloads already spent since l[j].
				if (l[j] + cntReloads < l[j + 1])
					d[j + 1] = min(d[j + 1], total + newRem); // newRem bullets are thrown away
			} else
				ans = min(ans, total);  // last wave cleared, nothing thrown
			rem = newRem;
		}
	}
	// Unreachable final state means the waves cannot all be cleared.
	if (ans > INF64 / 2)
		ans = -1;
	cout << ans << endl;
}

int main() {
#ifdef _DEBUG
	freopen("input.txt", "r", stdin);
	int tt = clock();
#endif
	ios_base::sync_with_stdio(false);
	cin.tie(0), cout.tie(0);
	cout << fixed << setprecision(15);
	if(read()) {
		solve();
#ifdef _DEBUG
		cerr << "TIME = " << clock() - tt << endl;
		tt = clock();
#endif
	}
	return 0;
}
1430
G
Yet Another DAG Problem
You are given a directed acyclic graph (a directed graph that does not contain cycles) of $n$ vertices and $m$ arcs. The $i$-th arc leads from the vertex $x_i$ to the vertex $y_i$ and has the weight $w_i$. Your task is to select an integer $a_v$ for each vertex $v$, and then write a number $b_i$ on each arcs $i$ such that $b_i = a_{x_i} - a_{y_i}$. You must select the numbers so that: - all $b_i$ are positive; - the value of the expression $\sum \limits_{i = 1}^{m} w_i b_i$ is the lowest possible. It can be shown that for any directed acyclic graph with non-negative $w_i$, such a way to choose numbers exists.
The key observation in this problem is that the values of $a_v$ should form a contiguous segment of integers. For example, suppose there exists a value $k$ such that there is at least one $a_v < k$, there is at least one $a_v > k$, but no $a_v = k$. We can decrease all values of $a_v$ that are greater than $k$ by $1$, so the answer will still be valid, but the value of $\sum \limits_{i = 1}^{m} w_i b_i$ will decrease. So, the values of $a_v$ form a contiguous segment of integers. We can always assume that this segment is $[0, n - 1]$, since subtracting the same value from each $a_v$ does not change anything. The other observation we need is that we can rewrite the expression we have to minimize as follows: $\sum \limits_{i = 1}^{m} w_i b_i = \sum \limits_{v = 1}^{n} a_v c_v$, where $c_v$ is the signed sum of weights of all arcs incident to the vertex $v$ (the weights of all arcs leading from $v$ are taken with positive sign, and the weights of all arcs leading to $v$ are taken with negative sign). These two observations lead us to a bitmask dynamic programming solution: let $dp_{i, mask}$ be the minimum value of $\sum \limits_{v = 1}^{n} a_v c_v$, if we assigned the values from $[0, i - 1]$ to the vertices from $mask$. A naive way to calculate this dynamic programming is to iterate on the submask of $mask$, check that choosing the integer $i - 1$ for each vertex from that submask doesn't ruin anything (for each vertex that belongs to this submask, all vertices that are reachable from it should have $a_v < i - 1$, so they should belong to $mask$, but not to the submask we iterate on), and update the dynamic programming value. But this solution is $O(n 3^n)$, and, depending on your implementation, this might be too slow. 
It's possible to speed this up to $O(n^2 2^n)$ in a way similar to how profile dp can be optimized from $O(3^n)$ to $O(n 2^n)$: we won't iterate on the submask; instead, we will try to add the vertices one by one, and we should be able to add a vertex to the mask only if all vertices that are reachable from it already belong to the mask. There is a possibility that we add two vertices connected by an arc with the same value of $a_v$, so, for a fixed value of $a_v$, we should consider assigning it to vertices in topological sorting order (that way, if one vertex is reachable from another, it will be considered later, so we won't add both of those with the same value of $a_v$).
[ "bitmasks", "dfs and similar", "dp", "flows", "graphs", "math" ]
2,600
#include<bits/stdc++.h>
using namespace std;

typedef long long li;

const int N = 18;           // maximum number of vertices
const int M = (1 << N);     // number of vertex subsets
const li INF64 = li(1e18);

int n, m;
vector<int> g[N];           // adjacency list of the DAG
li sum[N];                  // c_v: signed sum of incident arc weights (out: +w, in: -w)
int need_mask[N];           // vertices reachable from v (must get smaller a-values first)
// dp[i][j][mask]: minimum cost after processing value layers < i completely
// and the first j vertices (in topological order) of layer i, with `mask`
// already assigned a value.  p[...] remembers whether vertex order[j-1]
// was assigned at this state (for answer reconstruction).
// NOTE(review): these arrays are sized for N = 18, which is very large in
// memory ((N+1)^2 * 2^N entries) — presumably the real constraint on n is
// smaller; verify against the problem limits.
li dp[N + 1][N + 1][M];
bool p[N + 1][N + 1][M];
vector<int> order;          // topological order (after the reverse below)
vector<int> used;

// DFS used both to build a topological order (build_topo = true) and to
// mark all vertices reachable from x (build_topo = false).
void dfs(int x, bool build_topo) {
	if(used[x]) return;
	used[x] = 1;
	for(auto y : g[x])
		dfs(y, build_topo);
	if(build_topo) order.push_back(x);
}

int main() {
	cin >> n >> m;
	for(int i = 0; i < m; i++) {
		int x, y, w;
		cin >> x >> y >> w;
		--x; --y;
		// Rewrite sum(w_i * b_i) as sum(a_v * c_v): arc x -> y of weight w
		// contributes +w to c_x and -w to c_y (see editorial).
		sum[x] += w;
		sum[y] -= w;
		g[x].push_back(y);
	}
	used.resize(n);
	for(int i = 0; i < n; i++) dfs(i, true);
	// need_mask[i]: every vertex reachable from i; those must already be in
	// the assigned mask before i itself may receive a value.
	for(int i = 0; i < n; i++) {
		used = vector<int>(n, 0);
		dfs(i, false);
		for(int j = 0; j < n; j++)
			if(j != i && used[j] == 1)
				need_mask[i] |= (1 << j);
	}
	for(int i = 0; i <= n; i++)
		for(int j = 0; j <= n; j++)
			for(int k = 0; k < (1 << n); k++)
				dp[i][j][k] = INF64;
	dp[0][0][0] = 0;
	reverse(order.begin(), order.end());
	for(int i = 0; i < n; i++)              // i: value currently being assigned
		for(int j = 0; j <= n; j++)         // j: position in topological order
			for(int k = 0; k < (1 << n); k++) {
				if(dp[i][j][k] > INF64 / 2) continue;
				if(j == n) {
					// Layer i finished: move on to value i + 1.
					if(dp[i + 1][0][k] > dp[i][j][k]) {
						dp[i + 1][0][k] = dp[i][j][k];
						p[i + 1][0][k] = false;
					}
				} else {
					int v = order[j];
					li add = sum[v] * i;    // cost of giving vertex v the value i
					// Option 1: skip vertex v in this layer.
					if(dp[i][j + 1][k] > dp[i][j][k]) {
						dp[i][j + 1][k] = dp[i][j][k];
						p[i][j + 1][k] = false;
					}
					// Option 2: assign value i to v — allowed only if v is
					// unassigned and all vertices reachable from v already are.
					if(((k & (1 << v)) == 0) && ((need_mask[v] & k) == need_mask[v])) {
						int nk = k | (1 << v);
						if(dp[i][j + 1][nk] > dp[i][j][k] + add) {
							dp[i][j + 1][nk] = dp[i][j][k] + add;
							p[i][j + 1][nk] = true;
						}
					}
				}
			}
	// Walk the p[] flags backwards from the full mask to recover which
	// value layer each vertex was assigned in.
	vector<int> ans(n);
	int i = n;
	int j = 0;
	int k = (1 << n) - 1;
	while(i > 0 || j > 0 || k > 0) {
		if(j == 0) {
			j = n;
			i--;
		} else {
			if(p[i][j][k]) {
				int v = order[j - 1];
				ans[v] = i;
				k ^= (1 << v);
			}
			j--;
		}
	}
	for(int i = 0; i < n; i++)
		cout << ans[i] << " \n"[i == n - 1];
}
1433
A
Boring Apartments
There is a building consisting of $10~000$ apartments numbered from $1$ to $10~000$, inclusive. Call an apartment \textbf{boring}, if its number consists of the same digit. Examples of boring apartments are $11, 2, 777, 9999$ and so on. Our character is a troublemaker, and he calls the intercoms of all \textbf{boring} apartments, till someone answers the call, in the following order: - First he calls all apartments consisting of digit $1$, in increasing order ($1, 11, 111, 1111$). - Next he calls all apartments consisting of digit $2$, in increasing order ($2, 22, 222, 2222$) - And so on. The resident of the boring apartment $x$ answers the call, and our character \textbf{stops} calling anyone further. Our character wants to know how many digits he pressed in total and your task is to help him to count the total number of keypresses. For example, if the resident of boring apartment $22$ answered, then our character called apartments with numbers $1, 11, 111, 1111, 2, 22$ and the total number of digits he pressed is $1 + 2 + 3 + 4 + 1 + 2 = 13$. You have to answer $t$ independent test cases.
This problem has a lot of solutions. You could even hard code all possible tests to solve it. But this problem has $O(1)$ solution. Let the digit of $x$ be $dig$. Then our character pressed each digit before $dig$ exactly $10$ times ($1 + 2 + 3 + 4$). And the amount of times he pressed the digit $dig$ depends on the length of $x$. Let $len$ be the length of $x$, then the amount of times he pressed the digit $dig$ is $1 + 2 + \ldots + len = \frac{len(len + 1)}{2}$. So the final answer is $10 \cdot (dig - 1) + \frac{len(len + 1)}{2}$.
[ "implementation", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// 1433A. For boring apartment x (all digits equal to d, length L): each digit
// smaller than d costs 1+2+3+4 = 10 presses, and digit d itself costs
// 1+2+...+L, so the answer is 10*(d-1) + L*(L+1)/2.
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
#endif
    int tests;
    cin >> tests;
    for (int tc = 0; tc < tests; ++tc) {
        string apartment;
        cin >> apartment;
        const int smallerDigits = apartment.front() - '1';  // digits tried before this one
        const int length = (int)apartment.size();
        cout << 10 * smallerDigits + length * (length + 1) / 2 << endl;
    }
    return 0;
}
1433
B
Yet Another Bookshelf
There is a bookshelf which can fit $n$ books. The $i$-th position of bookshelf is $a_i = 1$ if there is a book on this position and $a_i = 0$ otherwise. It is guaranteed that there is \textbf{at least one book} on the bookshelf. In one move, you can choose some contiguous segment $[l; r]$ consisting of books (i.e. for each $i$ from $l$ to $r$ the condition $a_i = 1$ holds) and: - Shift it to the right by $1$: move the book at index $i$ to $i + 1$ for all $l \le i \le r$. This move can be done only if $r+1 \le n$ and there is no book at the position $r+1$. - Shift it to the left by $1$: move the book at index $i$ to $i-1$ for all $l \le i \le r$. This move can be done only if $l-1 \ge 1$ and there is no book at the position $l-1$. Your task is to find the \textbf{minimum} number of moves required to collect all the books on the shelf as a \textbf{contiguous} (consecutive) segment (i.e. the segment without any gaps). For example, for $a = [0, 0, 1, 0, 1]$ there is a gap between books ($a_4 = 0$ when $a_3 = 1$ and $a_5 = 1$), for $a = [1, 1, 0]$ there are no gaps between books and for $a = [0, 0,0]$ there are also no gaps between books. You have to answer $t$ independent test cases.
We can notice that the answer is the number of zeros between the leftmost occurrence of $1$ and the rightmost occurrence of $1$. Why is it true? Let's take the leftmost maximal-by-inclusion segment of $1$'s and just shift it right. We can see that using this algorithm we perform exactly the described number of moves, and there is no way to improve the answer.
[ "greedy", "implementation" ]
800
#include <bits/stdc++.h>
using namespace std;

// 1433B. The minimum number of moves equals the number of empty cells strictly
// between the leftmost and the rightmost book: shifting a maximal block of
// books inward closes exactly one gap cell per move.
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> shelf(n);
        for (int &cell : shelf) cin >> cell;
        // At least one book is guaranteed, so both scans terminate.
        int first = 0;
        while (shelf[first] == 0) ++first;
        int last = n - 1;
        while (shelf[last] == 0) --last;
        int gaps = 0;
        for (int i = first; i <= last; ++i)
            if (shelf[i] == 0) ++gaps;
        cout << gaps << endl;
    }
    return 0;
}
1433
C
Dominant Piranha
There are $n$ piranhas with sizes $a_1, a_2, \ldots, a_n$ in the aquarium. Piranhas are numbered from left to right in order they live in the aquarium. Scientists of the Berland State University want to find if there is \textbf{dominant} piranha in the aquarium. The piranha is called \textbf{dominant} if it can eat all the other piranhas in the aquarium (except itself, of course). Other piranhas will do nothing while the \textbf{dominant} piranha will eat them. Because the aquarium is pretty narrow and long, the piranha can eat only one of the adjacent piranhas during one move. Piranha can do as many moves as it needs (or as it can). More precisely: - The piranha $i$ can eat the piranha $i-1$ if the piranha $i-1$ exists and $a_{i - 1} < a_i$. - The piranha $i$ can eat the piranha $i+1$ if the piranha $i+1$ exists and $a_{i + 1} < a_i$. When the piranha $i$ eats some piranha, its \textbf{size increases by one} ($a_i$ becomes $a_i + 1$). Your task is to find \textbf{any dominant} piranha in the aquarium or determine if there are no such piranhas. Note that you have to find \textbf{any} (exactly one) dominant piranha, you don't have to find all of them. For example, if $a = [5, 3, 4, 4, 5]$, then the third piranha can be \textbf{dominant}. Consider the sequence of its moves: - The piranha eats the second piranha and $a$ becomes $[5, \underline{5}, 4, 5]$ (the underlined piranha is our candidate). - The piranha eats the third piranha and $a$ becomes $[5, \underline{6}, 5]$. - The piranha eats the first piranha and $a$ becomes $[\underline{7}, 5]$. - The piranha eats the second piranha and $a$ becomes $[\underline{8}]$. You have to answer $t$ independent test cases.
If all the piranhas have the same size then the answer is -1. Otherwise, there are at least two different sizes of piranhas and the answer always exists. Claim that the answer is such a piranha with the maximum size that one of the adjacent piranhas has the size less than a maximum. Why is it true and why the answer always exists? First, if the piranha with the maximum size eats some other piranha, it becomes the only maximum in the array and can eat all other piranhas. Why is there always such a pair of piranhas? Let's change our array a bit: replace every maximum with $1$ and every non-maximum with $0$. There is always some $01$-pair or $10$-pair in such array because we have at least two different elements.
[ "constructive algorithms", "greedy" ]
900
#include <bits/stdc++.h>
using namespace std;

// 1433C. If every piranha has the maximal size, nobody can eat anyone -> -1.
// Otherwise any maximal piranha with a strictly smaller neighbour is dominant:
// after one meal it becomes the unique maximum and eats the rest.
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> size(n);
        int biggest = 0;
        for (int &s : size) {
            cin >> s;
            biggest = max(biggest, s);
        }
        int answer = -1;  // stays -1 when all piranhas are equal
        for (int i = 0; i < n; ++i) {
            if (size[i] != biggest) continue;
            const bool smallerLeft = i > 0 && size[i - 1] != biggest;
            const bool smallerRight = i + 1 < n && size[i + 1] != biggest;
            if (smallerLeft || smallerRight) answer = i + 1;  // report 1-based index
        }
        cout << answer << endl;
    }
    return 0;
}
1433
D
Districts Connection
There are $n$ districts in the town, the $i$-th district belongs to the $a_i$-th bandit gang. Initially, no districts are connected to each other. You are the mayor of the city and want to build $n-1$ two-way roads to connect all districts (two districts can be connected directly or through other connected districts). If two districts belonging to the same gang are connected \textbf{directly} with a road, this gang will revolt. You don't want this so your task is to build $n-1$ two-way roads in such a way that all districts are reachable from each other (possibly, using intermediate districts) and \textbf{each pair} of directly connected districts belong to \textbf{different gangs}, or determine that it is impossible to build $n-1$ roads to satisfy all the conditions. You have to answer $t$ independent test cases.
If all districts belong to the same gang then the answer is NO. Otherwise, the answer is always YES (yeah, as in the previous problem). How to construct it? Let's choose the first "root" as the district $1$ and connect all such districts $i$ that $a_1 \ne a_i$ to the district $1$. So, all disconnected districts that remain are under control of the gang $a_1$. Let's find any district $i$ that $a_i \ne a_1$ and just connect all remaining districts of the gang $a_1$ to this district. This district always exists because we have at least two different gangs and it is connected to the remaining structure because its gang is not $a_1$. So, all conditions are satisfied.
[ "constructive algorithms", "dfs and similar" ]
1,200
#include <bits/stdc++.h>
using namespace std;

// 1433D. Connect every district with a gang different from district 1's gang
// directly to district 1; then hang all remaining districts (same gang as
// district 1) off any district of a different gang. Impossible only when all
// districts share one gang.
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
#endif
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> gang(n);
        for (int &g : gang) cin >> g;
        vector<pair<int, int>> roads;
        int bridge = -1;  // last district whose gang differs from district 1's
        for (int i = 1; i < n; ++i) {
            if (gang[i] != gang[0]) {
                bridge = i;
                roads.emplace_back(1, i + 1);
            }
        }
        if (bridge == -1) {
            cout << "NO" << endl;
            continue;
        }
        for (int i = 1; i < n; ++i)
            if (gang[i] == gang[0])
                roads.emplace_back(bridge + 1, i + 1);
        cout << "YES" << endl;
        for (const auto &[u, v] : roads)
            cout << u << " " << v << endl;
    }
    return 0;
}
1433
E
Two Round Dances
One day, $n$ people ($n$ is an even number) met on a plaza and made two round dances, each round dance consists of exactly $\frac{n}{2}$ people. Your task is to find the number of ways $n$ people can make two round dances if each round dance consists of exactly $\frac{n}{2}$ people. Each person should belong to exactly one of these two round dances. Round dance is a dance circle consisting of $1$ or more people. Two round dances are indistinguishable (equal) if one can be transformed to another by choosing the first participant. For example, round dances $[1, 3, 4, 2]$, $[4, 2, 1, 3]$ and $[2, 1, 3, 4]$ are indistinguishable. For example, if $n=2$ then the number of ways is $1$: one round dance consists of the first person and the second one of the second person. For example, if $n=4$ then the number of ways is $3$. Possible options: - one round dance — $[1,2]$, another — $[3,4]$; - one round dance — $[2,4]$, another — $[3,1]$; - one round dance — $[4,1]$, another — $[3,2]$. Your task is to find the number of ways $n$ people can make two round dances if each round dance consists of exactly $\frac{n}{2}$ people.
Firstly, we need to choose the set of $\frac{n}{2}$ people to be in the first round dance (the other half is going to the second one). The number of ways to do that is $\binom{n}{\frac{n}{2}}$. Then we need to set some order of people in both round dances, but we don't want to forget about rotation (because rotation can lead us to counting the same ways several times). So, the number of ways to arrange people inside one round dance is $(\frac{n}{2} - 1)!$. This is true because we just "fixed" who will be the first in the round dance, and place others in every possible order. So, we need to multiply our initial answer by this value twice because we have two round dances. And, finally, we have to divide our answer by $2$ because we counted "ordered" pairs (i.e. we distinguish pairs of kind $(x, y)$ and $(y, x)$ but we don't have to do that). So, the final answer is $\binom{n}{\frac{n}{2}} \cdot (\frac{n}{2} - 1)! \cdot (\frac{n}{2} - 1)!$ divided by $2$. This formula can be reduced to $\frac{n!}{\frac{n}{2}^2 \cdot 2}$. You could also find the sequence of answers in OEIS (and this can be really useful skill sometimes).
[ "combinatorics", "math" ]
1,300
#include <bits/stdc++.h>
using namespace std;

// 1433E. Number of ways to split n people (n even, n <= 20) into two unordered
// round dances of n/2 each: C(n, n/2) * ((n/2 - 1)!)^2 / 2.
// Everything fits in 64-bit integers and all divisions are exact.
int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
#endif
    int n;
    cin >> n;
    vector<long long> fact(n + 1, 1);
    for (int i = 2; i <= n; ++i) fact[i] = fact[i - 1] * i;
    const int half = n / 2;
    long long ways = fact[n] / fact[half] / fact[half];  // choose members of the first circle
    ways *= fact[half - 1];                              // seat the first circle (rotations fixed)
    ways *= fact[half - 1];                              // seat the second circle
    ways /= 2;                                           // the two circles are interchangeable
    cout << ways << endl;
    return 0;
}
1433
F
Zero Remainder Sum
You are given a matrix $a$ of size $n \times m$ consisting of integers. You can choose \textbf{no more than} $\left\lfloor\frac{m}{2}\right\rfloor$ elements in \textbf{each row}. Your task is to choose these elements in such a way that their sum is \textbf{divisible by} $k$ and this sum is the \textbf{maximum}. In other words, you can choose no more than a half (rounded down) of elements in each row, you have to find the maximum sum of these elements divisible by $k$. Note that you can choose zero elements (and the sum of such set is $0$).
This is pretty standard dynamic programming problem. Let $dp[x][y][cnt][rem]$ be the maximum possible sum we can obtain if we are at the element $a_{x, y}$ right now, we took $cnt$ elements in the row $x$ and our current remainder is $rem$. Initially, all states are $-\infty$ except $dp[0][0][0][0] = 0$. Transitions are standard because this is a knapsack problem: we either take the element if $cnt < \left\lfloor\frac{m}{2}\right\rfloor$ or don't take it. If the element $a_{x, y}$ is not the last element of the row, then transitions look like that: $dp[x][y + 1][cnt][rem] = max(dp[x][y + 1][cnt][rem], dp[x][y][cnt][rem])$ - we don't take the current element. $dp[x][y + 1][cnt + 1][(rem + a_{x, y}) \% k] = max(dp[x][y + 1][cnt + 1][(rem + a_{x, y}) \% k], dp[x][y][cnt][rem] + a_{x, y})$ - we take the current element (this transition is only possible if $cnt < \left\lfloor\frac{m}{2}\right\rfloor$). The transitions from the last element of the row are almost the same, but the next element is $a_{x + 1, 0}$ and the new value of $cnt$ is always zero. The answer is $max(0, dp[n][0][0][0])$.
[ "dp" ]
2,100
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for (int i = 0; i < int(n); ++i) const int N = 75; const int INF = 1e9; int a[N][N]; int dp[N][N][N][N]; int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); // freopen("output.txt", "w", stdout); #endif int n, m, k; cin >> n >> m >> k; forn(i, n) forn(j, m) { cin >> a[i][j]; } forn(i, N) forn(j, N) forn(cnt, N) forn(rem, N) dp[i][j][cnt][rem] = -INF; dp[0][0][0][0] = 0; forn(i, n) forn(j, m) forn(cnt, m / 2 + 1) forn(rem, k) { if (dp[i][j][cnt][rem] == -INF) continue; int ni = (j == m - 1 ? i + 1 : i); int nj = (j == m - 1 ? 0 : j + 1); if (i != ni) { dp[ni][nj][0][rem] = max(dp[ni][nj][0][rem], dp[i][j][cnt][rem]); } else { dp[ni][nj][cnt][rem] = max(dp[ni][nj][cnt][rem], dp[i][j][cnt][rem]); } if (cnt + 1 <= m / 2) { int nrem = (rem + a[i][j]) % k; if (i != ni) { dp[ni][nj][0][nrem] = max(dp[ni][nj][0][nrem], dp[i][j][cnt][rem] + a[i][j]); } else { dp[ni][nj][cnt + 1][nrem] = max(dp[ni][nj][cnt + 1][nrem], dp[i][j][cnt][rem] + a[i][j]); } } } cout << max(0, dp[n][0][0][0]) << endl; return 0; }
1433
G
Reducing Delivery Cost
You are a mayor of Berlyatov. There are $n$ districts and $m$ two-way roads between them. The $i$-th road connects districts $x_i$ and $y_i$. The cost of travelling along this road is $w_i$. There is some path between each pair of districts, so the city is connected. There are $k$ delivery routes in Berlyatov. The $i$-th route is going from the district $a_i$ to the district $b_i$. There is one courier on each route and the courier will always choose the \textbf{cheapest} (minimum by total cost) path from the district $a_i$ to the district $b_i$ to deliver products. The route can go from the district to itself, some couriers routes can coincide (\textbf{and you have to count them independently}). You can make at most one road to have cost zero (i.e. you choose at most one road and change its cost with $0$). Let $d(x, y)$ be the cheapest cost of travel between districts $x$ and $y$. Your task is to find the minimum total courier routes cost you can achieve, if you optimally select the some road and change its cost with $0$. In other words, you have to find the minimum possible value of $\sum\limits_{i = 1}^{k} d(a_i, b_i)$ after applying the operation described above optimally.
If we would naively solve the problem, we would just try to replace each edge's cost with zero and run Dijkstra algorithm $n$ times to get the cheapest paths. But this is too slow. Let's try to replace each edge's cost with zero anyway but use some precalculations to improve the speed of the solution. Let's firstly run Dijkstra $n$ times to calculate all cheapest pairwise paths. Then, let's fix which edge we "remove" $(x, y)$. There are three cases for the path $(a, b)$: this edge was not on the cheapest path before removing and is not on the cheapest path after removing. Then the cost of this path is $d(a, b)$. The second case is when this edge was not on the cheapest path before removing but it is on the cheapest path after removing. Then the cost of this path is $min(d(a, x) + d(y, b), d(a, y) + d(x, b))$. So we are just going from $a$ to $x$ using the cheapest path, then going through the zero edge and then going from $y$ to $b$ using the cheapest path also (or vice versa, from $a$ to $y$ and from $x$ to $b$). And the third case is when this edge was already on the cheapest path between $a$ and $b$ but this case is essentially the same as the second one. So, if we fix the edge $(x, y)$, then the answer for this edge is $\sum\limits_{i=1}^{k} min(d(a_i, b_i), d(a_i, x) + d(y, b_i), d(a_i, y) + d(x, b_i))$. Taking the minimum over all edges, we will get the answer. The precalculating part works in $O(n m \log n)$ and the second part works in $O(k m)$.
[ "brute force", "graphs", "shortest paths" ]
2,100
#include <bits/stdc++.h> using namespace std; const int INF = 1e9; int n; vector<vector<int>> d; vector<vector<pair<int, int>>> g; void dijkstra(int s, vector<int> &d) { d = vector<int>(n, INF); d[s] = 0; set<pair<int, int>> st; st.insert({d[s], s}); while (!st.empty()) { int v = st.begin()->second; st.erase(st.begin()); for (auto [to, w] : g[v]) { if (d[to] > d[v] + w) { auto it = st.find({d[to], to}); if (it != st.end()) st.erase(it); d[to] = d[v] + w; st.insert({d[to], to}); } } } } int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); // freopen("output.txt", "w", stdout); #endif int m, k; cin >> n >> m >> k; g = vector<vector<pair<int, int>>>(n); for (int i = 0; i < m; ++i) { int x, y, w; cin >> x >> y >> w; --x, --y; g[x].push_back({y, w}); g[y].push_back({x, w}); } vector<pair<int, int>> r(k); for (auto &[a, b] : r) { cin >> a >> b; --a, --b; } d = vector<vector<int>>(n); for (int v = 0; v < n; ++v) { dijkstra(v, d[v]); } int ans = INF; for (int v = 0; v < n; ++v) { for (auto [to, w] : g[v]) { int cur = 0; for (auto [a, b] : r) { cur += min({d[a][b], d[a][v] + d[to][b], d[a][to] + d[v][b]}); } ans = min(ans, cur); } } cout << ans << endl; return 0; }
1434
E
A Convex Game
Shikamaru and Asuma like to play different games, and sometimes they play the following: given an increasing list of numbers, they take turns to move. Each move consists of picking a number from the list. Assume the picked numbers are $v_{i_1}$, $v_{i_2}$, $\ldots$, $v_{i_k}$. The following conditions must hold: - $i_{j} < i_{j+1}$ for all $1 \leq j \leq k-1$; - $v_{i_{j+1}} - v_{i_j} < v_{i_{j+2}} - v_{i_{j+1}}$ for all $1 \leq j \leq k-2$. However, it's easy to play only one instance of game, so today Shikamaru and Asuma decided to play $n$ simultaneous games. They agreed on taking turns as for just one game, \textbf{Shikamaru goes first}. At each turn, the player performs a valid move in any single game. The player who cannot move loses. Find out who wins, provided that both play optimally.
It's sufficient to calculate the Grundy value for each game instance. Consider a single game. Let $maxc$ be the maximal value in the sequence $v$. We are going to prove that the Grundy value does not exceed $\sqrt{2 \cdot maxc} + 1$. Proof: Assume the contrary; that is, that the Grundy value equals $d > \sqrt{2 \cdot maxc} + 1$. Then, by definition, there is a sequence $v_{i_0}$, $v_{i_1}$, ..., $v_{i_{d - 1}}$ which is a valid sequence of moves. Indeed, initially there is a move into a position in game with the value $d - 1$, then there is a move from it to the position with value $d - 2$, and so on. It's easy to see that $v_{i_{j + 1}} - v_{i_j} \geq j + 1$ for all $j\leq d - 2$. Then $v_{i_{d - 1}} - v_{i_0} \geq \frac {d (d - 1)} {2} \geq \frac {(d - 1) (d - 1)} {2} \geq maxc$, which leads to a contradiction. It is clear from the statement that the outcome of the game is defined by the index of the last move and the last difference between the elements. It follows from the Grundy theory that if we fix the last index and gradually decrease the last difference, the grundy value will not decrease. It'd be great to calculate the value of $dp[i][d]$ standing for the maximal possible last difference so that the Grundy value equals $d$, for each index $i$ and each possible Grundy value $d$. This, in its turn, can be done by calculating $maxv[d][i]$ being the maximal $v_j$ so that after the move from $v_i$ to $v_j$ the Grundy value will equal $d$. If we know it, then, standing at some index $j$ and knowing the range of last differences so that the Grundy value equals $d$ for all $d$ (we can obtain it from the values of $dp[i]$), we need to remax the values of $maxv[d]$ on some subsegment. Hence, we can already implement a segment tree solution working for $O(n \cdot maxc + \sum m_i \cdot \sqrt{2 \cdot maxc} \cdot log(m_i))$. However, it's too long. Now recall that the initial array is increasing in each game. 
This means that during the calculation of $dp$ and $maxv$ from left to right, we only need to remax something a single time (the first time). This operation can be done via DSU, if we compress subsegments of all already calculated values and one not yet calculated into a single component. Then the final time complexity will be $O(n \cdot maxc + \sum m_i \cdot \sqrt{2 \cdot maxc} \cdot \alpha(m_i))$.
[ "dsu", "games" ]
3,500
null
1436
A
Reorder
For a given array $a$ consisting of $n$ integers and a given integer $m$ find if it is possible to reorder elements of the array $a$ in such a way that $\sum_{i=1}^{n}{\sum_{j=i}^{n}{\frac{a_j}{j}}}$ equals $m$? It is forbidden to delete elements as well as insert new elements. Please note that no rounding occurs during division, for example, $\frac{5}{2}=2.5$.
You can notice that the $i$-th number in the array will be included in the sum $i$ times, which means that the value $\frac {a_i} {i}$ will add $a_i$ to the sum. That is, the permutation of the elements does not affect the required sum, and therefore it is enough to check whether the sum of the array elements is equal to the given number.
[ "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// 1436A. The element placed at position i appears in the double sum exactly
// i times as a_i / i, i.e. contributes exactly a_i -- so every ordering gives
// the same total, the plain sum of the array. Answer: YES iff sum(a) == m.
//
// Cleanup: the original carried ~90 lines of unused contest-template macros,
// helpers and RNG setup; none of it was referenced. The running sum is widened
// to long long so the check stays safe for any input magnitude.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int t;
    cin >> t;
    while (t--) {
        int n, m;
        cin >> n >> m;
        long long total = 0;  // long long: immune to n * max(a_i) overflowing int
        for (int i = 0; i < n; ++i) {
            int value;
            cin >> value;
            total += value;
        }
        cout << (total == m ? "YES" : "NO") << "\n";
    }
    return 0;
}
1436
B
Prime Square
Sasha likes investigating different math objects, for example, magic squares. But Sasha understands that magic squares have already been studied by hundreds of people, so he sees no sense of studying them further. Instead, he invented his own type of square — a prime square. A square of size $n \times n$ is called prime if the following three conditions are held simultaneously: - all numbers on the square are non-negative integers not exceeding $10^5$; - there are no prime numbers in the square; - sums of integers in each row and each column are prime numbers. Sasha has an integer $n$. He asks you to find any prime square of size $n \times n$. Sasha is absolutely sure such squares exist, so just help him!
First, note that the numbers 0 and 1 are not prime. Now let's try to build a square from only these numbers. To begin with, fill in the main and secondary diagonal of the square with ones. If $n$ is even, then the sum in each row and each column is $2$ (prime number), and we have met the condition. If $n$ is odd, then the sum in the row with the number $\frac {n + 1} {2}$ and in the column with the number $\frac {n + 1} {2}$ will be equal to one. To fix this, add ones to the cells $(\frac {n} {2}, \frac {n + 1} {2})$ and $(\frac {n + 1} {2}, \frac {n + 1} {2} + 1)$. As a result, the sum in columns and rows will be equal to two or three, and we have fulfilled the condition of the problem.
[ "constructive algorithms", "math" ]
900
#include <bits/stdc++.h>
using namespace std;

// 1436B. Put 1s on both diagonals: for even n every row/column sums to 2
// (prime). For odd n the middle row and column would sum to 1, so two extra
// 1s are patched in, making all sums 2 or 3. The n == 4 case keeps the
// author's hand-made square, exactly as the original printed it.
void solve() {
    int n;
    cin >> n;
    vector<vector<int>> square(n, vector<int>(n, 0));
    if (n == 4) {
        cout << "4 6 8 1\n4 9 9 9\n4 10 10 65\n1 4 4 4\n";
        return;
    }
    for (int i = 0; i < n; ++i) {
        square[i][i] = 1;
        square[i][n - 1 - i] = 1;
    }
    if (n % 2 == 1) {
        // Patch the middle row/column (0-indexed n/2) of the odd case.
        square[n / 2 - 1][n / 2] = 1;
        square[n / 2][n / 2 + 1] = 1;
    }
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
            cout << square[i][j] << " \n"[j == n - 1];
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--)
        solve();
    return 0;
}
1436
C
Binary Search
Andrey thinks he is truly a successful developer, but in reality he didn't know about the binary search algorithm until recently. After reading some literature Andrey understood that this algorithm allows to quickly find a certain number $x$ in an array. For an array $a$ indexed from zero, and an integer $x$ the pseudocode of the algorithm is as follows: Note that the elements of the array are indexed from zero, and the division is done in integers (rounding down). Andrey read that the algorithm only works if the array is sorted. However, he found this statement untrue, because there certainly exist unsorted arrays for which the algorithm find $x$! Andrey wants to write a letter to the book authors, but before doing that he must consider the permutations of size $n$ such that the algorithm finds $x$ in them. A permutation of size $n$ is an array consisting of $n$ distinct integers between $1$ and $n$ in arbitrary order. Help Andrey and find the number of permutations of size $n$ which contain $x$ at position $pos$ and for which the given implementation of the binary search algorithm finds $x$ (returns true). As the result may be extremely large, print the remainder of its division by $10^9+7$.
Let's simulate a binary search algorithm. Initially, we have the required position $pos$. For the next $middle$ position in the binary search, we can determine exactly whether the number at this position should be greater or less than $x$. For all other positions, the values can be either greater or less than $x$. As a result of the simulation of the algorithm, we have $cntBig$ positions at which numbers must be greater than $x$ and $cntLess$ positions at which numbers must be less than $x$. Let the count of available numbers greater than $x$ be $hasBig$, and the count of smaller ones $hasLess$. Now let's count the number of ways to place large numbers in $cntBig$ positions using the formula $C(hasBig, cntBig) \cdot cntBig!$. Let's calculate in a similar way for smaller numbers; the remaining unconstrained positions can be filled with the leftover numbers in any order, so the product of these two results and the factorial of the number of remaining positions is the answer to the problem.
[ "binary search", "combinatorics" ]
1,500
#include <bits/stdc++.h>
using namespace std;

const int MOD = 1e9 + 7;

// x^p mod MOD by binary exponentiation (used for modular inverses via
// Fermat's little theorem, since MOD is prime).
long long power(long long x, long long p) {
    long long result = 1;
    x %= MOD;
    while (p > 0) {
        if (p & 1) result = result * x % MOD;
        x = x * x % MOD;
        p >>= 1;
    }
    return result;
}

// 1436C. Simulate the binary search aimed at position pos: every probed
// middle is forced to be <= x (left half taken) or > x (right half taken).
// Multiply the ways to fill the forced positions from the available smaller /
// larger values by the permutations of the unconstrained remainder.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int n, x, pos;
    cin >> n >> x >> pos;

    // Factorials and inverse factorials modulo MOD.
    vector<long long> fact(n + 1, 1), invFact(n + 1, 1);
    for (int i = 1; i <= n; ++i) {
        fact[i] = fact[i - 1] * i % MOD;
        invFact[i] = power(fact[i], MOD - 2);
    }
    auto choose = [&](int a, int b) -> long long {
        if (b > a) return 0;  // not enough values available -> zero ways
        return fact[a] * invFact[b] % MOD * invFact[a - b] % MOD;
    };

    // Count positions forced to hold values < x and values > x.
    int forcedLess = 0, forcedGreater = 0;
    int lo = 0, hi = n;
    while (lo < hi) {
        int mid = (lo + hi) / 2;
        if (mid <= pos) {
            if (mid != pos) ++forcedLess;  // pos itself already holds x
            lo = mid + 1;
        } else {
            ++forcedGreater;
            hi = mid;
        }
    }

    const int unconstrained = n - forcedLess - forcedGreater - 1;
    long long ways = choose(x - 1, forcedLess) * fact[forcedLess] % MOD;
    ways = ways * (choose(n - x, forcedGreater) * fact[forcedGreater] % MOD) % MOD;
    ways = ways * fact[unconstrained] % MOD;
    cout << ways << endl;
    return 0;
}
1436
D
Bandit in a City
Bandits appeared in the city! One of them is trying to catch as many citizens as he can. The city consists of $n$ squares connected by $n-1$ roads in such a way that it is possible to reach any square from any other square. The square number $1$ is the main square. After Sunday walk all the roads were changed to \textbf{one-way} roads in such a way that it is possible to reach any square from the main square. At the moment when the bandit appeared on the main square there were $a_i$ citizens on the $i$-th square. Now the following process will begin. First, each citizen that is currently on a square with some outgoing one-way roads chooses one of such roads and moves along it to another square. Then the bandit chooses one of the one-way roads outgoing from the square he is located and moves along it. The process is repeated until the bandit is located on a square with no outgoing roads. The bandit catches all the citizens on that square. The bandit wants to catch as many citizens as possible; the citizens want to minimize the number of caught people. The bandit and the citizens know positions of all citizens at any time, the citizens can cooperate. If both sides act optimally, how many citizens will be caught?
First, let's assume that all the citizens are at the root of the tree. Then the answer to the problem will be $\lceil \frac{a_1}{leaves} \rceil$, where $leaves$ is the number of leaves in the tree. According to the Dirichlet (pigeonhole) principle, this would be the minimum possible number of caught citizens. The answer to the original problem is $max_i {\lceil \frac{\sum_{v} a_v}{leaves_i} \rceil}$, where $v$ lies in the subtree of $i$ and $leaves_i$ is the number of leaves in the subtree of $i$. Consider some vertex $i$ for which it is impossible to split the citizens equally. Then there will be a vertex $m$ which, in the optimal split, will have the maximum number of citizens. Obviously, it is not profitable for us to send any citizen from vertex $i$ to $m$. In this case, we can go one level down in the tree in the direction of $m$. We will repeat this step until we can divide the citizens equally. Hence it is clear why the above formula is correct.
[ "binary search", "dfs and similar", "graphs", "greedy", "trees" ]
1,900
// CF 1436D "Bandits": the answer is the maximum per-leaf load after greedily
// levelling citizens over the leaves of every subtree (small-to-large merge).
#include <iostream>
#include <vector>
#include <map>
#include <algorithm>
using namespace std;
typedef long long ll;

const int maxn = (int)2e5 + 3;

int n;
int a[maxn];              // a[i] — citizens initially standing on square i
vector<int> g[maxn];      // children lists; edges point away from the root 0
map<ll, int> *cnt[maxn];  // cnt[v]: leaf-load profile of v's subtree, load -> number of leaves
int sz[maxn];             // subtree sizes, used to pick the heavy child

// Pour `val` extra citizens into the profile of vertex v.
// Greedily fills the least-loaded leaves first (water levelling), which keeps
// the maximum load — the number of citizens the bandit can catch — minimal.
void add(int v, ll val) {
    ll prev = 0;  // current water level
    int cc = 0;   // number of leaves already raised to `prev`
    while (!cnt[v]->empty()) {
        auto [h, c1] = *cnt[v]->begin();
        ll allup = 1ll * cc * (h - prev);  // cost to raise cc leaves from prev to h
        if (val >= allup) {
            val -= allup;
            prev = h;
            cc += c1;
            cnt[v]->erase(cnt[v]->begin());
        } else {
            break;
        }
    }
    if (cc == 0) {
        // v is a leaf (empty profile): all val citizens end up on it
        (*cnt[v])[val]++;
    } else {
        // distribute the remainder as evenly as possible over the cc leaves
        ll x = prev + (val / cc);
        if (val % cc > 0) {
            (*cnt[v])[x + 1] += val % cc;
        }
        (*cnt[v])[x] += cc - val % cc;
    }
}

// Post-order DFS: adopt the heavy child's map, merge the light children into
// it (small-to-large), then pour this vertex's own citizens in.
void dfs(int v) {
    int mx = -1, bigChild = -1;
    sz[v] = 1;
    for (int u : g[v]) {
        dfs(u);
        sz[v] += sz[u];
        if (sz[u] > mx) {
            mx = sz[u];
            bigChild = u;
        }
    }
    if (bigChild != -1)
        cnt[v] = cnt[bigChild];
    else
        cnt[v] = new map<ll, int>();  // never freed: released at process exit
    for (int u : g[v])
        if (u != bigChild)
            for (const auto &x : *cnt[u])  // const ref: do not copy map pairs
                (*cnt[v])[x.first] += x.second;
    add(v, a[v]);
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cin >> n;
    for (int i = 1; i < n; i++) {
        int par;
        cin >> par;
        par--;
        g[par].push_back(i);
    }
    for (int i = 0; i < n; i++)
        cin >> a[i];
    dfs(0);
    // The answer is the largest per-leaf load in the root's merged profile.
    ll ans = 0;
    for (const auto &[k, v] : *cnt[0])
        ans = max(ans, k);
    cout << ans << "\n";
    return 0;
}
1436
E
Complicated Computations
In this problem MEX of a certain array is the smallest \textbf{positive} integer not contained in this array. Everyone knows this definition, including Lesha. But Lesha loves MEX, so he comes up with a new problem involving MEX every day, including today. You are given an array $a$ of length $n$. Lesha considers all the \textbf{non-empty} subarrays of the initial array and computes MEX for each of them. Then Lesha computes MEX of the obtained numbers. An array $b$ is a subarray of an array $a$, if $b$ can be obtained from $a$ by deletion of several (possibly none or all) elements from the beginning and several (possibly none or all) elements from the end. In particular, an array is a subarray of itself. Lesha understands that the problem is very interesting this time, but he doesn't know how to solve it. Help him and find the MEX of MEXes of all the subarrays!
Let's iterate over the answer. Let the current answer be $x$, then we can get it only when there are no subarrays, whose MEX is $x$. Note that we need to check the MEX of the subarrays that are between all occurrences of $x$. This can be done, for example, using a segment tree, processing its occurrences in order. A number for which MEX is not found will be the answer.
[ "binary search", "data structures", "two pointers" ]
2,400
#include <bits/stdc++.h> #define fi first #define se second #define p_b push_back #define pll pair<ll,ll> #define pii pair<int,int> #define m_p make_pair #define all(x) x.begin(),x.end() #define sset ordered_set #define sqr(x) (x)*(x) #define pw(x) (1ll << x) #define sz(x) (int)x.size() #define fout(x) {cout << x << "\n"; return; } using namespace std; typedef long long ll; typedef long double ld; const ll N = 1e5 + 5; const ll M = 1e5 + 2; const int inf = 1e8; const ll mod = 1e9 + 7; template <typename T> void vout(T s){cout << s << endl;exit(0);} int t[4 * N]; void modify(int v, int tl, int tr, int pos, int val){ if(tl == tr)t[v] = val; else{ int tm = (tl + tr) >> 1; if(pos <= tm)modify(v << 1, tl, tm, pos, val); else modify(v << 1 | 1, tm + 1, tr, pos, val); t[v] = min(t[v << 1 | 1], t[v << 1]); } } int find(int v, int tl, int tr, int k){ if(tl == tr)return tl; int tm = (tl + tr) >> 1; if(t[v << 1] < k){ return find(v << 1, tl, tm, k); } return find(v << 1 | 1, tm + 1, tr, k); } int last[2 * N]; int main(){ ios_base :: sync_with_stdio(0); cin.tie(0); map <int, bool> mp; int n; cin >> n; vector <int> a(n + 1); for(int i = 1; i <= n; i++)cin >> a[i]; for(int i = 1; i <= n; i++){ int x = a[i]; if(last[x] + 1 < i){ mp[find(1, 1, M, last[x] + 1)] = 1; } modify(1, 1, M, a[i], i); last[x] = i; } for(int i = 1; i <= M; i++)if(last[i] && last[i] != n){ mp[find(1, 1, M, last[i] + 1)] = 1; } mp[find(1, 1, M, 1)] = 1; for(int i = 1; ; i++)if(!mp[i])vout(i); return 0; }
1436
F
Sum Over Subsets
You are given a multiset $S$. Over all pairs of subsets $A$ and $B$, such that: - $B \subset A$; - $|B| = |A| - 1$; - greatest common divisor of all elements in $A$ is equal to one; find the sum of $\sum_{x \in A}{x} \cdot \sum_{x \in B}{x}$, modulo $998\,244\,353$.
Let's calculate the required product of the sums $ans_i$ for the sets, the greatest common divisor of the elements of which is $i$. First, let's select all the elements that are divisible by $i$. To find only those sets whose GCD is exactly $i$, one can find the product of the sums for all subsets and subtract the answers of all $ans_j$ such that $i<j$ and $i$ divides $j$ without a remainder. To find the products of all subsets of a set of $k$ elements, consider two cases: the product $a_i \cdot a_i$ will be counted $2^{k-2} \cdot (k - 1)$ times. Each element in the set $A$ can be removed and this will add the product $a_i^2$. The number of elements $k - 1$ and the number, select the rest of the subset $2 ^ {k-2}$; the product $a_i \cdot a_j$ will be counted $2 ^ {k-3} \cdot (k - 2) + 2 ^ {k-2}$. The first term is similar to the example above. And the second is obtained if $a_i$ is removed from the set $A$ - the number of ways to choose a subset of $k - 2$ elements is $2 ^ {k-2}$.
[ "combinatorics", "math", "number theory" ]
2,800
// CF 1436F "Sum Over Subsets": for every divisor d aggregate the elements
// divisible by d, compute the pair-sum formula for "all subsets", then remove
// multiples (inclusion-exclusion from large d down) to get GCD exactly d.
#include <iostream>
#include <vector>
using namespace std;
typedef long long ll;

const int MOD = 998244353;

// x += d (mod MOD); d must already lie in (-MOD, MOD).
void addmod(ll &x, ll d) {
    x += d;
    if (x >= MOD) x -= MOD;
    if (x < 0) x += MOD;
}

// a^b mod MOD by binary exponentiation; b may be a large (64-bit) exponent.
ll powmod(ll a, ll b) {
    ll ret = 1;
    ll p = a;
    while (b) {
        if (b & 1) ret = ret * p % MOD;
        p = p * p % MOD;
        b >>= 1;
    }
    return ret;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);

    int m;
    cin >> m;
    const int n = 100000;  // maximum element value

    // Aggregates over all elements divisible by d (with multiplicities):
    //   sum[d]  — sum of values,
    //   sum1[d] — sum of squares a_i^2,
    //   sum2[d] — sum over ordered pairs of distinct elements of a_i * a_j,
    //   cnt[d]  — number of elements (NOT reduced mod MOD — used as exponent).
    vector<ll> sum(n + 1, 0), sum1(n + 1, 0), sum2(n + 1, 0), cnt(n + 1, 0);
    for (int i = 0; i < m; ++i) {
        ll x, freq;
        cin >> x >> freq;
        ll s = x * freq % MOD;
        ll s1 = x * x % MOD * freq % MOD;
        // ordered pairs among the freq equal copies of x
        ll s2 = x * x % MOD * ((freq - 1) * freq % MOD) % MOD;
        auto add = [&](int d) {
            // cross pairs between old elements and the freq new copies
            // (both orders, hence the factor 2); must use sum[d] BEFORE update
            addmod(sum2[d], (s2 + 2 * s * sum[d]) % MOD);
            addmod(sum1[d], s1);
            addmod(sum[d], s);
            cnt[d] += freq;
        };
        for (ll j = 1; j * j <= x; ++j) {
            if (x % j) continue;
            add((int)j);
            if (x / j != j) add((int)(x / j));
        }
    }

    // dp[d] — the required sum restricted to sets with GCD exactly d.
    vector<ll> dp(n + 1, 0);
    for (int d = n; d >= 1; --d) {
        if (!cnt[d]) continue;
        ll &ret = dp[d];
        ll k = cnt[d];
        ll p3 = 0, p2 = 0;  // 2^(k-3) and 2^(k-2)
        if (k > 2) {
            p3 = powmod(2, k - 3);
            p2 = p3 * 2 % MOD;
        } else if (k > 1) {
            p2 = powmod(2, k - 2);
        }
        if (k > 1) {
            // a_i * a_i terms: counted 2^(k-2) * (k-1) times
            addmod(ret, p2 * ((k - 1) % MOD) % MOD * sum1[d] % MOD);
            // a_i * a_j (i != j): the 2^(k-2) part ...
            addmod(ret, p2 * sum2[d] % MOD);
        }
        if (k > 2) {
            // ... and the 2^(k-3) * (k-2) part
            addmod(ret, p3 * ((k - 2) % MOD) % MOD * sum2[d] % MOD);
        }
        // subtract sets whose GCD is a proper multiple of d
        for (int j = d + d; j <= n; j += d) {
            addmod(ret, -dp[j]);
        }
    }
    cout << dp[1] << "\n";
    return 0;
}
1437
A
Marketing Scheme
You got a job as a marketer in a pet shop, and your current task is to boost sales of cat food. One of the strategies is to sell cans of food in packs with discounts. Suppose you decided to sell packs with $a$ cans in a pack with a discount and some customer wants to buy $x$ cans of cat food. Then he follows a greedy strategy: - he buys $\left\lfloor \frac{x}{a} \right\rfloor$ packs with a discount; - then he wants to buy the remaining $(x \bmod a)$ cans one by one. {$\left\lfloor \frac{x}{a} \right\rfloor$ is $x$ divided by $a$ rounded down, $x \bmod a$ is the remainder of $x$ divided by $a$.} But customers are greedy in general, so if the customer wants to buy $(x \bmod a)$ cans one by one and it happens that $(x \bmod a) \ge \frac{a}{2}$ he decides to buy the whole pack of $a$ cans (instead of buying $(x \bmod a)$ cans). It makes you, as a marketer, happy since the customer bought more than he wanted initially. You know that each of the customers that come to your shop can buy any number of cans from $l$ to $r$ inclusive. Can you choose such size of pack $a$ that each customer buys more cans than they wanted initially?
Note that if $\left\lfloor \frac{l}{a} \right\rfloor < \left\lfloor \frac{r}{a} \right\rfloor$ then exists such $k \cdot a$ that $l \le ka \le r$ and, obviously, a customer, who wants to buy $ka$ cans won't buy more than he wants. That's why $\left\lfloor \frac{l}{a} \right\rfloor = \left\lfloor \frac{r}{a} \right\rfloor$ and we can rephrase our task as finding such $a$ that $\frac{a}{2} \le (l \bmod a) \le (r \bmod a) < a$. The longer the segment $[\frac{a}{2}, a)$ is the better and the maximum we can take is $a = 2l$. As a result, we need to check that $r < a \leftrightarrow r < 2l$.
[ "brute force", "constructive algorithms", "greedy", "math" ]
800
fun main() { repeat(readLine()!!.toInt()) { val (l, r) = readLine()!!.split(' ').map { it.toInt() } println(if (2 * l > r) "YES" else "NO"); } }
1437
B
Reverse Binary Strings
You are given a string $s$ of even length $n$. String $s$ is binary, in other words, consists only of 0's and 1's. String $s$ has exactly $\frac{n}{2}$ zeroes and $\frac{n}{2}$ ones ($n$ is even). In one operation you can reverse any substring of $s$. A substring of a string is a contiguous subsequence of that string. What is the minimum number of operations you need to make string $s$ alternating? A string is alternating if $s_i \neq s_{i + 1}$ for all $i$. There are two types of alternating strings in general: 01010101... or 10101010...
We need to make our string alternating, i. e. $s_i \neq s_{i + 1}$. When we reverse substring $s_l \dots s_r$, we change no more than two pairs $s_{l - 1}, s_l$ and $s_r, s_{r + 1}$. Moreover, one pair should be a consecutive pair 00 and other - 11. So, we can find lower bound to our answer as maximum between number of pairs of 00 and number of pairs of 11. And we can always reach this lower bound, by pairing 00 with 11 or with left/right border of $s$. Another way to count the answer is next: suppose we want to make string 0101..., then let's transform $s$ to 1 + $s$ + 0. For example, if $s =$ 0110, we will get 101100. We claim that after this transformation, we will have equal number of 00 and 11, so the answer is the number of consecutive pairs of the same character divided by two. The answer is the minimum between answers for 1 + $s$ + 0 and 0 + $s$ + 1.
[ "constructive algorithms", "greedy" ]
1,200
#include<bits/stdc++.h> using namespace std; #define fore(i, l, r) for(int i = int(l); i < int(r); i++) #define sz(a) int((a).size()) const int INF = int(1e9); int n; string s; inline bool read() { if(!(cin >> n >> s)) return false; return true; } int cntSame(const string &s) { int ans = 0; fore (i, 1, sz(s)) ans += (s[i - 1] == s[i]); assert(ans % 2 == 0); return ans / 2; } inline void solve() { int ans = INF; fore (k, 0, 2) { ans = min(ans, cntSame(string(1, '0' + k) + s + string(1, '1' - k))); } cout << ans << endl; } int main() { ios_base::sync_with_stdio(false); cin.tie(0), cout.tie(0); cout << fixed << setprecision(15); int tc; cin >> tc; while(tc--) { assert(read()); solve(); } return 0; }
1437
C
Chef Monocarp
Chef Monocarp has just put $n$ dishes into an oven. He knows that the $i$-th dish has its optimal cooking time equal to $t_i$ minutes. At any \textbf{positive integer} minute $T$ Monocarp can put \textbf{no more than one} dish out of the oven. If the $i$-th dish is put out at some minute $T$, then its unpleasant value is $|T - t_i|$ — the absolute difference between $T$ and $t_i$. Once the dish is out of the oven, it can't go back in. Monocarp should put all the dishes out of the oven. What is the minimum total unpleasant value Monocarp can obtain?
There are a lot of solutions for the problem. Let's start with the easiest one. Sort the dishes in the non-decreasing order of their optimal time. I claim that there is an optimal answer such that the times $T$ for each dish go in the increasing order. That's not too hard to prove (something along the lines of if there are two dishes $i$ and $j$ such that $t_i < t_j$ and $T_i > T_j$, then $|t_i - T_i| + |t_j - T_j|$ is always greater than $|t_i - T_j| + |t_j - T_i|$). So we can use dynamic programming to solve the task. Let $dp[i][T]$ be the minimum total unpleasant value if $i$ dishes are processed and the current minute is $T$. For the transitions you can either put out the current dish $i$ at the current minute $T$ or wait one more minute. Notice that you'll never need more time than $2n$ minutes (the actual constraint is even smaller, just consider the case with all dishes times equal to $n$). So that dp works in $O(n^2)$. The other possible solution is matching. Let's build the following graph. The left partition is $n$ vertices corresponding to dishes. The right partition is $2n$ vertices corresponding to minutes (as we saw in previous solution $2n$ is always enough). Now add the edges between all dishes and all minutes with the cost of their absolute different. Finally, find the minimum cost maximum matching. That can be done with MCMF or Hungarian algorithm. Both should pass pretty easily. There's also a solution in $O(n \log n)$ involving the slope trick.
[ "dp", "flows", "graph matchings", "greedy", "math", "sortings" ]
1,800
#include <bits/stdc++.h> #define forn(i, n) for (int i = 0; i < int(n); i++) using namespace std; template<typename T> T hungarian(const vector<vector<T>>& cost) { const T INF = numeric_limits<T>::max(); int n = cost.size(), m = cost[0].size(); vector<T> u(n + 1), v(m + 1), dist(m + 1); vector<int> p(m + 1), way(m + 1), used(m + 1); for (int i = 1; i <= n; ++i) { p[0] = i; int j0 = 0; fill(dist.begin(), dist.end(), INF); do { used[j0] = i; int i0 = p[j0], j1 = -1; T delta = INF; for (int j = 1; j <= m; ++j) if (used[j] != i) { T cur = cost[i0 - 1][j - 1] - u[i0] - v[j]; if (cur < dist[j]) dist[j] = cur, way[j] = j0; if (dist[j] < delta) delta = dist[j], j1 = j; } forn(j, m + 1) { if (used[j] == i) u[p[j]] += delta, v[j] -= delta; else dist[j] -= delta; } j0 = j1; } while (p[j0] != 0); for (int j1; j0; j0 = j1) p[j0] = p[j1 = way[j0]]; } return -v[0]; } void solve(){ int n; scanf("%d", &n); vector<int> t(n); forn(i, n){ scanf("%d", &t[i]); --t[i]; } vector<vector<int>> cost(n, vector<int>(2 * n)); forn(i, n) forn(j, 2 * n) cost[i][j] = abs(t[i] - j); printf("%d\n", hungarian(cost)); } int main() { int q; scanf("%d", &q); forn(_, q) solve(); }
1437
D
Minimal Height Tree
Monocarp had a tree which consisted of $n$ vertices and was rooted at vertex $1$. He decided to study BFS (Breadth-first search), so he ran BFS on his tree, starting from the root. BFS can be described by the following pseudocode: \begin{verbatim} a = [] # the order in which vertices were processed q = Queue() q.put(1) # place the root at the end of the queue while not q.empty(): k = q.pop() # retrieve the first vertex from the queue a.append(k) # append k to the end of the sequence in which vertices were visited for y in g[k]: # g[k] is the list of all children of vertex k, sorted in ascending order q.put(y) \end{verbatim} Monocarp was fascinated by BFS so much that, in the end, he lost his tree. Fortunately, he still has a sequence of vertices, in which order vertices were visited by the BFS algorithm (the array a from the pseudocode). Monocarp knows that each vertex was visited exactly once (since they were put and taken from the queue exactly once). Also, he knows that all children of each vertex were viewed in ascending order. Monocarp knows that there are many trees (in the general case) with the same visiting order $a$, so he doesn't hope to restore his tree. Monocarp is okay with any tree that \textbf{has minimum height}. The height of a tree is the maximum depth of the tree's vertices, and the depth of a vertex is the number of edges in the path from the root to it. For example, the depth of vertex $1$ is $0$, since it's the root, and the depth of all root's children are $1$. Help Monocarp to find any tree with given visiting order $a$ and minimum height.
Due to the nature of BFS, the visiting order consists of several segments: first goes root (has depth $0$), then all vertices with depth $1$, then all vertices with depth $2$ and so on. Since any vertex of depth $d$ is a child of vertex of depth $d - 1$, then it's optimal to make the number of vertices with depth $1$ as many as possible, then make the number of vertices with depth $2$ as many as possible and so on. Since children of a vertex are viewed in ascending order and form a segment in visiting order then an arbitrary segment of visiting order can be children of the same vertex iff elements in the segments are in ascending order. These two observations lead us to a greedy strategy: $a_1 = 1$, then let's find the maximum $r_1$ that segment $a_2, \dots, a_{r_1}$ is in ascending order - they will be the children of $a_1$ and the only vertices of depth $1$. Next search the maximum $r_2$ such that segment $a_{r_1 + 1}, \dots, a_{r_2}$ is in ascending order - they will be the children of $a_2$, and so on. It's easy to see that this strategy maximizes the number of vertices of each depth level, so minimize the height of the tree.
[ "graphs", "greedy", "shortest paths", "trees" ]
1,600
#include<bits/stdc++.h> using namespace std; #define fore(i, l, r) for(int i = int(l); i < int(r); i++) #define sz(a) int((a).size()) #define x first #define y second typedef long long li; typedef pair<int, int> pt; const int INF = int(1e9); const li INF64 = li(1e18); int n; vector<int> a; inline bool read() { if(!(cin >> n)) return false; a.resize(n); fore (i, 0, n) cin >> a[i]; return true; } inline void solve() { vector<int> h(n, INF); h[0] = 0; int lst = 0; fore (i, 1, n) { if (i - 1 > 0 && a[i - 1] > a[i]) lst++; h[i] = h[lst] + 1; } cout << h[n - 1] << endl; } int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); int tt = clock(); #endif ios_base::sync_with_stdio(false); cin.tie(0), cout.tie(0); cout << fixed << setprecision(15); int tc; cin >> tc; while(tc--) { read(); solve(); #ifdef _DEBUG cerr << "TIME = " << clock() - tt << endl; tt = clock(); #endif } return 0; }
1437
E
Make It Increasing
You are given an array of $n$ integers $a_1$, $a_2$, ..., $a_n$, and a set $b$ of $k$ distinct integers from $1$ to $n$. In one operation, you may choose two integers $i$ and $x$ ($1 \le i \le n$, $x$ can be any integer) and assign $a_i := x$. This operation can be done only if $i$ does not belong to the set $b$. Calculate the minimum number of operations you should perform so the array $a$ is increasing (that is, $a_1 < a_2 < a_3 < \dots < a_n$), or report that it is impossible.
First, let's solve the problem without blocked positions. Let's look at the array $b_i = a_i - i$. Obviously, if $a$ strictly increases, then $b$ does not decrease, and vice versa. Now we have to find the maximum number of positions in the $b$ array that can be left unchanged. And you can always choose an integer that will not break the non-decreasing array $b$ for the rest of positions. This problem can be solved in $O(n \log{n})$ by analogy with the largest increasing subsequence, but now you can take equal elements. Now you can realize that the segments between two blocked positions do not depend on each other, and the initial problem can be solved as the problem described above. All that remains is to check that all blocked positions do not break the strict array increment.
[ "binary search", "constructive algorithms", "data structures", "dp", "implementation" ]
2,200
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for (int i = 0; i < int(n); ++i) const int N = 500 * 1000 + 13; int n, k; int a[N], b[N]; int main() { scanf("%d%d", &n, &k); forn(i, n) scanf("%d", &a[i + 1]); a[0] = -1e9; a[n + 1] = 2e9; forn(i, n + 2) a[i] -= i; forn(i, k) scanf("%d", &b[i + 1]); b[k + 1] = n + 1; int ans = 0; forn(i, k + 1) { int l = b[i], r = b[i + 1]; if (a[l] > a[r]) { puts("-1"); return 0; } vector<int> lis; for (int j = l + 1; j < r; ++j) if (a[l] <= a[j] && a[j] <= a[r]) { auto pos = upper_bound(lis.begin(), lis.end(), a[j]); if (pos == lis.end()) lis.push_back(a[j]); else *pos = a[j]; } ans += (r - l - 1) - int(lis.size()); } printf("%d\n", ans); }
1437
F
Emotional Fishermen
$n$ fishermen have just returned from a fishing vacation. The $i$-th fisherman has caught a fish of weight $a_i$. Fishermen are going to show off the fish they caught to each other. To do so, they firstly choose an order in which they show their fish (each fisherman shows his fish exactly once, so, formally, the order of showing fish is a permutation of integers from $1$ to $n$). Then they show the fish they caught according to the chosen order. When a fisherman shows his fish, he might either become happy, become sad, or stay content. Suppose a fisherman shows a fish of weight $x$, and the maximum weight of a previously shown fish is $y$ ($y = 0$ if that fisherman is the first to show his fish). Then: - if $x \ge 2y$, the fisherman becomes happy; - if $2x \le y$, the fisherman becomes sad; - if none of these two conditions is met, the fisherman stays content. Let's call an order in which the fishermen show their fish emotional if, after all fishermen show their fish according to this order, each fisherman becomes either happy or sad. Calculate the number of emotional orders modulo $998244353$.
First of all, sort the fishermen so it is easier to consider them in ascending order. The key observation that allows us to solve the problem is the following: there will be an increasing sequence of happy fishermen, and all other fishermen will be unhappy. Consider the fisherman $i$ which belongs to the increasing sequence. Let's analyze which fisherman will be next to it in the order. It is either a fisherman that will be happy, or a fisherman that will be sad. In the first case, the fish caught by this fisherman must have a size of at least $2a_i$, in the second case - at most $\frac{a_i}{2}$. The first case will be considered later. For the second case, if we know the number of fishermen that were already placed in the order, we know that all of them (except the $i$-th one) belong to the "sad" category (that is, the fish of every already placed fisherman, except for the $i$-th one, is at least two times smaller than the fish of the $i$-th fisherman). So, if we have already placed $j$ fishermen, the last happy fisherman was the $i$-th one, and we want to place a sad fisherman, then the number of ways to choose this sad fisherman is exactly $cntLess(i) - j + 1$, where $cntLess(i)$ is the number of fishermen $k$ such that $2 a_k \le a_i$. If we can handle the first case, this observation will allow us to solve the problem with dynamic programming. Let $dp_{i, j}$ be the number of ways to choose $j$ first fishermen in the order so that the $i$-th fisherman is the last happy one. The case when the next fisherman is sad can be handled with a transition to the state $dp_{i, j + 1}$ (don't forget to multiply by the number of ways to choose the next sad fisherman, as described earlier). What about the case when the next fisherman is happy? We should iterate on the fisherman $k$ such that $a_k \ge 2a_i$ and transition from $dp_{i, j}$ to $dp_{k, j + 1}$, but this part works in $O(n^3)$. 
To get an $O(n^2)$ solution, we have to speed it up with prefix sums or something like that.
[ "combinatorics", "dp", "math", "two pointers" ]
2,600
#include<bits/stdc++.h> using namespace std; const int N = 5043; const int MOD = 998244353; int dp[N][N]; int pdp[N][N]; int cntLess[N]; int lastLess[N]; int a[N]; int n; int add(int x, int y) { x += y; while(x >= MOD) x -= MOD; while(x < 0) x += MOD; return x; } int sub(int x, int y) { return add(x, MOD - y); } int mul(int x, int y) { return (x * 1ll * y) % MOD; } int main() { cin >> n; for(int i = 0; i < n; i++) cin >> a[i]; sort(a, a + n); for(int i = 0; i < n; i++) { cntLess[i] = 0; lastLess[i] = -1; for(int j = 0; j < n; j++) if(a[j] * 2 <= a[i]) { lastLess[i] = j; cntLess[i]++; } } for(int i = 0; i < n; i++) { dp[i][1] = 1; pdp[i + 1][1] = add(pdp[i][1], dp[i][1]); } for(int k = 2; k <= n; k++) for(int i = 0; i < n; i++) { if(cntLess[i] + 1 >= k) dp[i][k] = add(mul(dp[i][k - 1], add(cntLess[i], sub(2, k))), pdp[lastLess[i] + 1][k - 1]); else dp[i][k] = 0; //cerr << i << " " << k << " " << dp[i][k] << endl; pdp[i + 1][k] = add(pdp[i][k], dp[i][k]); } cout << dp[n - 1][n] << endl; }
1437
G
Death DBMS
For the simplicity, let's say that the "Death Note" is a notebook that kills a person when their name is written in it. It's easy to kill with it, but it's pretty hard to keep track of people you haven't killed and still plan to. You decided to make a "Death Database Management System" — a computer program that provides the easy access to the database of possible victims. Let me describe its specifications to you. Let's define a victim entity: a victim has a name (not necessarily unique) that consists only of lowercase Latin letters and an integer suspicion value. At the start of the program the user enters a list of $n$ victim names into a database, each suspicion value is set to $0$. Then the user makes queries of two types: - $1~i~x$ — set the suspicion value of the $i$-th victim to $x$; - $2~q$ — given a string $q$ find the maximum suspicion value of a victim whose name is a contiguous substring of $q$. Just to remind you, this program doesn't kill people, it only helps to search for the names to write down in an actual notebook. Thus, the list of the victims in the database doesn't change throughout the queries. What are you waiting for? Write that program now!
I'm feeling extremely amused by the power of Aho-Corasick lately, so I will describe two solutions of this problem with it. Feel free to point out how cool you are solving the task with hashes or some suffix structure but Aho solutions will still be cooler. I also want to mention I'm quite proud of the name I came up with for that task :) First, let's assume that the words in the dictionary are unique. Build an Aho-Corasick automaton on the dictionary. Then build the tree of its suffix links. For the first solution you can use the fact that there are not a lot of words in the dictionary that can end in each position. To be exact, at most one word per unique word length. Thus, that's bounded by the square root of the total length. For that reason you can iterate over all the words that end in all positions of the queries in $O(q \sqrt n)$. How to do that fast? For each vertex of the automaton precalculate the closest vertex up the suffix link tree that's a terminal. Feed the query word into the automaton and from each vertex you stay at just jump up the tree until you reach the root. Take the maximum value over all the visited terminals. The second solution actually involves an extra data structure on top of that. No, it's not HLD. You are boring for using it. Let's abuse the fact that you are allowed to solve the problem fully offline. For each word you can save the list of pairs (time, value) of the times the value of the word changed. For each vertex of the automaton you can save all the times that vertex has been queried from. Now traverse the tree with dfs. When you enter the vertex, you want to apply all the updates that are saved for the words that are terminals here. What are the updates? From the list we obtained for a word you can generate such triples $(l, r, x)$ that this word had value $x$ from query $l$ to query $r$. Don't forget the $0$ value from $0$ to the first update to this word. Then ask all the queries. Then go to children. 
When you exit the vertex, you want all the updates to be gone. Well, there is a trick for these kinds of operations, it's called rollbacks. Maintain a segment tree over the query times, the $i$-th leaf should store the maximum value during the $i$-th query. The update operation updates the range with the new possible maximum. How to avoid using lazy propagation with such updates? Well, on point query you can collect all the values from the segtree nodes you visit on your way down. That way you don't have to push the updates all the way to the leaves. Not that it matters that much but the number of values to be saved for future rollbacks is decreased dramatically. That solution works in $O((n + q) \log q)$.
[ "data structures", "string suffix structures", "strings", "trees" ]
2,600
// Offline solution: build an Aho-Corasick automaton over the dictionary,
// record which automaton states each query text visits, then DFS the
// suffix-link tree while maintaining a rollback segment tree over query
// times (range "chmax" updates, point queries).
#include <bits/stdc++.h>
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;
const int AL = 26;  // alphabet size (declared but unused below)
// upd[i] — (time, value) updates applied to dictionary word i
vector<vector<pair<int, int>>> upd;
// ans[j] — answer for query j (type-2 queries only; -1 if no word matches)
vector<int> ans;
int n, m;  // n = number of dictionary words, m = number of queries

// Segment tree over query times supporting:
//  - range "take max with val" updates WITHOUT lazy propagation,
//  - point queries that max over every node on the root-to-leaf path,
//  - rollback of the most recent cell modifications (for the DFS undo).
struct segtree{
    vector<int*> where;  // addresses of modified tree cells, in order
    vector<int> vals;    // previous values of those cells (for rollback)
    vector<int> t;
    int n;
    segtree(){}
    segtree(int n) : n(n){
        t.assign(4 * n, -1);
        where.clear();
        vals.clear();
    }
    // Apply val as a possible maximum on [L, R); log old values for rollback.
    void updh(int v, int l, int r, int L, int R, int val){
        if (L >= R) return;
        if (l == L && r == R){
            // the update stays in this node — nothing is pushed to children
            where.push_back(&t[v]);
            vals.push_back(t[v]);
            t[v] = max(t[v], val);
            return;
        }
        int m = (l + r) / 2;
        updh(v * 2, l, m, L, min(m, R), val);
        updh(v * 2 + 1, m, r, max(m, L), R, val);
    }
    void upd(int l, int r, int val){ updh(1, 0, n, l, r, val); }
    // Point query: max of t[] over all nodes covering position pos.
    int geth(int v, int l, int r, int pos){
        if (l == r - 1) return t[v];
        int m = (l + r) / 2;
        if (pos < m) return max(t[v], geth(v * 2, l, m, pos));
        return max(t[v], geth(v * 2 + 1, m, r, pos));
    }
    int get(int pos){ return geth(1, 0, n, pos); }
    // Undo the single most recent cell modification.
    void rollback(){
        *where.back() = vals.back();
        where.pop_back();
        vals.pop_back();
    }
};
segtree st;

struct aho_corasick {
    struct node {
        map<int, int> nxt, go;  // trie edges / memoized automaton transitions
        int p, pch;             // parent vertex and character on the edge from it
        int suf, ssuf;          // suffix link (ssuf is declared but unused)
        vector<int> term;       // ids of dictionary words ending at this vertex
        vector<int> qs;         // ids of queries whose text visits this vertex
        node() {
            nxt.clear();
            go.clear();
            suf = ssuf = -1;
            term.clear();
            p = -1, pch = -1;
            qs.clear();
        }
    };
    vector<node> nodes;
    vector<vector<int>> g;  // suffix-link tree as children lists
    aho_corasick() { nodes = vector<node>(1, node()); }
    // Insert dictionary word s (with id) into the trie.
    void add(const string& s, int id) {
        int v = 0;
        forn(i, s.size()) {
            int c = s[i] - 'a';
            if (!nodes[v].nxt.count(c)) {
                nodes.push_back(node());
                nodes[v].nxt[c] = int(nodes.size()) - 1;
                nodes.back().p = v;
                nodes.back().pch = c;
            }
            v = nodes[v].nxt[c];
        }
        nodes[v].term.push_back(id);
    }
    // Run query text s through the automaton, tagging every visited state
    // with the query id; the tags are answered later during dfs().
    void feed(const string &s, int id){
        int v = 0;
        forn(i, s.size()){
            int c = s[i] - 'a';
            v = go(v, c);
            nodes[v].qs.push_back(id);
        }
    }
    // Automaton transition, memoized in nodes[v].go.
    int go(int v, int c) {
        if (nodes[v].go.count(c)) return nodes[v].go[c];
        if (nodes[v].nxt.count(c)) return nodes[v].go[c] = nodes[v].nxt[c];
        if (v == 0) return nodes[v].go[c] = 0;
        return nodes[v].go[c] = go(suf(v), c);
    }
    // Suffix link, memoized in nodes[v].suf.
    int suf(int v) {
        if (nodes[v].suf != -1)
            return nodes[v].suf;
        if (v == 0 || nodes[v].p == 0) return nodes[v].suf = 0;
        return nodes[v].suf = go(suf(nodes[v].p), nodes[v].pch);
    }
    // Build the suffix-link tree: edge suf(v) -> v for every v != root.
    void build_tree() {
        g.resize(nodes.size());
        forn(v, nodes.size()) {
            int u = suf(v);
            if (v != u) g[u].push_back(v);
        }
    }
    // DFS over the suffix-link tree. Entering a vertex applies the value
    // timeline of every word terminating here to the segment tree; every
    // query tagged in this subtree then sees all words that are suffixes
    // of its matched prefix. Leaving rolls the updates back.
    void dfs(int v){
        int cur = st.where.size();  // rollback watermark
        for (auto i : nodes[v].term){
            int lst = m;
            // upd[i] is newest-first (reversed in main), so each (time, value)
            // pair covers the half-open time range [time, lst)
            for (auto it : upd[i]){
                st.upd(it.first, lst, it.second);
                lst = it.first;
            }
            st.upd(0, lst, 0);  // implicit value 0 before the first update
        }
        for (auto j : nodes[v].qs){
            ans[j] = max(ans[j], st.get(j));
        }
        for (int u : g[v]){
            dfs(u);
        }
        int nw = st.where.size();
        forn(_, nw - cur){
            st.rollback();
        }
    }
};
aho_corasick ac;

int main() {
    // cin.tie(0) returns the previously tied stream (&cout, non-null),
    // so !cin.tie(0) is false — this is sync_with_stdio(false) plus untie.
    ios::sync_with_stdio(!cin.tie(0));
    cin >> n >> m;
    upd.resize(n);
    ans.resize(m, -1);
    vector<int> tp2;  // indices of type-2 (question) queries, in input order
    ac = aho_corasick();
    st = segtree(m);
    forn(i, n){
        string s;
        cin >> s;
        ac.add(s, i);
    }
    forn(i, m){
        int t;
        cin >> t;
        if (t == 1){
            // type 1: at time i, word j gets value x
            int j, x;
            cin >> j >> x;
            --j;
            upd[j].push_back(make_pair(i, x));
        }
        else{
            // type 2: query text q asked at time i
            string q;
            cin >> q;
            ac.feed(q, i);
            tp2.push_back(i);
        }
    }
    forn(i, n){
        reverse(upd[i].begin(), upd[i].end());  // newest-first, as dfs() expects
    }
    ac.build_tree();
    ac.dfs(0);
    for (auto it : tp2) cout << ans[it] << "\n";
    return 0;
}
1438
A
Specific Tastes of Andre
Andre has very specific tastes. Recently he started falling in love with arrays. Andre calls a nonempty array $b$ \textbf{good}, if the sum of its elements is divisible by the length of this array. For example, array $[2, 3, 1]$ is good, as the sum of its elements — $6$ — is divisible by $3$, but array $[1, 1, 2, 3]$ isn't good, as $7$ isn't divisible by $4$. Andre calls an array $a$ of length $n$ \textbf{perfect} if the following conditions hold: - Every nonempty subarray of this array is \textbf{good}. - For every $i$ ($1 \le i \le n$), $1 \leq a_i \leq 100$. Given a positive integer $n$, output any \textbf{perfect} array of length $n$. We can show that for the given constraints such an array always exists. An array $c$ is a subarray of an array $d$ if $c$ can be obtained from $d$ by deletion of several (possibly, zero or all) elements from the beginning and several (possibly, zero or all) elements from the end.
The array $a$ = $[1,1,\ldots,1,1]$ is perfect since the sum of every subarray is exactly equal to its length, and thus divisible by it.
[ "constructive algorithms", "implementation" ]
800
null
1438
B
Valerii Against Everyone
You're given an array $b$ of length $n$. Let's define another array $a$, also of length $n$, for which $a_i = 2^{b_i}$ ($1 \leq i \leq n$). Valerii says that every two non-intersecting subarrays of $a$ have different sums of elements. You want to determine if he is wrong. More formally, you need to determine if there exist four integers $l_1,r_1,l_2,r_2$ that satisfy the following conditions: - $1 \leq l_1 \leq r_1 < l_2 \leq r_2 \leq n$; - $a_{l_1} + a_{l_1+1} + \ldots + a_{r_1} = a_{l_2} + a_{l_2+1} + \ldots + a_{r_2}$. If such four integers exist, you will prove Valerii wrong. Do they exist? An array $c$ is a subarray of an array $d$ if $c$ can be obtained from $d$ by deletion of several (possibly, zero or all) elements from the beginning and several (possibly, zero or all) elements from the end.
We claim the answer is NO if and only if the elements are pairwise distinct. If any element has two occurrences, we can trivially select them as the two subarrays. Otherwise, since all elements are distinct, choosing a subarray is the same as choosing the set bits of a $10^9$ digit long binary number. Since every number has a unique binary representation, no two subarrays can have the same sum.
[ "constructive algorithms", "data structures", "greedy", "sortings" ]
1,000
null
1438
C
Engineer Artem
Artem is building a new robot. He has a matrix $a$ consisting of $n$ rows and $m$ columns. The cell located on the $i$-th row from the top and the $j$-th column from the left has a value $a_{i,j}$ written in it. If two adjacent cells contain the same value, the robot will break. A matrix is called \textbf{good} if no two adjacent cells contain the same value, where two cells are called adjacent if they share a side. Artem wants to \textbf{increment the values in some cells by one} to make $a$ good. More formally, find a good matrix $b$ that satisfies the following condition — - For all valid ($i,j$), either $b_{i,j} = a_{i,j}$ or $b_{i,j} = a_{i,j}+1$. For the constraints of this problem, it can be shown that such a matrix $b$ always exists. If there are several such tables, you can output any of them. Please note that you do not have to minimize the number of increments.
The increment by one operation essentially allows us to change the parity of any position. Let's color the matrix like a chessboard. Since every pair of adjacent cells consist of cells with different colors, we can make values at all black cells even and values at all white cells odd.
[ "2-sat", "chinese remainder theorem", "constructive algorithms", "fft", "flows" ]
2,000
null
1438
D
Powerful Ksenia
Ksenia has an array $a$ consisting of $n$ positive integers $a_1, a_2, \ldots, a_n$. In one operation she can do the following: - choose three distinct indices $i$, $j$, $k$, and then - change all of $a_i, a_j, a_k$ to $a_i \oplus a_j \oplus a_k$ simultaneously, where $\oplus$ denotes the bitwise XOR operation. She wants to make all $a_i$ equal \textbf{in at most $n$ operations}, or to determine that it is impossible to do so. She wouldn't ask for your help, but please, help her!
We will first solve the problem for odd $n$, and then extend the solution to even $n$. Note that applying the operation to $a, b, b$ makes all of them equal to $a$. Thus, we can try making pairs of equal elements. This is easy for odd $n$: While at least $3$ unpaired elements exist, apply the operation on any $3$. Pair any two of them and repeat. The number of operations used is exactly $n-1$. Let us denote $X$ as the xor of all elements in the original array. To solve for even $n$, we note that applying the given operation does not change $X$. Since the xor of an even number of same elements is $0$, the answer is impossible for arrays with $X \neq 0$. To solve for even $n$ and $X = 0$, we can just solve the problem for the first $n-1$ using the odd approach and the last element will magically be equal to the first $n-1$. This problem was set by Anti-Light and prepared by knightron00
[ "bitmasks", "constructive algorithms", "math" ]
2,200
null
1438
E
Yurii Can Do Everything
Yurii is sure he can do everything. Can he solve this task, though? He has an array $a$ consisting of $n$ positive integers. Let's call a subarray $a[l...r]$ \textbf{good} if the following conditions are simultaneously satisfied: - $l+1 \leq r-1$, i. e. the subarray has length at least $3$; - $(a_l \oplus a_r) = (a_{l+1}+a_{l+2}+\ldots+a_{r-2}+a_{r-1})$, where $\oplus$ denotes the bitwise XOR operation. In other words, a subarray is good if the bitwise XOR of the two border elements is equal to the sum of the rest of the elements. Yurii wants to calculate the total number of good subarrays. What is it equal to? An array $c$ is a subarray of an array $d$ if $c$ can be obtained from $d$ by deletion of several (possibly, zero or all) elements from the beginning and several (possibly, zero or all) elements from the end.
It's natural to think that the number of good subarrays cannot be very large; this is indeed true. The following algorithm works: Fix the left endpoint $l$. Let $k$ be the most significant set bit in $a_l$. Check every $r$ in increasing order by bruteforce while $\text{sum}(l+1,r-1)$ is smaller than $2^{k+1}$. Reverse the array, and do the same again. Note that we need to be careful here since we might count the same subarray twice. We, now, prove its correctness and efficiency. Consider any good subarray $a[l...r]$, let $k_1$ be the most significant set bit in $\text{max}(a_l,a_r)$ and $k_2$ the most significant set bit in $\text{sum}(l+1,r-1)$. We must have $k_1 \geq k_2$ because all bits greater than $k_1$ will be unset in $a_l \oplus a_r$, but $k_2$ is set. Hence, the algorithm counts all possible good subarrays. We now prove the number of subarrays our algorithm checks is of the order $\mathcal{O}(n\log{}a_i)$. For every $r$, let's count the number of $l$'s it can be reached by. For a particular $k$, notice that only the $2$ closest $l$'s to the left with this bit set can reach this $r$. For the third one and beyond, the sum will be at least $2*2^k = 2^{k+1}$ simply due the to the contribution of the closest two. Since there are $n$ right endpoints and only $\log{}a_i$ possible values of $k$, our claim is true.
[ "binary search", "bitmasks", "brute force", "constructive algorithms", "divide and conquer", "two pointers" ]
2,500
null
1438
F
Olha and Igor
\textbf{This is an interactive problem.} Igor wants to find the key to Olha's heart. The problem is, that it's at the root of a binary tree. There is a perfect binary tree of height $h$ consisting of $n = 2^{h} - 1$ nodes. The nodes have been assigned distinct labels from $1$ to $n$. However, \textbf{Igor only knows $h$ and does not know which label corresponds to which node}. To find key to Olha's heart he needs to find the label assigned to the root by making queries of the following type \textbf{at most $n+420$ times}: - Select three \textbf{distinct} labels $u$, $v$ and $w$ ($1 \leq u,v,w \leq n$). - In response, Olha (the grader) will tell him the label of the \textbf{lowest common ancestor} of nodes labelled $u$ and $v$, if the tree was \textbf{rooted} at the node labelled $w$ instead. Help Igor to find the root! \textbf{Note:} the grader is not adaptive: the labels are fixed before any queries are made.
The solution is as follows. Query $420$ random triples. Let $c_1$ and $c_2$ be the two most frequently returned nodes - these are the children of the root. Query $c_1,c_2$ with every other $i$, and only the root will return $i$. Firstly, note that a query - $u$, $v$, and $w$ - returns a node $x$ that minimizes the sum of distances from all the three nodes. Thus, the order of the $3$ nodes is irrelevant. You can think of $x$ as the node that lies on the path of every possible pair formed from the $3$ nodes. Now, let's calculate the number of triples for which node $u$ is returned as the answer. For this, we will root the tree at $u$, and calculate the subtree sizes of its children - $s_1$, $s_2$, and $s_3$ ($s_3 = 0$, if $u$ is the actual root). With these values with us, the number of triples is: $(s_1\times s_2\times s_3)+(s_1\times s_2)+(s_2\times s_3)+(s_3\times s_1)$ In the above expression, the first term calculates the triples in which $u$ is not present, while the other $3$ terms assume $u$ is one of the nodes in the triple. Nodes at the same depth will, of course, have the same count. At this point, we can either observe that this expression is maximum when $u$ is a child of the root or calculate values for every depth and compare them. For example, for $h = 5$, each child of the root is the answer to about $23\%$ of the triples. This value converges to $18\%$ per child when $h$ approaches $18$. Thus, when we query $420$ random triples, we can be sure enough that the two most frequently appearing values will be the children of the root. Finally, we just note that for all non-root nodes $v$, querying $c_1$, $c_2$, and $v$ gives either $c_1$ or $c_2$. Time Complexity: $\mathcal{O}(n)$
[ "interactive", "probabilities", "trees" ]
3,000
null
1439
A2
Binary Table (Hard Version)
\textbf{This is the hard version of the problem. The difference between the versions is in the number of possible operations that can be made. You can make hacks if and only if you solved both versions of the problem.} You are given a binary table of size $n \times m$. This table consists of symbols $0$ and $1$. You can make such operation: select $3$ different cells that belong to one $2 \times 2$ square and change the symbols in these cells (change $0$ to $1$ and $1$ to $0$). Your task is to make all symbols in the table equal to $0$. You are allowed to make at most $nm$ operations. \textbf{You don't need to minimize the number of operations.} It can be proved, that it is always possible.
Consider two cases: If $n = 2$ and $m = 2$, there are only $4$ possible operations, and we can use up to $4$ operations. So, one can check all the $2^4$ possible ways of choosing these operations, and see which combination of these operations will result in a full $0$ grid. Otherwise, at least one of $n$ and $m$ is bigger than $2$. Without loss of generality imagine $n > 2$. Take the $n$th row. For each cell within that row, we can use one operation on it, its left neighbour and the two cells above to fix this cell. We can do this for the first $m - 2$ cells in the row, and fix the last two with one operation on them. We will make at most $m - 1$ operations and reach a situation with one empty row. We can take the last row away and apply this procedure for the remaining $(n - 1) \times m$ grid. If we say inductively that we will have at most $(n - 1)m$ operations for the remaining grid, we will have done at most $(n - 1)m + m - 1 = nm - 1 < nm$ operations in total. When $n = 2$, we can do the same with the columns, and when $n = m = 2$, we can fix the remaining $2 \times 2$ grid as we discussed above. Time complexity: $O(nm)$ for each case.
[ "constructive algorithms", "graphs", "greedy", "implementation" ]
1,900
// In The Name Of Allah
// Binary Table: make an n x m 0/1 grid all zeros using operations that flip
// exactly three cells of some 2x2 square, within the allowed nm operations.
#include <bits/stdc++.h>
#define ss second
#define ff first
#define use_fast ios::sync_with_stdio(false), cin.tie(0), cout.tie(0)
#define ret(n) return cout << n, 0
#define se(n) cout << setprecision(n) << fixed
#define pb push_back
#define ll long long
#define ld long double
#pragma GCC optimize("Ofast,no-stack-protector,unroll-loops")
#pragma GCC optimize("no-stack-protector,fast-math")
//#pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,abm,mmx,avx,tune=native")
using namespace std;
const int N = 200, OO = 1e9 + 7, T = 50, M = 1e9 + 7, P = 6151, SQ = 280, lg = 20;
typedef pair <int, int> pii;
char c[N][N];    // raw input characters
bool cnt[N][N];  // current grid state: true = cell holds a 1
// One operation = three (row, col) offsets inside a 2x2 square.
struct node {int x1, y1, x2, y2, x3, y3;} p[5];  // p[0..3]: the 4 ways to pick 3 cells of a 2x2 square
vector <node> v;  // recorded operations to output

// Apply pattern tp anchored at (x, y):
//  is == true  -> record the operation into v (grid untouched);
//  is == false -> toggle the three cells in cnt (nothing recorded).
void upd(int x, int y, int tp, bool is) {
    if(is)
        v.pb({x + p[tp].x1, y + p[tp].y1, x + p[tp].x2, y + p[tp].y2, x + p[tp].x3, y + p[tp].y3});
    else
        cnt[x + p[tp].x1][y + p[tp].y1] ^= 1, cnt[x + p[tp].x2][y + p[tp].y2] ^= 1, cnt[x + p[tp].x3][y + p[tp].y3] ^= 1;
}

// One test case: sweep the 1s down row by row, then right along the last two
// rows, and finally brute-force the remaining bottom-right 2x2 block.
void solve() {
    int n, m, od = 0;  // od = number of 1-cells in the input
    v.clear();
    cin >> n >> m;
    for(int i = 1; i <= n; i++) {
        for(int j = 1; j <= m; j++) {
            cin >> c[i][j];
            if(c[i][j] == '1') od++, cnt[i][j] = true;
            else cnt[i][j] = false;
        }
    }
    if(od == 0) { cout << 0 << endl; return; }  // already all zeros
    // defensive: problem constraints guarantee n, m >= 2, so this never fires
    if(n == 1 || m == 1) { cout << -1 << endl; return; }
    // Rows 1..n-2: push every 1 into the row below via a 2x2 square
    // (use the square to the left when at the last column).
    for(int i = 1; i <= n - 2; i++) {
        for(int j = 1; j <= m; j++) {
            if(cnt[i][j]) {
                if(j != m) {
                    v.pb({i, j, i + 1, j, i + 1, j + 1});
                    cnt[i][j] ^= 1, cnt[i + 1][j] ^= 1, cnt[i + 1][j + 1] ^= 1;
                }
                else {
                    v.pb({i, j, i + 1, j, i + 1, j - 1});
                    cnt[i][j] ^= 1, cnt[i + 1][j] ^= 1, cnt[i + 1][j - 1] ^= 1;
                }
            }
        }
    }
    // Rows n-1 and n, columns 1..m-2: push the remaining 1s to the right.
    for(int i = 1; i <= m - 2; i++) {
        if(cnt[n - 1][i]) {
            v.pb({n - 1, i, n - 1, i + 1, n, i + 1});
            cnt[n - 1][i] ^= 1, cnt[n - 1][i + 1] ^= 1, cnt[n][i + 1] ^= 1;
        }
        if(cnt[n][i]) {
            v.pb({n, i, n - 1, i + 1, n, i + 1});
            cnt[n][i] ^= 1, cnt[n - 1][i + 1] ^= 1, cnt[n][i + 1] ^= 1;
        }
    }
    // Only the bottom-right 2x2 block may still hold 1s. Try all 2^4 subsets
    // of its four operations: apply the subset, keep (record) it if the block
    // becomes all zeros, otherwise undo and try the next mask.
    for(int msk = 0; msk < (1 << 4); msk++) {
        for(int j = 0; j < 4; j++) if(msk & (1 << j)) upd(n - 1, m - 1, j, 0);  // apply
        if(!cnt[n - 1][m - 1] && !cnt[n - 1][m] && !cnt[n][m - 1] && !cnt[n][m]) {
            for(int j = 0; j < 4; j++) if(msk & (1 << j)) upd(n - 1, m - 1, j, 1);  // record
            break;
        }
        for(int j = 0; j < 4; j++) if(msk & (1 << j)) upd(n - 1, m - 1, j, 0);  // undo
    }
    cout << (int)v.size() << endl;
    for(auto u : v) cout << u.x1 << " " << u.y1 << " " << u.x2 << " " << u.y2 << " " << u.x3 << " " << u.y3 << endl;
}

int32_t main(){
    use_fast;
    // The four 3-cell patterns of a 2x2 square, as (row, col) offset triples.
    p[0] = {0, 0, 0, 1, 1, 0}, p[1] = {0, 1, 0, 0, 1, 1}, p[2] = {1, 0, 1, 1, 0, 0}, p[3] = {1, 1, 0, 1, 1, 0};
    int t;
    cin >> t;
    while(t--) solve();
    return 0;
}
1439
B
Graph Subset Problem
You are given an undirected graph with $n$ vertices and $m$ edges. Also, you are given an integer $k$. Find either a clique of size $k$ or a non-empty subset of vertices such that each vertex of this subset has at least $k$ neighbors in the subset. If there are no such cliques and subsets report about it. A subset of vertices is called a clique of size $k$ if its size is $k$ and there exists an edge between every two vertices from the subset. A vertex is called a neighbor of the other vertex if there exists an edge between them.
div1 B: It is easy to see that if $k > \sqrt{2m}$ the answer is $-1$; because if $k > \sqrt{2m}$, no matter whether we have a clique of size $k$ or a subset of the graph with $\delta \geq k$, we will have more than $m$ edges in total. Now, the main idea is to suppose $u$ is the vertex with minimum degree; if $d(u) < k - 1$ we should delete $u$ because $u$ cannot be in a clique or in the subset of vertices such that each vertex of this subset has at least $k$ neighbors in the subset; so we have to erase $u$ and all edges attached to it. If $d(u) \geq k$, the remaining vertices will form a subset in which every vertex has at least $k$ neighbors in the subset, so we'll print this subset as the answer. If $d(u) = k - 1$, we consider $u$ and all neighbors of $u$ as a candidate for a clique of size $k$. Then we erase $u$ and all edges attached to it. If we erase all vertices and didn't find any good subset, then we should check the clique candidates. For checking clique candidates fast, iterate over vertices and name the current vertex $v$. Then for neighbors of $v$ set $nei_v$ to $1$ and $0$ otherwise. For each clique candidate that contains $v$, like $C$, we check the edge between $v$ and $u \in C$ in $O(1)$ using the array $nei$. Every time we find a new clique candidate, we remove at least $k - 1$ edges, so the number of clique candidates is at most $\frac{m}{k-1}$. For every candidate we check $\mathcal{O}(k^2)$ edges overall. So the time complexity is $\mathcal{O}(\frac{m}{k}) \cdot \mathcal{O}(k^2) \in \mathcal{O}(m \cdot k)$.
[ "constructive algorithms", "data structures", "graphs" ]
2,600
// In The Name Of Allah
// Graph Subset Problem: find a clique of size k, or a non-empty subset of
// vertices in which every vertex has at least k neighbours inside the subset.
// Strategy: peel minimum-degree vertices. If the minimum degree ever reaches
// k, the remaining vertices are the wanted subset (a k-core). A vertex whose
// degree is exactly k-1 at removal time, together with its still-present
// neighbours, is a clique candidate; candidates are verified lazily later.
#include <bits/stdc++.h>
#define ss second
#define ff first
#define use_fast ios::sync_with_stdio(false), cin.tie(0), cout.tie(0)
#define se(n) cout << setprecision(n) << fixed
#define pb push_back
//#define int long long
#define ld long double
#pragma GCC optimize("Ofast,no-stack-protector,unroll-loops")
#pragma GCC optimize("no-stack-protector,fast-math")
#pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,abm,mmx,avx,tune=native")
using namespace std;
const int N = 1e5 + 100, OO = 1e9 + 7, T = 22, M = 1e9 + 7, P = 6151, SQ = 1300, lg = 22;
typedef pair <int, int> pii;
int mark[N], deg[N], ct[N];  // mark[u] = removal timestamp (0 = not removed yet), deg = current degree, ct = scratch list
bool ans[N], can[N];         // ans[i] = clique candidate anchored at i still valid; can = temporary neighbour marks
vector <int> v[N], A;        // v = adjacency lists, A = vertices in removal order
vector <pii> ch[N];          // ch[u] = pending checks (w, anchor): candidate `anchor` requires edge u-w
bool cmp(int x, int y) { return mark[x] < mark[y]; }  // sort by removal time

void solve() {
    int n, m, k;
    cin >> n >> m >> k;
    A.clear();
    for(int i = 0; i <= n; i++) v[i].clear(), ch[i].clear(), mark[i] = deg[i] = ct[i] = 0, ans[i] = can[i] = false;
    for(int i = 0; i < m; i++) {
        int x, y;
        cin >> x >> y;
        v[x].pb(y);
        v[y].pb(x);
    }
    // A size-k clique (or a subset with min degree >= k) needs more than m
    // edges once k exceeds about sqrt(2m), so it cannot exist then.
    if(k > 500) { cout << -1 << endl; return; }
    // Peel vertices in increasing order of current degree.
    set <pii> st;
    for(int i = 1; i <= n; i++) st.insert({deg[i] = (int)v[i].size(), i});
    int cnt = 1;  // removal timestamp counter
    while((int)st.size()) {
        pii p = *st.begin();
        if(p.ff >= k) {
            // every remaining vertex has >= k neighbours among the remaining
            cout << 1 << " " << (int)st.size() << endl;
            for(auto u : st) cout << u.ss << " ";
            cout << endl;
            return;
        }
        else {
            st.erase(p);
            mark[p.ss] = cnt;
            for(auto u : v[p.ss]) if(!mark[u]) st.erase({deg[u], u}), st.insert({--deg[u], u});
            A.pb(p.ss);
        }
        cnt++;
    }
    // Verify clique candidates in removal order. For vertex i, its "forward"
    // neighbours are the ones removed later (mark[u] > mark[i]).
    for(auto i : A) {
        int nxt = 0;
        for(auto u : v[i]) if(mark[u] > mark[i]) ct[nxt++] = u, can[u] = true;
        // resolve the edge checks earlier candidates scheduled on vertex i:
        // u.ff must be a forward neighbour of i, else candidate u.ss dies
        for(auto u : ch[i]) if(!can[u.ff]) ans[u.ss] = false;
        for(int j = 0; j < nxt; j++) can[ct[j]] = false;
        if(nxt != k - 1) continue;  // only degree-(k-1)-at-removal vertices anchor candidates
        ans[i] = true;
        sort(ct, ct + nxt, cmp);
        // schedule every pair of forward neighbours as an edge requirement
        // (note: the inner loop variable k shadows the clique-size parameter k;
        // harmless here since the bounds use nxt)
        for(int j = 0; j < nxt; j++) for(int k = j + 1; k < nxt; k++) ch[ct[j]].pb({ct[k], i});
    }
    // Output the first candidate that survived all edge checks, if any.
    for(auto i : A) {
        if(!ans[i]) continue;
        cout << 2 << endl;
        cout << i << " ";
        for(auto u : v[i]) if(mark[u] > mark[i]) cout << u << " ";
        cout << endl;
        return;
    }
    cout << -1 << endl;
    return;
}

int32_t main() {
    use_fast;
    int t;
    cin >> t;
    while(t--) solve();
    return 0;
}
1439
C
Greedy Shopping
You are given an array $a_1, a_2, \ldots, a_n$ of integers. This array is \textbf{non-increasing}. Let's consider a line with $n$ shops. The shops are numbered with integers from $1$ to $n$ from left to right. The cost of a meal in the $i$-th shop is equal to $a_i$. You should process $q$ queries of two types: - 1 x y: for each shop $1 \leq i \leq x$ set $a_{i} = max(a_{i}, y)$. - 2 x y: let's consider a hungry man with $y$ money. He visits the shops from $x$-th shop to $n$-th and if he can buy a meal in the current shop he buys one item of it. Find how many meals he will purchase. The man can buy a meal in the shop $i$ if he has at least $a_i$ money, and after it his money decreases by $a_i$.
We can prove that the hungry man will eat at most $log_2(maxY)$ continuous subsegments, where $maxY$ is the maximum amount of money possible. Why is that so? Suppose the hungry man buys a meal from the $i$th shop but can't buy a meal from the $(i + 1)$th one after that. Then, the money the hungry man had before buying the $i$th food is at least twice the money he has after buying the $i$th food, because the money left is less than $a_{i+1}$ and $a_i \geq a_{i+1}$. So every time he breaks the subsegment of shops, his money is cut at least in half, so he will eat at most $log_2(maxY)$ continuous subsegments. Now we need a data structure with the following queries: range_max and range_sum. Since our array is non-increasing, a segment tree will suffice. The first type of query is just a range_max query. For the second type of query you can find the first element that is equal to or smaller than the hungry man's money and after that find the segment that he will eat, in which we can use a binary-search on the tree to find these both. After that we can repeat this action until we reach the end of the array, or until his money runs out. Time complexity: $O((n + q)log(maxY)log(n))$
[ "binary search", "data structures", "divide and conquer", "greedy", "implementation" ]
2,600
// Greedy Shopping: segment tree over the non-increasing price array.
// Type 1 = chmax on a prefix (keeps the array non-increasing);
// Type 2 = simulate the greedy buyer, jumping over whole affordable
// segments with two descent binary searches (BS1/BS2).
#include <bits/stdc++.h>
#pragma GCC optimize ("O2,unroll-loops")
#pragma GCC optimize("no-stack-protector,fast-math")
//#pragma GCC target("sse,sse2,sse3,ssse3,sse4,popcnt,abm,mmx,avx,tune=native")
using namespace std;
typedef long long ll;
typedef long double ld;
typedef pair<int, int> pii;
typedef pair<pii, int> piii;
typedef pair<ll, ll> pll;
#define debug(x) cerr<<#x<<'='<<(x)<<endl;
#define debugp(x) cerr<<#x<<"= {"<<(x.first)<<", "<<(x.second)<<"}"<<endl;
#define debug2(x, y) cerr<<"{"<<#x<<", "<<#y<<"} = {"<<(x)<<", "<<(y)<<"}"<<endl;
#define debugv(v) {cerr<<#v<<" : ";for (auto x:v) cerr<<x<<' ';cerr<<endl;}
#define all(x) x.begin(), x.end()
#define pb push_back
#define kill(x) return cout<<x<<'\n', 0;
const int inf=1000000010;
const ll INF=10000000000000010LL;
const int mod=1000000007;
const int MAXN=200010, LOG=20;
ll n, m, k, u, v, x, y, t, a, b, ans;
ll A[MAXN];                      // meal prices (non-increasing)
ll seg[MAXN<<2], lazy[MAXN<<2];  // seg = range sum, lazy = pending assignment (0 = none; prices are positive)
int Mn[MAXN<<2], Mx[MAXN<<2];    // range min / max of prices (fit in int)

// Build the tree over positions [tl, tr); leaves hold A[tl].
void Build(int id, int tl, int tr){
    if (tr-tl==1){
        Mn[id]=Mx[id]=seg[id]=A[tl];
        return ;
    }
    int mid=(tl+tr)>>1;
    Build(id<<1, tl, mid);
    Build(id<<1 | 1, mid, tr);
    Mn[id]=min(Mn[id<<1], Mn[id<<1 | 1]);
    Mx[id]=max(Mx[id<<1], Mx[id<<1 | 1]);
    seg[id]=seg[id<<1] + seg[id<<1 | 1];
}
// Assign the constant val to a whole node of length len.
inline void add_lazy(int id, int len, ll val){ Mn[id]=val; Mx[id]=val; lazy[id]=val; seg[id]=len*val; }
// Push a pending assignment down to the two children.
inline void shift(int id, int tl, int tr){
    if (!lazy[id]) return ;
    int mid=(tl+tr)>>1;
    add_lazy(id<<1, mid-tl, lazy[id]);
    add_lazy(id<<1 | 1, tr-mid, lazy[id]);
    lazy[id]=0;
}
// a[i] = max(a[i], val) for all i < pos. Implemented as constant assignments
// on the maximal sub-ranges where Mx <= val; Mn >= val prunes. Because the
// array is non-increasing, only O(log n) boundary nodes are split.
void Maximize(int id, int tl, int tr, int pos, ll val){
    if (pos<=tl || val<=Mn[id]) return ;
    if (tr<=pos && Mx[id]<=val){
        add_lazy(id, tr-tl, val);
        return ;
    }
    shift(id, tl, tr);
    int mid=(tl+tr)>>1;
    Maximize(id<<1, tl, mid, pos, val);
    Maximize(id<<1 | 1, mid, tr, pos, val);
    Mn[id]=min(Mn[id<<1], Mn[id<<1 | 1]);
    Mx[id]=max(Mx[id<<1], Mx[id<<1 | 1]);
    seg[id]=seg[id<<1] + seg[id<<1 | 1];
}
// First position >= pos with price <= val; returns the sentinel tr (= n+1 at
// the root) when no such position exists in this node's range.
int BS1(int id, int tl, int tr, int pos, ll val){
    if (tr<=pos || val<Mn[id]) return tr;
    if (tr-tl==1)
        return tl;
    shift(id, tl, tr);
    int mid=(tl+tr)>>1, tmp=BS1(id<<1, tl, mid, pos, val);
    if (tmp==mid) return BS1(id<<1 | 1, mid, tr, pos, val);
    return tmp;
}
// First position where the prefix sum of prices (from position 1) exceeds
// val; returns tr when the whole range still fits within val.
int BS2(int id, int tl, int tr, ll val){
    if (seg[id]<=val) return tr;
    if (tr-tl==1) return tl;
    shift(id, tl, tr);
    int mid=(tl+tr)>>1, tmp=BS2(id<<1, tl, mid, val);
    if (tmp<mid) return tmp;
    return BS2(id<<1 | 1, mid, tr, val-seg[id<<1]);
}
// Sum of prices on [l, r).
ll Get(int id, int tl, int tr, int l, int r){
    if (r<=tl || tr<=l) return 0;
    if (l<=tl && tr<=r) return seg[id];
    shift(id, tl, tr);
    int mid=(tl+tr)>>1;
    return Get(id<<1, tl, mid, l, r) + Get(id<<1 | 1, mid, tr, l, r);
}

int main(){
    ios_base::sync_with_stdio(false);cin.tie(0);cout.tie(0);
    //freopen("input.txt", "r", stdin);
    //freopen("output.txt", "w", stdout);
    cin>>n>>m;
    for (int i=1; i<=n; i++) cin>>A[i];
    Build(1, 1, n+1);
    while (m--){
        cin>>t>>x>>y;
        if (t==1) Maximize(1, 1, n+1, x+1, y);  // prefix [1, x] gets chmax(y)
        else{
            // greedy buyer: start at shop x with y money; each iteration buys
            // one maximal affordable segment, so there are O(log maxY) rounds
            ans=0;
            while (1){
                x=BS1(1, 1, n+1, x, y);         // first shop he can afford
                if (x==n+1) break ;
                ll val=y+Get(1, 1, n+1, 1, x);  // budget expressed as a prefix-sum threshold
                int xx=BS2(1, 1, n+1, val);     // first shop that no longer fits
                // buy [x, xx)
                ans+=xx-x;
                y-=Get(1, 1, n+1, x, xx);
                x=xx;
            }
            cout<<ans<<"\n";
        }
    }
    return 0;
}
1439
D
INOI Final Contests
Today is the final contest of INOI (Iranian National Olympiad in Informatics). The contest room is a row with $n$ computers. All computers are numbered with integers from $1$ to $n$ from left to right. There are $m$ participants, numbered with integers from $1$ to $m$. We have an array $a$ of length $m$ where $a_{i}$ ($1 \leq a_i \leq n$) is the computer behind which the $i$-th participant wants to sit. Also, we have another array $b$ of length $m$ consisting of characters 'L' and 'R'. $b_i$ is the side from which the $i$-th participant enters the room. 'L' means the participant enters from the left of computer $1$ and goes from left to right, and 'R' means the participant enters from the right of computer $n$ and goes from right to left. The participants in the order from $1$ to $m$ enter the room one by one. The $i$-th of them enters the contest room in the direction $b_i$ and goes to sit behind the $a_i$-th computer. If it is occupied he keeps walking in his direction until he reaches the first unoccupied computer. After that, he sits behind it. If he doesn't find any computer he gets upset and gives up on the contest. The madness of the $i$-th participant is the distance between his assigned computer ($a_i$) and the computer he ends up sitting behind. The distance between computers $i$ and $j$ is equal to $|i - j|$. The values in the array $a$ \textbf{can be} equal. There exist $n^m \cdot 2^m$ possible pairs of arrays $(a, b)$. Consider all pairs of arrays $(a, b)$ such that no person becomes upset. For each of them let's calculate the sum of participants madnesses. Find the sum of all these values. You will be given some prime modulo $p$. Find this sum by modulo $p$.
Suppose $n = m$. Let $dp1_i$ be the number of $(A , B)$ pairs for $i$ participants and $i$ computers so that no one gets upset. For updating this array we can consider the last participant, and where he will sit. Suppose he sits behind the $j$-th computer. If he comes from left to right, there are $i$ choices of computers for him to sit behind, and if he comes from right to left there are $n - i + 1$ choices for him. If the last participant sits behind the $j$-th computer there are $dp1_{j-1} \cdot dp1_{i-j}$ ${i-1}\choose{j-1}$ ways to fill in the rest of the seats because after removing we have two independent subsegments. So $dp1_i = (i+1) \sum_{j=1}^{i} dp1_{j-1} \cdot dp1_{i-j}$ ${i-1}\choose{j-1}$. Now let $dp2_i$ be the sum of the total madnesses for all cases with $i$ participants and $i$ computers. Imagine the last person sitting on the $j$-th computer. The madness of every participant except the last one is $(i+1) \cdot (dp2_{j-1} \cdot dp1_{i-j} + dp2_{i-j} \cdot dp1_{j-1})$ ${i-1}\choose{j-1}$. However, the madness of the last participant is $($ ${j-1}\choose{2}$ $+$ ${i-j}\choose{2}$ $) \cdot dp1_{j-1}\cdot dp1_{i-j}$ ${i-1}\choose{j-1}$. Now what if $n > m$? Suppose $dp3_{i ,j}$ is the number of $(A , B)$ pairs for $i$ computers and $j$ participants so that no one gets upset. For updating it we can consider the maximal suffix that all of the computers in that suffix will get occupied by participants. Consider its lenghth is $l$ . If $l = 0$ then we add $dp3_{i-1,j}$ to $dp3_{i,j}$; if not, we add $dp1_l \cdot dp3_{i-l-1,j-l}$ ${j}\choose{l}$ to $dp3_{i, j}$. The update is similar to $dp1$. The update is done correctly because the subsegments are independant. At last, suppose $dp4_{i, j}$ is the sum of the total madnesses for all cases with $i$ computers and $j$ participants. Consider $l$ to be the lenghth of the maximal suffix that all of the computers in that suffix will get occupied by participants. 
If $l = 0$ then to $dp4_{i, j}$ we add $dp4_{i - 1, j}$. If $l > 0$ then $(dp4_{i-1-l, j-l} \cdot dp1_l + dp3_{i-1-l,j-l} \cdot dp2_l)$ ${j}\choose{l}$ is added to $dp4_{i, j}$. The answer shall be $dp4_{n, m}$. Time complexity: $O(n^3)$. Computing $dp1$ and $dp2$ is an $O(n^2)$ task, while computing $dp3$ and $dp4$ has a complexity of $O(n^3)$. Challenge: Can you find a solution with a better time complexity?
[ "combinatorics", "dp", "fft" ]
3,100
// Solution program: reads n and m from stdin and prints one number mod 1e9+7.
// Original author header: "And you curse yourself for things you never done"
// Shayan.P 2020-08-29
#include <bits/stdc++.h>
using namespace std;

using ll = long long;

const int maxn = 510, inf = 1e9 + 10, mod = 1e9 + 7;

ll n, m;

// Modular exponentiation.  The exponent is first reduced modulo (mod - 1)
// (Fermat's little theorem, mod is prime), which also lets callers pass
// b == -1 to request a modular inverse.
ll Pow(ll a, ll b) {
    b = (b + (mod - 1)) % (mod - 1);
    ll res = 1;
    for (; b; b >>= 1, a = a * a % mod)
        if (b & 1) res = res * a % mod;
    return res;
}

ll fac[maxn], ifac[maxn];

// Binomial coefficient C(n, k) modulo mod; 0 for out-of-range arguments.
ll C(ll n, ll k) {
    if (n < k || k < 0) return 0;
    return fac[n] * ifac[k] % mod * ifac[n - k] % mod;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie();

    // Precompute factorials and inverse factorials for the binomials.
    fac[0] = 1;
    for (int i = 1; i < maxn; i++) fac[i] = fac[i - 1] * i % mod;
    ifac[maxn - 1] = Pow(fac[maxn - 1], mod - 2);
    for (int i = maxn - 2; i >= 0; i--) ifac[i] = ifac[i + 1] * (i + 1) % mod;

    cin >> n >> m;
    ++n;  // the original flags this shift with "//!": all work uses n+1 internally

    ll answer = 0;
    // bef / w presumably split the configurations into (prefix, block, suffix)
    // pieces counted by the editorial's formulas — TODO confirm against it.
    for (ll bef = 0; bef < m; bef++) {
        for (ll w = 1; w + bef < m; w++) {  // corner case: bef == 0
            ll cnt1 = Pow(w + 1, w - 1) * Pow(2, w) % mod;
            ll cnt2 = Pow(n - w - 1, bef - 1) * Pow(2, bef) % mod * (n - w - 1 - bef) % mod;
            ll cnt = cnt1 * cnt2 % mod * C(w + bef, bef) % mod;
            ll score = w * (w + 1) % mod * n % mod;
            ll after = Pow(n, m - bef - w - 1) * Pow(2, m - bef - w - 1) % mod;
            answer = (answer + cnt * score % mod * after) % mod;
        }
    }

    // Final scaling: multiply by (n - m) and by the modular inverse of n.
    cout << (n - m) % mod * answer % mod * Pow(n, -1) % mod << "\n";
    return 0;
}
1439
E
Cheat and Win
Let's consider a $(10^9+1) \times (10^9+1)$ field. The rows are numbered with integers from $0$ to $10^9$ and the columns are numbered with integers from $0$ to $10^9$. Let's define as $(x, y)$ the cell located in the $x$-th row and $y$-th column. Let's call a cell $(x, y)$ good if $x \& y = 0$, there $\&$ is the bitwise and operation. Let's build a graph where vertices will be all good cells of the field and we will make an edge between all pairs of adjacent by side good cells. It can be proved that this graph will be a tree — connected graph without cycles. Let's hang this tree on vertex $(0, 0)$, so we will have a rooted tree with root $(0, 0)$. Two players will play the game. Initially, some good cells are black and others are white. Each player on his turn chooses a black good cell and a subset of its ancestors (possibly empty) and inverts their colors (from white to black and vice versa). The player who can't move (because all good cells are white) loses. It can be proved that the game is always finite. Initially, all cells are white. You are given $m$ pairs of cells. For each pair color all cells in a simple path between them as black. Note that we do not invert their colors, we paint them black. Sohrab and Mashtali are going to play this game. Sohrab is the first player and Mashtali is the second. Mashtali wants to win and decided to cheat. He can make the following operation multiple times before the game starts: choose a cell and invert colors of all vertices on the path between it and the root of the tree. Mammad who was watching them wondered: "what is the minimum number of operations Mashtali should do to have a winning strategy?". Find the answer to this question for the initial painting of the tree. It can be proved that at least one possible way to cheat always exists.
First let's solve the problem on arbitrary trees. Consider the following game: on some vertices of the tree we have some tokens. At each step you remove a token from some vertex and put a token in any subset of its ancestors. The first game can be transformed to this one by putting a token in each black vertex. So now we'll solve the new game. Obviously the game is independent for different tokens, so we can use Grundy numbers. After working around some examples you can find that the Grundy number for a vertex at height $h$ is $2^h$. it can be easily proved by induction since any number less than it can be built by exactly one subset of it's ancestors. Consider the binary representation of the grundy for the game(let it be S). Each cheating move means reversing the bits of a prefix of S. So the minimum number of operations needed can be shown as $T = (S[0] \oplus S[1]) + (S[1] \oplus S[2]) + ...$ Since each operation decreases this expression at most once, we will need at least $T$ operations, and there is an obvious way to make $S$ equal zero with that number of operations. Now to solve the original problem, we need to find a compressed tree of the marked vertices and then represent the black vertices as union of $O(m)$ paths from the root. After this we can finally calculate the compressed form of the total Grundy number and find the answer. For compressing the tree we need to do some operations, like sorting the vertices by starting time(dfs order), finding the LCA of 2 vertices, etc. in $O(H)$ time, where $H = log(10^9)$. How to do these? Well they can be done with the following observation (the details are left for the reader but they can be found in the model solution): For some $h$, consider the tree on these 3 set of cells: $T1: 0 \leq x < 2^h, 0 \leq y < 2^h$ $T2: 0 \leq x < 2^h, 2^h \leq y < 2^{h+1}$ $T3: 2^h \leq x < 2^{h+1}, 0 \leq y < 2^h$ One can see (and can prove) that these 3 trees are similar; so this gives us some recursive approach. 
Time complexity: $O(mH)$, where $H = log(10^9)$.
[ "bitmasks", "data structures", "games", "trees" ]
3,500
// Solution program for "Cheat and Win": compresses the relevant part of the
// infinite tree of good cells, accumulates Grundy-height intervals, and prints
// the minimum number of prefix-flip operations.  Reads stdin, writes stdout.
#include <bits/stdc++.h>
#pragma GCC optimize ("O2,unroll-loops")
using namespace std;

using pii = pair<int, int>;

#define debug(x) cerr<<#x<<'='<<(x)<<endl;
#define all(x) x.begin(), x.end()

const int inf = 1000000010;
const int MAXN = 400010, LOG = 30;

int n, m, root, ans;
pii A[MAXN], V[MAXN];        // A: raw query endpoints, V: sorted compressed vertices
int par[MAXN], sum[MAXN];    // parent in the compressed tree / path-count deltas
bool is[MAXN];               // vertex is the LCA of some query pair
vector<int> G[MAXN], grundy;

// Depth of a good cell (x, y): the root (0, 0) has depth 0 and every step
// towards the parent decreases x + y by one.
inline int GetH(pii p) { return p.first + p.second; }

// k-th ancestor of p.  A parent step clears part of the lowest set bit of one
// coordinate, so whole runs of steps can be skipped at once.
inline pii GetPar(pii p, int k) {
    int x = p.first, y = p.second;
    while (k) {
        int xx = (x & -x), yy = (y & -y);
        if (!xx) xx = 2 * inf;   // coordinate already 0: it cannot shrink further
        if (!yy) yy = 2 * inf;
        int step = min(k, min(xx, yy));
        k -= step;
        if (xx < yy) x -= step;
        else y -= step;
    }
    return {x, y};
}

// Which of the three self-similar subtrees of size 2^(n-1) contains p
// (the editorial's T1/T2/T3 decomposition).
inline int Zone(pii p, int n) {
    if (p.first & (1 << (n - 1))) return 2;
    if (p.second & (1 << (n - 1))) return 1;
    return 0;
}

// Reorders vec into DFS order of the tree, recursing on the three-zone
// self-similarity of the structure.
void dfs_order(vector<pii> &vec, int n = LOG) {
    if (n == 0 || vec.size() == 0) return;
    vector<pii> v[3];
    for (pii p : vec) {
        int z = Zone(p, n);
        p.first &= (1 << (n - 1)) - 1;
        p.second &= (1 << (n - 1)) - 1;
        v[z].push_back(p);
    }
    vec.clear();
    for (int i : {0, 1, 2}) dfs_order(v[i], n - 1);
    // Merge back: the x == 0 part of zone 0 precedes zone 1, the rest zone 2.
    for (pii p : v[0]) if (!p.first) vec.push_back(p);
    for (pii p : v[1]) vec.push_back({p.first, p.second | (1 << (n - 1))});
    for (pii p : v[0]) if (p.first) vec.push_back(p);
    for (pii p : v[2]) vec.push_back({p.first | (1 << (n - 1)), p.second});
}

// Lowest common ancestor of two good cells via the same decomposition; O(LOG).
pii Lca(pii u, pii v, int n = LOG) {
    if (n == 0) return {0, 0};
    int zu = Zone(u, n), zv = Zone(v, n);
    if (zu > zv) swap(u, v), swap(zu, zv);
    u.first &= (1 << (n - 1)) - 1;
    u.second &= (1 << (n - 1)) - 1;
    v.first &= (1 << (n - 1)) - 1;
    v.second &= (1 << (n - 1)) - 1;
    if (zu == 1 && zv == 2) return {0, 0};  // different branches only meet at the root
    if (zu == 2 && zv == 2) {
        pii w = Lca(u, v, n - 1);
        return {w.first + (1 << (n - 1)), w.second};
    }
    if (zu == 1 && zv == 1) {
        pii w = Lca(u, v, n - 1);
        return {w.first, w.second + (1 << (n - 1))};
    }
    // zu == 0: replace the deeper endpoint with the entry cell of its zone.
    if (zv == 1) return Lca(u, {0, (1 << (n - 1)) - 1}, n - 1);
    if (zv == 2) return Lca(u, {(1 << (n - 1)) - 1, 0}, n - 1);
    return Lca(u, v, n - 1);
}

// Index of cell p in the sorted vertex array V.
inline int GetId(pii p) { return lower_bound(V, V + n, p) - V; }

// Is u an ancestor of (or equal to) v?
inline bool IsPar(pii u, pii v) {
    if (GetH(u) > GetH(v)) return 0;
    return GetPar(v, GetH(v) - GetH(u)) == u;
}

// Propagate the +1/+1/-2 deltas up the compressed tree: afterwards sum[v]
// is the number of query paths covering the edge (par[v], v).
int dfs(int node) {
    for (int child : G[node]) sum[node] += dfs(child);
    return sum[node];
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);

    vector<pii> vec;
    cin >> m;
    for (int i = 0; i < 2 * m; i++) {
        cin >> A[i].first >> A[i].second;
        vec.push_back(A[i]);
    }

    // Build the compressed (auxiliary) tree: the endpoints plus the LCA of
    // every pair of DFS-adjacent vertices.
    sort(all(vec));
    vec.resize(unique(all(vec)) - vec.begin());
    dfs_order(vec);
    for (int i = vec.size() - 1; i; i--) vec.push_back(Lca(vec[i], vec[i - 1]));
    sort(all(vec));
    vec.resize(unique(all(vec)) - vec.begin());
    dfs_order(vec);
    n = vec.size();
    debug("sorted")   // progress marker on stderr, kept from the original

    for (int i = 0; i < n; i++) V[i] = vec[i];
    sort(V, V + n);

    // Recover parent links with an ancestor stack over the DFS order.
    vector<int> stk = {root = GetId(vec[0])};
    for (int i = 1; i < n; i++) {
        int v = GetId(vec[i]);
        while (!IsPar(V[stk.back()], V[v])) stk.pop_back();
        par[v] = stk.back();
        G[stk.back()].push_back(v);
        stk.push_back(v);
    }

    // Mark each query path u-v: +1 at both endpoints, -2 at the LCA.
    for (int i = 0; i < 2 * m; i += 2) {
        int u = GetId(A[i]), v = GetId(A[i + 1]), lca = GetId(Lca(A[i], A[i + 1]));
        sum[u]++;
        sum[v]++;
        sum[lca] -= 2;
        is[lca] = 1;
    }
    dfs(root);

    // Collect the height intervals whose parity contributes to the total
    // Grundy value (endpoints come in pairs), then XOR overlapping intervals
    // together by sweeping the sorted endpoint list.
    for (int v = 0; v < n; v++) {
        if (sum[v]) {
            grundy.push_back(GetH(V[par[v]]) + 1);
            grundy.push_back(GetH(V[v]) + 1);
        } else if (is[v]) {
            grundy.push_back(GetH(V[v]));
            grundy.push_back(GetH(V[v]) + 1);
        }
    }
    sort(all(grundy));

    vec.clear();   // reused as the list of disjoint (start, end) intervals
    int last = -1;
    for (int x : grundy) {
        if (last == -1) {
            if (vec.empty() || vec.back().second < x) last = x;
            else {
                // Touches/overlaps the previous interval: merge with it.
                last = vec.back().first;
                vec.pop_back();
            }
        } else {
            if (last < x) vec.push_back({last, x});
            last = -1;
        }
    }

    // Each interval costs two prefix flips, except that a flip starting at
    // height 0 is free.
    ans = 2 * vec.size();
    if (ans && vec[0].first == 0) ans--;
    cout << ans << "\n";
    return 0;
}
1440
A
Buy the String
You are given four integers $n$, $c_0$, $c_1$ and $h$ and a binary string $s$ of length $n$. A binary string is a string consisting of characters $0$ and $1$. You can change any character of the string $s$ (the string should be still binary after the change). You should pay $h$ coins for each change. After some changes (possibly zero) you want to buy the string. To buy the string you should buy all its characters. To buy the character $0$ you should pay $c_0$ coins, to buy the character $1$ you should pay $c_1$ coins. Find the minimum number of coins needed to buy the string.
We will consider each character separately. Look at the $i$-th character; if it is originally a $1$, we can either change it to a $0$ and pay $h + c_0$ coins for this specific character, or we can not change it and pay $c_1$ coins for it. Since we want to pay as little as possible, we take the minimum of these two. So if the $i$th character is a $1$, we will have to pay $min(c_1, h + c_0)$ coins for it. A similar logic can be used for the zeroes; if the $i$th character is a $0$ we will have to pay $min(c_0, h + c_1)$ coins. So we iterate over $s$, and for each character we add the required minimum to the sum, depending on whether it's a $0$ or $1$. Time complexity: $O(n)$
[ "implementation", "math" ]
800
// In The Name Of Allah
// Codeforces 1440A — Buy the String.
// Every character is independent: buy it as-is, or pay h to flip it first
// and buy the other digit, whichever is cheaper.
#include <bits/stdc++.h>
using namespace std;

// Handle one test case: read n, c0, c1, h and the binary string, then print
// the minimum number of coins needed.
static void process_case() {
    int n, c0, c1, h;
    string s;
    cin >> n >> c0 >> c1 >> h >> s;

    const int price0 = min(c0, c1 + h);  // cheapest way to buy a '0'
    const int price1 = min(c1, c0 + h);  // cheapest way to buy a '1'

    int total = 0;
    for (char ch : s)
        total += (ch == '0') ? price0 : price1;

    cout << total << endl;
}

int32_t main() {
    ios::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);

    int tests;
    cin >> tests;
    while (tests--) process_case();
    return 0;
}
1440
B
Sum of Medians
A median of an array of integers of length $n$ is the number standing on the $\lceil {\frac{n}{2}} \rceil$ (rounding up) position in the non-decreasing ordering of its elements. Positions are numbered starting with $1$. For example, a median of the array $[2, 6, 4, 1, 3, 5]$ is equal to $3$. \textbf{There exist some other definitions of the median, but in this problem, we will use the described one.} Given two integers $n$ and $k$ and \textbf{non-decreasing} array of $nk$ integers. Divide all numbers into $k$ arrays of size $n$, such that each number belongs to \textbf{exactly} one array. You want the sum of medians of all $k$ arrays to be the maximum possible. Find this maximum possible sum.
We will consider a greedy approach. We take the $\lceil {\frac{n}{2}} \rceil$ biggest numbers from the end of the array and the $\lfloor {\frac{n}{2}} \rfloor$ smallest numbers from the beginning. We take these elements as one group, erase them from our array and then continue the same procedure on the remaining array. This can be done in a loop of $O(k)$, by taking every $\lceil {\frac{n}{2}} \rceil$th element. We can also prove this claim. Imagine we have marked $k$ elements to be the medians of these arrays. Each one of these elements needs at least $\lceil {\frac{n}{2}} \rceil - 1$ elements bigger than them and at least $\lfloor {\frac{n}{2}} \rfloor$ elements smaller than them to form a group in which they are the median. So we can always push the biggest of these $k$ numbers forward until we have exactly $\lceil {\frac{n}{2}} \rceil - 1$ elements bigger than them, and by pushing forward the sum of medians either doesn't change or gets larger. So our algorithm will always give the biggest possible answer. Time complexity: $O(nk)$ for each testcase
[ "greedy", "math" ]
900
// Codeforces 1440B — Sum of Medians.
// Greedy: walk from the back of the sorted array; each group's median is
// ceil(n/2)-th from its front, so consecutive chosen medians are
// (n - ceil(n/2) + 1) positions apart, counted from the top.
//
// Fix over the original submission: `int Main()` (a long long-returning
// function via `#define int long long`) fell off the end without a return
// statement, which is undefined behavior for a value-returning function.
// The per-case routine is now `void`, and `main` returns explicitly.
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;

const int N = 2e5 + 100;
ll a[N];  // 1-based storage for the nk sorted input numbers

// Solve one test case.  Input order is: group size first, then the number
// of groups (the problem's n and k), followed by n*k non-decreasing numbers.
static void solve_case() {
    ll group_size, groups;
    cin >> group_size >> groups;
    const ll total = group_size * groups;
    for (ll i = 1; i <= total; i++) cin >> a[i];

    // Distance between consecutive medians when scanning from the largest
    // element down: ceil(n/2) - 1 bigger elements are spent on each group.
    const ll step = group_size - ((group_size + 1) / 2 - 1);
    ll pos = total + 1;
    ll sum = 0;
    for (ll g = 0; g < groups; g++) {
        pos -= step;
        if (pos <= 0) break;  // defensive; cannot trigger for valid input
        sum += a[pos];
    }
    cout << sum << '\n';
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);

    ll t;
    cin >> t;
    while (t--) solve_case();
    return 0;
}
1442
A
Extreme Subtraction
You are given an array $a$ of $n$ positive integers. You can use the following operation as many times as you like: select any integer $1 \le k \le n$ and do one of two things: - decrement by one $k$ of the first elements of the array. - decrement by one $k$ of the last elements of the array. For example, if $n=5$ and $a=[3,2,2,1,4]$, then you can apply one of the following operations to it (not all possible options are listed below): - decrement from the first two elements of the array. After this operation $a=[2, 1, 2, 1, 4]$; - decrement from the last three elements of the array. After this operation $a=[3, 2, 1, 0, 3]$; - decrement from the first five elements of the array. After this operation $a=[2, 1, 1, 0, 3]$; Determine if it is possible to make all the elements of the array equal to zero by applying a certain number of operations.
The problem sounds like this - check that there are increasing and decreasing arrays, the element-wise sum of which is equal to the given array. This problem can be solved greedily. Let's maximize each element of the decreasing array (let's call this array $a$, and the increasing one $b$). Suppose initial array is $v$ and we have solved the problem on a prefix of length $i-1$. Then, for the element $a[i]$, $a[i] \le a[i - 1]$ and $v[i] - a[i] \ge b[i - 1]$ must be fulfilled. Rewriting the second inequality and combining with the first one, we get $a[i] \le min(a[i - 1], v[i] - b[i - 1])$. It is clear that taking $a[i] = min(a[i - 1], v[i] - b[i - 1])$ is best by construction.
[ "constructive algorithms", "dp", "greedy" ]
1,800
null
1442
B
Identify the Operations
We start with a permutation $a_1, a_2, \ldots, a_n$ and with an empty array $b$. We apply the following operation $k$ times. On the $i$-th iteration, we select an index $t_i$ ($1 \le t_i \le n-i+1$), remove $a_{t_i}$ from the array, and append one of the numbers $a_{t_i-1}$ or $a_{t_i+1}$ (if $t_i-1$ or $t_i+1$ are within the array bounds) to the right end of the array $b$. Then we move elements $a_{t_i+1}, \ldots, a_n$ to the left in order to fill in the empty space. You are given the initial permutation $a_1, a_2, \ldots, a_n$ and the resulting array $b_1, b_2, \ldots, b_k$. All elements of an array $b$ are \textbf{distinct}. Calculate the number of possible sequences of indices $t_1, t_2, \ldots, t_k$ modulo $998\,244\,353$.
Consider element with index $i$ that has value $b_1$ in the array $a$ - $a_i$. There are three options: Both $a_{i-1}$ and $a_{i+1}$ are present in the array $b$. Then both of them should stay in the array $a$ after the first operation - we will write them down later on. However, $a_i$ can only be added to the array b while removing one of the neighbors. We have reached a contradiction, so the answer is 0. One of the numbers $a_{i-1}$, $a_{i+1}$ is present in the array $b$, another is not. Then we have to remove the one that is not present in $b$ and continue solving the problem. Neither $a_{i-1}$ nor $a_{i+1}$ is present in $b$. $a_i$ is not present in any other place in $b$ (because all number in $b$ are unique), and $a_{i-1}$, $a_i$ and $a_{i+1}$ are indistinguishable by the following operations. Let us then remove any one of them (say, left) and "remove" all remaining tags. In this case, we can multiply answer by 2 and continue solving the problem. Now we know that the answer is either 0 or a power of 2. To calculate the answer we only need to implement the above-mentioned algorithm. Let us store a set of available numbers in the array $a$, and a set of numbers that are yet to appear in the array $b$ to implement necessary checks. The solution will have $\mathcal{O}(n \log n)$ complexity (that can be optimized to $\mathcal{O}(n)$ with help of arrays and double-linked lists, but it was not necessary for this particular problem).
[ "combinatorics", "data structures", "dsu", "greedy", "implementation" ]
1,800
null
1442
C
Graph Transpositions
You are given a directed graph of $n$ vertices and $m$ edges. Vertices are numbered from $1$ to $n$. There is a token in vertex $1$. The following actions are allowed: - Token movement. To move the token from vertex $u$ to vertex $v$ if there is an edge $u \to v$ in the graph. This action takes $1$ second. - Graph transposition. To transpose all the edges in the graph: replace each edge $u \to v$ by an edge $v \to u$. This action takes increasingly more time: $k$-th transposition takes $2^{k-1}$ seconds, i.e. the first transposition takes $1$ second, the second one takes $2$ seconds, the third one takes $4$ seconds, and so on. The goal is to move the token from vertex $1$ to vertex $n$ in the shortest possible time. Print this time modulo $998\,244\,353$.
Consider a sequence of actions that moves the token from vertex $1$ to vertex $n$. Let us say it has $A$ token movements and $B$ graph transpositions. This sequence takes $A + 2^{B} - 1$ seconds. Note that the optimal path does not visit any edge twice. That means we need to consider only paths with $A \le m$. Consider another sequence consisting of $A'$ token movements and $B'$ graph transpositions. Let $lm = \lceil\log_2 m\rceil$. Note the following. If $B < lm < B'$ then $A + 2^{B} - 1$ < $A' + 2^{B'} - 1$. This is true because the difference between $A$ and $A'$ does not exceed $m$ and $2^{B'} - 2^{B} > m$. This gives us the following: if there is any sequence of actions with $B < lm$ that moves the token from vertex $1$ to vertex $n$ then optimal path's $B$ is less than $lm$ too. Let us check this with the following algorithm, and if it is so, find the optimal sequence of actions. We can now build a new graph that consists of $lm$ copies of the original graph: Reverse all the edges in every even graph copy. For every vertex $u$ add new edge between $k$-th and $k+1$-th copies of vertex $u$ with weight $2^{k-1}$ for $k = 1 \ldots lm - 1$. We can find optimal paths from the first copy of vertex $1$ to all the copies of vertex $n$ using Dijkstra algorithm. Shortest of these paths would correspond to the answer: movement along a copy of original edge denotes token movement; movement along a new edge denotes graph transposition. If the algorithm found no paths, then the sequence of actions that moves the token to from vertex 1 to vertex $n$ consists of at least $lm + 1$ transpositions. Note that if $lm < B < B'$ then $A + 2^{B} - 1$ < $A' + 2^{B'} - 1$. It means that all sequences of actions can be compared using ordered vector $(B, A)$ lexicographically. Let us build another graph consisting of $2$ copies of the original graph: Reverse all the edges in the second copy of the graph. Assign $(0, 1)$ to weights of all of these edges. 
For every vertex $u$ add two new edges between copies of $u$: from the first to the second copy and back. Weights of both edges is $(1, 0)$. Let us find optimal paths from the first copy of vertex $1$ to both copies of vertex $n$ using Dijkstra algorithm. Let $(B, A)$ be the length of the shortest one. New graph allows us to restore the optimal sequence of actions that moves the token from vertex $1$ to vertex $n$ that will take $A + 2^{B} - 1$ seconds.
[ "dfs and similar", "graphs", "greedy", "shortest paths" ]
2,400
null
1442
D
Sum
You are given $n$ \textbf{non-decreasing} arrays of non-negative numbers. Vasya repeats the following operation $k$ times: - Selects a non-empty array. - Puts the first element of the selected array in his pocket. - Removes the first element from the selected array. Vasya wants to maximize the sum of the elements in his pocket.
Straightforward dp solution, where $s(i, j)$ - max possible sum after $j$ operations on first $i$ arrays and transition in $O(k)$, has complexity of $O(nk^2)$ or precisely $O(k \cdot min(nk, \sum\limits_{i=1}^n t_i))$ and doesn't fit into the time limit. Taking into account the fact that the arrays are sorted helps to optimize it. Consider the optimal solution. Let's denote as $x_i$ number of operations on $i$-th array. Let's call the array partially removed if we applied at least one operation to that array, but the array is not yet empty. Consider two partially removed arrays within the optimal solution and integers $p$ and $q$ where $0 < x_p < t_p$ and $0 < x_q < t_q$. Assume, without loss of generality, $a_{p, x_p+1} \le a_{q, x_q+1}$. Let $y = min(x_p, t_q - x_q) \ge 1$. If we replace $y$ operations on $p$-th array with $y$ operations on $q$-th array, total sum will increase by $\sum\limits_{i=1}^y a_{q, x_q + i} - \sum\limits_{i=1}^y a_{p, x_p + 1 - i} \ge y \cdot a_{q, x_q + 1} - y \cdot a_{p, x_p + 1} \ge 0$, but the number of partially removed arrays will lower by 1. We can repeat that substitution until we get a single partially removed array. Now we need to identify the single partially removed array in the final solution. Every other array can either all be Vasya's pocket, or remain as is. We can solve the knapsack problem with $(n-1)$ items, with array sizes as weights and sum of array's elements as value. For each $0 \le w \le k$ we need to find $g(w)$ - max possible total value of items with total weight $w$. The only remaining step is to brute force the size of prefix, Vasya removed from the single partially removed array and combine it with $g(w)$. Solving each knapsack problem independently results in $O(n^2k + \sum\limits_{i=1}^n t_i)$ complexity, but the similarities between the problems allows to optimize down to $O(kn\log n)$ using divide and conquer approach. Let's split the items into two halves of approximately similar size. 
Before going in recursively into the first half we will relax dp values with each element of the second half (just like in usual knapsack problem) and undo the changes after. This way as soon as we reach the subset with just one piece we have already calculated dp for every other piece. Adding each piece into dp takes $O(\log n)$, each relaxation takes $O(k)$, final complexity is $O(kn\log n + \sum\limits_{i=1}^n t_i)$.
[ "data structures", "divide and conquer", "dp", "greedy" ]
2,800
null
1442
E
Black, White and Grey Tree
You are given a tree with each vertex coloured white, black or grey. You can remove elements from the tree by selecting a subset of vertices in a single connected component and removing them and their adjacent edges from the graph. The only restriction is that you are not allowed to select a subset containing a white and a black vertex at once. What is the minimum number of removals necessary to remove all vertices from the tree?
Let's solve the task step by step: Suppose that tree is a bamboo without the grey vertices. Such a tree can be viewed as an array of colors $1$ and $2$. We can see that if there are two adjacent vertices of equal color, we can always delete them together in one operation. We can merge adjacent vertices of the same color, and get an array of colors $b_1, b_2, \ldots, b_k$, such that $b_i \in \{1, 2\}; b_i \ne b_{i+1}$. Such an array can be defined by two numbers - $b_1, k$.We can see that such an array $b$ of length $k$ can not be deleted in less than $\lfloor {k \over 2} \rfloor + 1$ removals. It can be proved by induction. Also, you can delete all elements in this number of removals by deleting opposite leaves (after the first removal opposite leaves will have the same color). We can see that such an array $b$ of length $k$ can not be deleted in less than $\lfloor {k \over 2} \rfloor + 1$ removals. It can be proved by induction. Also, you can delete all elements in this number of removals by deleting opposite leaves (after the first removal opposite leaves will have the same color). Let's solve the task for a general tree without grey vertices. Let's assign the edge $uv$ with weight $0$ if $a_u=a_v$, and $1$ otherwise. Let's find the longest path (diameter) in this weighted tree, and let it be the vertices $v_1, v_2, \ldots, v_m$. We can see this path as bamboo from the previous paragraph, and find the corresponding value $k$ for this path (it is equal to diameter + 1). It is obvious that we can't delete the tree in less than $\lfloor {k \over 2} \rfloor + 1$ removals (otherwise we would be able to delete the bamboo in a smaller number of removals). Turns out that we can delete all vertices in this number of removals. We can do the same algorithm - let's delete the opposite leaves of diameter, and also let's delete all leaves in the tree that have the same color (why not). 
After one such removal, our path will still be a diameter (if another path becomes the diameter, then one of its leaves should have the same color, and was going to be deleted). We can find the diameter in such a 0/1 tree in linear time, or we can solve the task even simpler. We can see that we alternate the removal of black and white vertices, and we delete all the leaves with the same color. So, we can choose the first operation (delete black or white), and at each iteration just delete all corresponding leaves. It works in linear time. Turns out that we can delete all vertices in this number of removals. We can do the same algorithm - let's delete the opposite leaves of diameter, and also let's delete all leaves in the tree that have the same color (why not). After one such removal, our path will still be a diameter (if another path becomes the diameter, then one of its leaves should have the same color, and was going to be deleted). We can find the diameter in such a 0/1 tree in linear time, or we can solve the task even simpler. We can see that we alternate the removal of black and white vertices, and we delete all the leaves with the same color. So, we can choose the first operation (delete black or white), and at each iteration just delete all corresponding leaves. It works in linear time. Let's solve the task without additional constraints. Now there are the grey vertices. How do they change the solution? Let's see at the last removal - suppose we deleted vertex $v$, which was not grey. Then we can imagine that we make $v$ the root of the tree, and paint all the grey vertices in the color of their parents. Then we have a tree without grey vertices, which we can solve. Obviously, the answer for such a colored tree is not less than the answer for the initial tree (because we can make the same removals as in a colored tree). 
But we can see that we can't get the smaller answer, as by coloring grey vertices we effectively removed them from the tree, and the value $\lfloor {k \over 2} \rfloor + 1$ (over subsequences of black and white vertices) hasn't changed. So, overall, the solution is to choose the first removal (1 or 2), and alternate removals of black and white vertices. For removal of $c \in \{ 1, 2 \}$ we delete all the leaves with color $0$ or $c$. Also, we can note that the tree remains connected in this process.
[ "binary search", "constructive algorithms", "dfs and similar", "dp", "greedy", "trees" ]
3,000
null
1442
F
Differentiating Games
This is an interactive problem Ginny is taking an exam on game theory. The professor is tired of hearing the same answers over and over again, so he offered Ginny to play a game instead of a standard exam. As known from the course, a combinatorial game on a graph with multiple starting positions is a game with a directed graph and multiple starting vertices holding a token each. Two players take turns moving one of the tokens along the graph edges on each turn. The player who can't make a move loses the game. If both players can play an infinitely long game without losing, a draw is called. For the exam, the professor drew an acyclic directed graph and chose one of its vertices. Ginny needs to guess the vertex the professor chose. To do so, Ginny can choose a multiset of vertices $S$ several times and ask the professor: "If I put one token in each vertex of the given graph for each occurrence of the vertex in the multiset $S$, and then one more in the selected vertex, what would be the result of the combinatorial game?". Having given the task, the professor left the room to give Ginny some time to prepare for the game. Ginny thinks that she's being tricked because the problem is impossible to solve. Therefore, while the professor is away, she wants to add or remove several edges from the graph. Even though the original graph was acyclic, edges could be added to the graph to make cycles appear.
Let's first solve the problem for an empty graph. To do this we need to build a construction with not too many edges, where all vertices are pairwise not equivalent. For acyclic graph, there is not such construction. All Grundy functions of vertices should be different, so they must be integers from 0 to $n-1$. The only such graph is full, and it has quadratic number of edges. Moreover, if there is a part from which no cycles are reachable, it must be full acyclic graph, so it can't be too big. So, how can other graph look? No vertices can be equivalent to any of vertices in acyclic part. The easiest way to achieve it is to add a loop to every vertex. Let's note, that after doing so all this vertices can't be losing in sum with any other game, because we can just go through loop and don't change anything. So, this games differ from each other be set of games in sum with which they would be winning, not draw. One can see, that such a game would be winning with any acyclic game, it have a move to, because we can go to it, and this will be move to acyclic game with Grundy function 0. The game would be draw with any other acyclic game (and with not acyclic too, but this is not important for solution). This is true, because if we don't move in game with loop, resulting position is not winning. Therefore, to win we must go out from loop vertex. But than we come into acyclic game with non-zero Grundy function, and it's winning, so we can't win. And we can go through loop, achieving a draw. So, we need to build a graph, with acyclic part of full graph on $k$ vertices, and all other vertices should have a loop and some edges to this $k$ vertices, and all vertices should have different set of edges. If done, all vertices would be pairwise distinguishable by $k$ queries for each acyclic part vertex. If any of them is lost, this vertex is chosen. 
If all of them are winning or drawn, then the vertex with a loop, which has an edge if and only if the sum with this vertex is winning, is chosen. In contrast to the acyclic case, we can distinguish exponentially many vertices. Returning to the initial problem, one can take $k = 20$, and use all subsets of size 0, 1 and 2, and the required part of subsets of size $3$. In such a graph there would be $190 + 980 + 1 \cdot 0 + 20 \cdot 1 + 190 \cdot 2 + 769 \cdot 3 = 3877$ edges. The only remaining part is to understand what to do with existing edges. One can note that in the built construction any edges between vertices with loops can be added, because they change nothing. If a position is winning, then after the first move they become unreachable, and don't change anything. If it was a draw, they can't change anything either, because they lead to a vertex with a loop, which can't be losing. Let's use 20 vertices, which have no edges outside of this set, as the vertices of the acyclic part. The initial graph was acyclic, so we can find such vertices. Let's add a loop to all other vertices. Now, we need to modify the graph in such a way that all sets of edges to the acyclic part would be different. Let's go through the vertices in any order, and change the minimal possible number of edges, so that this vertex's set would not be equal to any of the previous ones. For the first vertex we won't do anything. For the next 20 vertices, we would need to change at most 1 edge. For the next 190 vertices, we would need to change at most 2 edges, and at most 3 for the others. This leads us to the same 3877 edges to change. In the general case we can do either $O(n \log n)$ edge changes and $O(\log n)$ queries or $O((d+1) \cdot n)$ edge changes and $O(\sqrt[d]{d!n})$ queries. For this problem we need $d = 3$.
[ "games", "interactive" ]
3,400
null
1443
A
Kids Seating
Today the kindergarten has a new group of $n$ kids who need to be seated at the dinner table. The chairs at the table are numbered from $1$ to $4n$. Two kids can't sit on the same chair. It is known that two kids who sit on chairs with numbers $a$ and $b$ ($a \neq b$) will indulge if: - $gcd(a, b) = 1$ or, - $a$ divides $b$ or $b$ divides $a$. $gcd(a, b)$ — the maximum number $x$ such that $a$ is divisible by $x$ and $b$ is divisible by $x$. For example, if $n=3$ and the kids sit on chairs with numbers $2$, $3$, $4$, then they will indulge since $4$ is divided by $2$ and $gcd(2, 3) = 1$. If kids sit on chairs with numbers $4$, $6$, $10$, then they will not indulge. The teacher really doesn't want the mess at the table, so she wants to seat the kids so there are no $2$ of the kid that can indulge. More formally, she wants no pair of chairs $a$ and $b$ that the kids occupy to fulfill the condition above. Since the teacher is very busy with the entertainment of the kids, she asked you to solve this problem.
Note that this seating arrangement for children satisfies all conditions: $4n, 4n-2, 4n-4, \ldots, 2n+2$.
[ "constructive algorithms", "math" ]
800
null
1443
B
Saving the City
Bertown is a city with $n$ buildings in a straight line. The city's security service discovered that some buildings were mined. A map was compiled, which is a string of length $n$, where the $i$-th character is "1" if there is a mine under the building number $i$ and "0" otherwise. Bertown's best sapper knows how to activate mines so that the buildings above them are not damaged. When a mine under the building numbered $x$ is activated, it explodes and activates two adjacent mines under the buildings numbered $x-1$ and $x+1$ (if there were no mines under the building, then nothing happens). Thus, it is enough to activate any one mine on a continuous segment of mines to activate all the mines of this segment. For manual activation of one mine, the sapper takes $a$ coins. He can repeat this operation as many times as you want. Also, a sapper can place a mine under a building if it wasn't there. For such an operation, he takes $b$ coins. He can also repeat this operation as many times as you want. The sapper can carry out operations in any order. You want to blow up all the mines in the city to make it safe. Find the minimum number of coins that the sapper will have to pay so that after his actions there are no mines left in the city.
Since the activation of any mine explodes the entire segment of mines in which it is located, you can immediately replace the input string with an array of mine segments. We now have two operations. We can delete any segment for $a$ coins, or turn two adjacent segments $[l_1, r_1]$, $[l_2, r_2]$ ($r_1 < l_2$) into one segment for $b \cdot (l_2-r_1)$. That is, two segments can be deleted for a cost of $2 \cdot a$ or $a + b \cdot (l_2-r_1)$. This means that you need to merge two segments while $b \cdot (l_2-r_1) \le a$. You need to go through all adjacent segments and check this condition.
[ "dp", "greedy", "math", "sortings" ]
1,300
null
1443
C
The Delivery Dilemma
Petya is preparing for his birthday. He decided that there would be $n$ different dishes on the dinner table, numbered from $1$ to $n$. Since Petya doesn't like to cook, he wants to order these dishes in restaurants. Unfortunately, all dishes are prepared in different restaurants and therefore Petya needs to pick up his orders from $n$ different places. To speed up this process, he wants to order courier delivery at some restaurants. Thus, for each dish, there are two options for Petya how he can get it: - the dish will be delivered by a courier from the restaurant $i$, in this case the courier will arrive in $a_i$ minutes, - Petya goes to the restaurant $i$ on his own and picks up the dish, he will spend $b_i$ minutes on this. Each restaurant has its own couriers and they start delivering the order at the moment Petya leaves the house. In other words, all couriers work in parallel. Petya must visit all restaurants in which he has not chosen delivery, he does this consistently. For example, if Petya wants to order $n = 4$ dishes and $a = [3, 7, 4, 5]$, and $b = [2, 1, 2, 4]$, then he can order delivery from the first and the fourth restaurant, and go to the second and third on your own. Then the courier of the first restaurant will bring the order in $3$ minutes, the courier of the fourth restaurant will bring the order in $5$ minutes, and Petya will pick up the remaining dishes in $1 + 2 = 3$ minutes. Thus, in $5$ minutes all the dishes will be at Petya's house. Find the minimum time after which all the dishes can be at Petya's home.
If we order a courier with time $x$, then all couriers with time $y < x$ can also be ordered, since they do not change the answer (all couriers work in parallel). Therefore, you can sort the array by delivery time: couriers always bring the prefix of the array, and Petya will go for the suffix. The time for the prefix is the maximum of $a[i]$, and for the suffix it is the sum of $b[i]$. Therefore, you need to calculate the suffix sums and go through all the options.
[ "binary search", "greedy", "sortings" ]
1,400
null
1443
E
Long Permutation
A permutation is a sequence of integers from $1$ to $n$ of length $n$ containing each number exactly once. For example, $[1]$, $[4, 3, 5, 1, 2]$, $[3, 2, 1]$ — are permutations, and $[1, 1]$, $[4, 3, 1]$, $[2, 3, 4]$ — no. Permutation $a$ is lexicographically smaller than permutation $b$ (they have the same length $n$), if in the first index $i$ in which they differ, $a[i] < b[i]$. For example, the permutation $[1, 3, 2, 4]$ is lexicographically smaller than the permutation $[1, 3, 4, 2]$, because the first two elements are equal, and the third element in the first permutation is smaller than in the second. The next permutation for a permutation $a$ of length $n$ — is the lexicographically smallest permutation $b$ of length $n$ that lexicographically larger than $a$. For example: - for permutation $[2, 1, 4, 3]$ the next permutation is $[2, 3, 1, 4]$; - for permutation $[1, 2, 3]$ the next permutation is $[1, 3, 2]$; - for permutation $[2, 1]$ next permutation does not exist. You are given the number $n$ — the length of the initial permutation. The initial permutation has the form $a = [1, 2, \ldots, n]$. In other words, $a[i] = i$ ($1 \le i \le n$). You need to process $q$ queries of two types: - $1$ $l$ $r$: query for the sum of all elements on the segment $[l, r]$. More formally, you need to find $a[l] + a[l + 1] + \ldots + a[r]$. - $2$ $x$: $x$ times replace the current permutation with the next permutation. For example, if $x=2$ and the current permutation has the form $[1, 3, 4, 2]$, then we should perform such a chain of replacements $[1, 3, 4, 2] \rightarrow [1, 4, 2, 3] \rightarrow [1, 4, 3, 2]$. For each query of the $1$-st type output the required sum.
Let's notice that most of the elements in the original permutation will not change during all queries. Since the maximum permutation number does not exceed $10^{10}$, only the last $15$ elements of the permutation will change. Thus, after each query of the second type, you need to generate a permutation with the corresponding number. To answer a query of the first type, you need to split the segment into two parts: we sum the part of the segment that fell in the last $15$ elements with a simple loop and calculate the other part of the segment using the formula of an arithmetic progression.
[ "brute force", "math", "two pointers" ]
2,400
null
1444
A
Division
Oleg's favorite subjects are History and Math, and his favorite branch of mathematics is division. To improve his division skills, Oleg came up with $t$ pairs of integers $p_i$ and $q_i$ and for each pair decided to find the \textbf{greatest} integer $x_i$, such that: - $p_i$ is divisible by $x_i$; - $x_i$ is not divisible by $q_i$. Oleg is really good at division and managed to find all the answers quickly, how about you?
Let $y = p / x$. Let's assume, that there exists prime $a$, such that $a$ divides $y$, but $q$ is not divisible by $a$. Then we can multiply $x$ and $a$ and the result will still divide $p$, but will not be divisible by $q$. So for maximal $x$ there is no such $a$. Let's assume, that there are two primes $a$ and $b$, such that they both divide $y$, and both divide $q$. Because $q$ is not divisible by $x$, there exists some prime $c$ ($c$ can be equal to $a$ or $b$), such that number of occurrences of $c$ in $x$ is less than number of occurrences of $c$ in $q$. One of $a$ and $b$ is not equal to $c$, so if we will multiply $x$ and such number, the result will not be divisible by $q$. So for maximal $x$ there are no such $a$ and $b$. That means that $x = p /$(power of some prime divisor of $q$). So to find maximal $x$, we have to find all prime divisors of $q$ (we have to factorise $q$ for it in time $O(\sqrt{q})$) and for each of them divide $p$ by it until result is not divisible by $q$. That will be all our candidates for greatest $x$. We will do all of that in time $O(\sqrt{q} + \log q \cdot \log p)$.
[ "brute force", "math", "number theory" ]
1,500
null
1444
B
Divide and Sum
You are given an array $a$ of length $2n$. Consider a partition of array $a$ into two subsequences $p$ and $q$ of length $n$ each (each element of array $a$ should be in exactly one subsequence: either in $p$ or in $q$). Let's sort $p$ in non-decreasing order, and $q$ in non-increasing order, we can denote the sorted versions by $x$ and $y$, respectively. Then the cost of a partition is defined as $f(p, q) = \sum_{i = 1}^n |x_i - y_i|$. Find the sum of $f(p, q)$ over all correct partitions of array $a$. Since the answer might be too big, print its remainder modulo $998244353$.
No matter how we split the array, the cost of a partition will always be the same. Let's prove it. Without loss of generality we will consider that the array $a$ is sorted and denote by $L$ the set of elements with indexes from $1$ to $n$, and by $R$ the set of elements with indexes from $n + 1$ to $2n$. Then split the array $a$ into any two arrays $p$ and $q$ of size $n$. Let's sort $p$ in non-decreasing order and $q$ in non-increasing order. Any difference $|p_i - q_i|$ in our sum will be the difference of one element of $R$ and one element of $L$. If this is not the case, then there is an index $i$ such that both $p_i$ and $q_i$ belong to the same set. Let's assume that this is $L$. All elements with indexes less than or equal to $i$ in $p$ belong to $L$ ($i$ elements). All elements with indexes greater than or equal to $i$ in $q$ belong to $L$ ($n - (i - 1)$ elements). Then $L$ has at least $i + n - (i-1) = n + 1$ elements, but there must be exactly $n$. Contradiction. For the set $R$ the proof is similar. Then the answer to the problem is (the sum of the elements of the set $R$ minus the sum of the elements of the set $L$) multiplied by the number of partitions of the array $C^{n}_{2n}$. Complexity: $O(n \log n)$ (due to sorting)
[ "combinatorics", "math", "sortings" ]
1,900
null
1444
C
Team-Building
The new academic year has started, and Berland's university has $n$ first-year students. They are divided into $k$ academic groups, however, some of the groups might be empty. Among the students, there are $m$ pairs of acquaintances, and each acquaintance pair might be both in a common group or be in two different groups. Alice is the curator of the first years, she wants to host an entertaining game to make everyone know each other. To do that, she will select two different academic groups and then divide the students of those groups into two teams. The game requires that there are no acquaintance pairs inside each of the teams. Alice wonders how many pairs of groups she can select, such that it'll be possible to play a game after that. All students of the two selected groups must take part in the game. Please note, that the teams Alice will form for the game don't need to coincide with groups the students learn in. Moreover, teams may have different sizes (or even be empty).
You're given an undirected graph without loops and multiple edges, each vertex has some color from $1$ to $k$. Count the number of pairs of colors such that graph induced by vertices of these two colors will be bipartite. Let's check for each color whether the graph induced by it is bipartite (for example, using depth-first search). This can be done in $O(n + m)$. We will not use non-bipartite colors further since they can't be in any pairs. Now let's construct a slow solution that we will make faster later. Consider some color $x$. There're edges from vertices of this color to vertices of colors $y_1, y_2, \ldots, y_k$. Let's check whether the graphs induced by pairs $(x, y_1), (x, y_2), \ldots, (x, y_k)$ are bipartite (also using depth-first search), thereby finding out which colors cannot be in pair with $x$. The others can. After doing this for each color $x$, we can find the answer. How fast does this work? Notice that any edge between different colors we will use in DFS only two times. The problem are edges between vertices of the same color, we can use them up to $k$ times, and there can be a lot of them. Let's solve this problem and construct a faster solution. A graph is bipartite if and only if it doesn't contains odd cycles. Consider some connected bipartite component induced by color $x$. If a cycle goes through this component, it doesn't matter how exactly it does it. If the path of the cycle in this component ends in the same side where it has started, then it has even length, and odd otherwise. This fact lets us compress this component to two vertices (one for each side) connected by one edge. For each color this way we compress all components formed by it. Now we have the compressed graph, where all connected components are either one vertex or two vertices connected by one edge. Let's do the same process we did in slow solution and check every connected pair of colors whether the graph induced by it is bipartite. 
To check the pair $(x, y)$, for each edge between vertices of colors $x$ and $y$ in the original graph add a new edge to the compressed graph between corresponding vertices. After that use DFS to check if graph is bipartite, rollback the changes and do the same for all other pairs. How long does this work for one pair $(x, y)$? Let's start DFS only from components that were connected by added edges, since the others do not affect whether the graph is bipartite or not, but there can be a lot of them. This way DFS will use only added edges and some edges between vertices of the same color $x$ or $y$. However, there will be at most two times more of the latter than the added, because each added edge connects at most two new components, and each new component has at most one edge. So, we check one pair in the time proportional to amount of edges between its colors, and it sums up to $O(m)$ for all pairs. So, the whole solution works in $O(n + m)$ or $O(m \log n)$, depending on the implementation.
[ "data structures", "dfs and similar", "dsu", "graphs" ]
2,500
null
1444
D
Rectangular Polyline
One drew a closed polyline on a plane, that consisted only of vertical and horizontal segments (parallel to the coordinate axes). The segments alternated between horizontal and vertical ones (a horizontal segment was always followed by a vertical one, and vice versa). The polyline did not contain strict self-intersections, which means that in case any two segments shared a common point, that point was an endpoint for both of them (please consult the examples in the notes section). Unfortunately, the polyline was erased, and you only know the lengths of the horizonal and vertical segments. Please construct any polyline matching the description with such segments, or determine that it does not exist.
First, note that in a correct polyline, since the horizontal and vertical segments alternate, $h=v$: if this equality does not hold, the answer is negative. Now let's fix a vertex and go around the polyline in some direction. Then in the process of traversing, we will move in one of four possible directions: up, down, right or left. Since the polyline is closed, this means that we will move to the right in total by the same distance as we will move to the left in total. The same is true for moving up and down. This means that if we split all the segments into four sets named $Up$, $Down$, $Left$, $Right$, then the total length of the segments in $Up$ will be equal to the total length of the segments in $Down$, and the total length of the segments in $Right$ will be equal to the total length of the segments in $Left$. But it means that the set of lengths of all horizontal segments can be divided into two sets with the same sum. The same should hold for vertical segments. Let's check whether it is possible to divide the set of lengths of horizontal segments into two sets of the same sum. This classic problem can be solved by applying the dynamic programming method to solve the knapsack problem. The complexity of this solution will be $\mathcal O\left({nC^2}\right)$ If it is impossible to split horizontal or vertical lengths into two sets of equal length, the answer is "No". Now we will show how to construct a correct answer if such divisions exist. Let us divide all horizontal lengths into two sets of equal total length. We denote the smaller set as $R$, and the larger set as $L$. We will do the same with the set of lengths of vertical segments: we will denote the smaller set as $D$, and the larger one as $U$. Since $|R| \leq |L|$, $|R| \leq h / 2 = v / 2$. Similarly, we have $v / 2 \leq |U|$, from which it follows that $|R| \leq |U|$, $|D| \leq |L|$. Now let's divide all the segments into pairs as follows: each segment of $R$, we match with a segment from $U$. 
All remaining segments of $L$ are matched with one of the remaining vertical segments. Thus, we have divided all these segments into three sets of pairs: in the first one, a segment from $R$ is paired with a segment from $U$. In the second set a segment from $L$ is paired with a segment from $U$. In the third set a segment from $D$ is paired with a segment from $L$. From the first set of pairs, we make up the set of vectors directed up and to the right (from the pair (r, u), we construct the vector (r, u)). This way we can construct a set of vectors $A$. We will do the same with the second set of pairs (constructing a set of vectors $B$) and the third set of pairs (constructing a set of vectors $C$). For a better understanding, see the picture above. Note that the set $B$ may be empty, while the other two can not. Let's make a convex polyline from the vectors of $A$. In order to do this, sort them in ascending order by the polar angle and make a polyline from them in this order (see the picture below). Now we will replace each of the vectors of our polyline with two vectors: one vector directed to the right and one vector directed upwards. We will do the same for vectors from $C$: sort them in ascending order by the polar angle and make a convex polyline from them: Let's combine these two polylines so that the first one goes from the point $O$ to the point $A$ and the second one goes from the point $B$ to the point $O$: We don't have much left to do: we have to connect the points $A$ and $B$ using vectors from the set $B$. Let's take these vectors (directed up and to the left) in any arbitrary order, then, since the sum of all vectors is 0, the resulting polyline, if you draw it with the beginning at the point $A$, will end at the point $B$. 
Since the first two polylines were convex, this means that none of the points of the first two polylines will lie strictly inside the angle $AOB$, which means that if you replace each of the vectors of the third polyline with two vectors, one directed to the left and one directed upwards, the resulting closed polyline will not contain self-intersections. It is easy to show that the resulting polyline will be closed and will satisfy all the conditions of the problem:
[ "constructive algorithms", "dp", "geometry" ]
2,900
null
1444
E
Finding the Vertex
\textbf{This is an interactive problem.} You are given a tree — connected undirected graph without cycles. One vertex of the tree is special, and you have to find which one. You can ask questions in the following form: given an edge of the tree, which endpoint is closer to the special vertex, meaning which endpoint's shortest path to the special vertex contains fewer edges. You have to find the special vertex by asking the minimum number of questions in the worst case for a given tree. Please note that the special vertex might not be fixed by the interactor in advance: it might change the vertex to any other one, with the requirement of being consistent with the previously given answers.
Consider the optimal strategy. Some edge will be your first query, mark it with a number $0$. After that, build similar colourings (recursively) for components on both sides of the edge, but increase their weights by one (to have only one zero in total). This colouring corresponds to the strategy, and if $k$ is the maximum weight of an edge in it, then this strategy can find a vertex in $k+1$ queries in the worst case. This colouring has a wonderful property that helps us identify the vertex: on a path between any two edges with the same colour an edge with a smaller colour is present. And any colouring with this property corresponds to a proper strategy! (Each time you can ask an edge with the smallest weight in the current component) To make it easier for us, "invert" all weights in the colouring, mark the first edge with the weight $k-1$, and then use the same construction as we had before for our colouring, but now subtract $1$ from the edges (weights should remain non-negative). Now our goal is to find colouring with the min weight of the max edge, such that each pair of edges with the same colours have an edge with the larger colour between them. We will build this colouring using subtree DP. For the fixed colouring of a subtree of the vertex $v$, let's see which colours are visible if you will look from $v$ towards the subtree. The colour is visible, if there is such an edge of this colour, that there are no edges with the larger colour on a path from this edge to $v$. Potential function of our colouring is the sum $2^{c_1} + 2^{c_2} + \ldots + 2^{c_k}$, where $c_i$ are visible colours. Note that this value is a long number! Because our answer can be large. Lemma: we are interested only in the colouring of our subtree with the smallest potential. Assume that all subtrees of vertex $v$ are already coloured into colourings with the smallest potentials. 
Then we have to choose some weights of edges outgoing from $v$, such that after adding these edges to $v$, different subtrees won't have common visible colours (otherwise you will get a bad pair of edges of the same colour). You can color an edge from vertex $v$ to its child $u$ in a colour $c$, if a colour $c$ is not visible from $u$. After that, all weights smaller than $c$ will disappear from the set of visible colours, but the colour $c$ will be added. You have to change colourings of the subtrees in this way, to not have carries during the addition of the potentials (it will correspond to the situation without common visible colours). Under this constraint, we have to minimize the total sum, the potential of $v$. From this setting, the proof of the previous lemma is clear: for larger values of the potential, possible choices are not better. You have to solve a problem: you are given an array $a_1, a_2, \ldots, a_k$ of long binary numbers. You have to find an array with the smallest sum $b_1, b_2, \ldots, b_k$, such that $b_i > a_i$, and no carries will happen during the addition $b_1 + b_2 + \ldots + b_k$. You can solve this problem with a quite simple greedy algorithm: we will set bits greedily from left to right, and check that we can finish our goal with the fixed prefix. To check that you can get some answer with the fixed prefix of bits and the upper bound on the used bits, you can go from left to right and each time when you have to replace some number to the current bit, replace the number with the largest suffix. You can implement it in a naive way in $\mathcal{O}{(n^3)}$, but it also can be implemented in $\mathcal{O}{(n \log n)}$. You have to solve this subtask $n$ times, and we will get the solution with complexity $\mathcal{O}{(n \cdot T(n))} = \mathcal{O}{(n^2 \log n \ldots n^4)}$.
[ "brute force", "dfs and similar", "dp", "interactive", "trees" ]
3,500
null
1445
A
Array Rearrangment
You are given two arrays $a$ and $b$, each consisting of $n$ positive integers, and an integer $x$. Please determine if one can rearrange the elements of $b$ so that $a_i + b_i \leq x$ holds for each $i$ ($1 \le i \le n$).
It's enough to sort $a$ in non-decreasing order and sort $b$ in non-increasing order and check, whether $a_i + b_i \leq x$ for all $i$. Correctness can be proven by induction: let's show that if answer exists, there is a solution with minimum in $a$ and maximum in $b$ are paired. Let $m_a$ be minimum in $a$ and $m_b$ be maximum in $b$. Let $p$ be number paired with $m_a$ and $q$ be number paired with $m_b$. Since solution is correct, $m_a + p \leq x$ and $m_b + q \leq x$. Since $m_a \leq q$, $m_a + m_b \leq x$. Since $p \leq m_b$, $p + q \leq x$. So, $m_a$ can be paired with $m_b$.
[ "greedy", "sortings" ]
800
null
1445
B
Elimination
There is a famous olympiad, which has more than a hundred participants. The Olympiad consists of two stages: the elimination stage, and the final stage. At least a hundred participants will advance to the final stage. The elimination stage in turn consists of two contests. A result of the elimination stage is the total score in two contests, but, unfortunately, the jury lost the final standings and has only standings for the first and for the second contest separately. In each contest, the participants are ranked by their point score in non-increasing order. When two participants have a tie (earned the same score), they are ranked by their passport number (in accordance with local regulations, all passport numbers are distinct). In the first contest, the participant on the 100-th place scored $a$ points. Also, the jury checked all participants from the 1-st to the 100-th place (inclusive) in the first contest and found out that all of them have at least $b$ points in the second contest. Similarly, for the second contest, the participant on the 100-th place has $c$ points. And the jury checked that all the participants from the 1-st to the 100-th place (inclusive) have at least $d$ points in the first contest. After two contests, all participants are ranked by their total score in two contests in non-increasing order. When participants have the same total score, tie-breaking with passport numbers is used. The \textbf{cutoff score} to qualify to the final stage is the total score of the participant on the 100-th place. Given integers $a$, $b$, $c$, $d$, please help the jury determine the smallest possible value of the cutoff score.
The answer is at least $\max(a + b, d + c)$ because we have at least $100$ participants with the sum of $a + b$ and at least $100$ participants with the sum of $d + c$. If we have $99$ participants with points equal to $(a, c)$, and $2$ participants with points equal to $(a, b)$ and $(d, c)$, then the 100th participant will have a total of $\max(a + b, d + c)$ points, and the condition will be met, because $a \ge d$, $c \ge b$ and $a + c \ge \max(a + b, d + c)$.
[ "greedy", "math" ]
900
null
1446
A
Knapsack
You have a knapsack with the capacity of $W$. There are also $n$ items, the $i$-th one has weight $w_i$. You want to put some of these items into the knapsack in such a way that their total weight $C$ is at least half of its size, but (obviously) does not exceed it. Formally, $C$ should satisfy: $\lceil \frac{W}{2}\rceil \le C \le W$. Output the list of items you will put into the knapsack or determine that fulfilling the conditions is impossible. If there are several possible lists of items satisfying the conditions, you can output any. Note that you \textbf{don't} have to maximize the sum of weights of items in the knapsack.
Are there any items which you can put in the knapsack to fulfill the goal with one item? What happens if there are none? If there is an item of size $C$ satisfying: $\lceil \frac{W}{2}\rceil \le C \le W$, it is enough to output only that item. Otherwise, we should exclude items which are larger than the size of the knapsack and take a closer look at the situation. Consider greedily adding items in any order until we find a valid solution or run out of items. This is correct because all items have sizes less than $\frac{W}{2}$, so it is not possible to exceed knapsack size by adding one item in a situation where the sum of items $C$ doesn't satisfy constraint $\lceil \frac{W}{2}\rceil \le C \le W$. This gives us a solution in $O(n)$. From the analysis above, we can also conclude that greedily adding the items from largest to smallest if they still fit in the knapsack will always find a solution if one exists. This gives us a solution in $O(n \log n)$.
[ "constructive algorithms", "greedy", "sortings" ]
1,300
null
1446
B
Catching Cheaters
You are given two strings $A$ and $B$ representing essays of two students who are suspected cheaters. For any two strings $C$, $D$ we define their similarity score $S(C,D)$ as $4\cdot LCS(C,D) - |C| - |D|$, where $LCS(C,D)$ denotes the length of the Longest Common \textbf{Subsequence} of strings $C$ and $D$. You believe that only some part of the essays could have been copied, therefore you're interested in their \textbf{substrings}. Calculate the maximal similarity score over all pairs of substrings. More formally, output maximal $S(C, D)$ over all pairs $(C, D)$, where $C$ is some substring of $A$, and $D$ is some substring of $B$. If $X$ is a string, $|X|$ denotes its length. A string $a$ is a \textbf{substring} of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters from the beginning and several (possibly, zero or all) characters from the end. A string $a$ is a \textbf{subsequence} of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters. Pay attention to the difference between the \textbf{substring} and \textbf{subsequence}, as they both appear in the problem statement. You may wish to read the Wikipedia page about the Longest Common Subsequence problem.
This is a dynamic programming problem. Recall the DP calculating the Longest Common Substring for two strings. What similarities are there in our setup, and what differs? If a substring has a negative score, we can throw it away and start from scratch. Let $DP[i][j]$ be the maximum similarity score if we end the first substring with $A_i$ and the second substring with $B_j$. We will also allow the corresponding most similar string to be empty so that $DP[i][j]$ is always at least $0$. It turns out that the fact we need to search for substrings of our words is not a big problem, because we can think of extending the previous ones. In fact, we have just two possibilities: If $A_i$ and $B_j$ are the same letters, we say that $DP[i][j] = \max(DP[i][j], DP[i-1][j-1] + 2)$ as the new letter will increase the LCS by $1$, but both of the strings increase by one in length, so the total gain is $4-1-1=2$. In every case, we can refer to $DP[i-1][j]$ or $DP[i][j-1]$ to extend one of the previous substrings, but not the LCS, so: $DP[i][j] = \max(DP[i-1][j], DP[i][j-1]) - 1$. An inquisitive reader may wonder why it doesn't hurt to always apply case $2$ in calculations, so clearing the doubts, it's important to informally notice that we never get a greater $LCS$ this way so wrong calculations only lead to the worse score, and that our code will always find a sequence of transitions which finds the true $LCS$ as well. Implementing the formulas gives a really short $O(n\cdot m)$ solution.
[ "dp", "strings" ]
1,800
null
1446
C
Xor Tree
For a given sequence of \textbf{distinct} non-negative integers $(b_1, b_2, \dots, b_k)$ we determine if it is \textbf{good} in the following way: - Consider a graph on $k$ nodes, with numbers from $b_1$ to $b_k$ written on them. - For every $i$ from $1$ to $k$: find such $j$ ($1 \le j \le k$, $j\neq i$), for which $(b_i \oplus b_j)$ \textbf{is the smallest} among all such $j$, where $\oplus$ denotes the operation of bitwise XOR (https://en.wikipedia.org/wiki/Bitwise_operation#XOR). Next, draw an \textbf{undirected} edge between vertices with numbers $b_i$ and $b_j$ in this graph. - We say that the sequence is \textbf{good} if and only if the resulting graph forms a \textbf{tree} (is connected and doesn't have any simple cycles). It is possible that for some numbers $b_i$ and $b_j$, you will try to add the edge between them twice. Nevertheless, you will add this edge only once. You can find an example below (the picture corresponding to the first test case). Sequence $(0, 1, 5, 2, 6)$ \textbf{is not} good as we \textbf{cannot} reach $1$ from $5$. However, sequence $(0, 1, 5, 2)$ \textbf{is} good. You are given a sequence $(a_1, a_2, \dots, a_n)$ of \textbf{distinct} non-negative integers. You would like to remove some of the elements (possibly none) to make the \textbf{remaining} sequence good. What is the minimum possible number of removals required to achieve this goal? It can be shown that for any sequence, we can remove some number of elements, leaving at least $2$, so that the remaining sequence is good.
Is it possible that the graph formed has a cycle? How to evaluate whether a sequence is good? Since we have $n$ edges and two of them must coincide (the pair of numbers with the smallest xor), we will have at most $n-1$ edges in the graph. Thus, we only need to check if the resulting graph is connected. How to do it? Let's look at the most significant bit, and group the numbers into two sets $S_0$ and $S_1$, depending on the value of the bit. What can we say about sizes of $S_0$ and $S_1$ in a good sequence? It is easy to see that if $2\leq|S_0|$ and $2\leq|S_1|$ then the whole sequence isn't good, as for any number in $S_i$ the smallest xor will be formed by another number in $S_i$. So there won't be any connections between the numbers forming the two sets. Thus, one of the sets must have no more than one element. If that set contains no elements, then the solution can obviously be calculated recursively. If it instead had one element, then as long as we make the other sequence good, it will be connected to something - and thus the whole graph. Let $F(S)$ be the maximum possible number of values we can take from $S$ so that they form a good sequence. Divide the numbers into $S_0$ and $S_1$ as above. If $S_0$ or $S_1$ is empty, strip the most significant bit and solve the problem recursively. Otherwise, the result is $1 + max(F(S_0), F(S_1))$. Straightforward recursive calculation of this formula gives the runtime of $O(n\cdot 30)$, as all numbers are smaller than $2^{30}$.
[ "binary search", "bitmasks", "data structures", "divide and conquer", "dp", "trees" ]
2,100
null
1446
D1
Frequency Problem (Easy Version)
\textbf{This is the easy version of the problem. The difference between the versions is in the constraints on the array elements. You can make hacks only if all versions of the problem are solved.} You are given an array $[a_1, a_2, \dots, a_n]$. Your goal is to find the length of the longest subarray of this array such that the most frequent value in it is \textbf{not} unique. In other words, you are looking for a subarray such that if the most frequent value occurs $f$ times in this subarray, then at least $2$ different values should occur exactly $f$ times. An array $c$ is a subarray of an array $d$ if $c$ can be obtained from $d$ by deletion of several (possibly, zero or all) elements from the beginning and several (possibly, zero or all) elements from the end.
What can you say about the values which are the most frequent ones in the optimal solution? Let $D$ be the most frequent value in the whole sequence. If $D$ is not unique, we output $n$. Otherwise, we can prove that one of the most frequent values in an optimal solution is $D$. We'll prove this by contradiction. Consider an optimal interval $[a, b]$ in which $D$ is not in the set of most frequent values. Let's think of the process of expanding the interval to $[a-1, b]$, $[a-2, b]$, ..., $[1, b]$, $[1, b+1]$, ..., $[1 ,n]$. Since $D$ is most frequent in the entire sequence, at some point in this process it will appear the same number of times as at least one of the other most frequent values. The corresponding interval will also satisfy the task's conditions, hence $[a, b]$ cannot be an optimal interval if $D$ does not appear most frequently. For each value $V$ we can solve the task when $(D, V)$ are the most-frequent values independently, ignoring other values. Now we would like to solve the following subproblem: find the longest interval of sum 0 where elements in the array are either $1$ ($V$), $-1$ ($D$) or $0$ (other values). The solution's complexity should be proportional to the frequency of $V$ in order to obtain an efficient solution for the entire task. You might be worried that we can't simply ignore the other values, since they may end up being more frequent than ($D$, $V$) in the interval that we've found. This might be true, but can only be an underestimation of the optimal interval's length. A similar expanding argument to the proof above shows that: we never overestimate the result, and for the values of $(D, V)$ which form the optimal interval we find exactly the right interval value. For the easy version, it's sufficient to consider all pairs ($D$, $V$) in linear time, by using the standard algorithm which computes the longest interval with sum $0$. Thus, we get a solution with complexity $O(100\cdot n)$.
[ "data structures", "greedy" ]
2,600
null
1446
D2
Frequency Problem (Hard Version)
\textbf{This is the hard version of the problem. The difference between the versions is in the constraints on the array elements. You can make hacks only if all versions of the problem are solved.} You are given an array $[a_1, a_2, \dots, a_n]$. Your goal is to find the length of the longest subarray of this array such that the most frequent value in it is \textbf{not} unique. In other words, you are looking for a subarray such that if the most frequent value occurs $f$ times in this subarray, then at least $2$ different values should occur exactly $f$ times. An array $c$ is a subarray of an array $d$ if $c$ can be obtained from $d$ by deletion of several (possibly, zero or all) elements from the beginning and several (possibly, zero or all) elements from the end.
A bit of thinking may lead you to the observation that solving the task in $O(n log n)$ or even $O(n log^2 n)$ isn't pleasant. The constraints are low enough to allow an $O(n \sqrt n)$ solution, how about giving it a try? If an element $V$ appears more than $\sqrt n$ times, we can simply brute-force this pair with partial sums $(D,V)$ in $O(n)$. For the other elements, for all the appearances of $V$ we'll consider only at most $|V|+1$ neighboring occurrences of $D$ to search for the optimal interval. We can generalize the brute-force solution to work for this case too, by writing a function that solves just for a vector of interesting positions. Don't forget to take into account extending intervals with zeroes as well. With proper preprocessing, this gives a solution in $O(|V|^2)$ per value $V$. Therefore, total runtime is bounded by $O(n \sqrt n)$.
[ "data structures", "greedy", "two pointers" ]
3,000
null
1446
E
Long Recovery
A patient has been infected with an unknown disease. His body can be seen as an infinite grid of triangular cells which looks as follows: Two cells are neighboring if they share a side. Therefore, each cell ($x$, $y$) has exactly three neighbors: - ($x+1$, $y$) - ($x-1$, $y$) - ($x+1$, $y-1$) if $x$ is even and ($x-1$, $y+1$) otherwise. Initially some cells are infected, all the others are healthy. The process of recovery begins. Each second, for \textbf{exactly one} cell (even though there might be multiple cells that could change its state) one of the following happens: - A healthy cell with at least $2$ infected neighbors also becomes infected. - An infected cell with at least $2$ healthy neighbors also becomes healthy. If no such cell exists, the process of recovery stops. Patient is considered recovered if the process of recovery has stopped and all the cells are healthy. We're interested in a \textbf{worst-case} scenario: is it possible that the patient never recovers, or if it's not possible, what is the maximum possible duration of the recovery process?
We're interested in the longest possible recovery path, so let's imagine choosing the longest sequence of operations ourselves. How long can this process last? Let $P$ be the number of pairs of neighboring cells with different state. Consider what happens to $P$ after an operation where we change the state of a cell $c$. It can be seen that $P$ decreases by 1 if $c$ had a neighbor with the same state as $c$, and $P$ decreases by 3 if all the neighbors of $c$ had a different state. Let's call the first kind of operation cheap and the other expensive. We now know that the process cannot last forever. When is it possible that the patient never recovers? If at some point there is a cycle of infected cells, then none of them can ever become healthy. Conversely, if there is no cycle of infected cells, and not all cells are healthy, then it is possible to make some infected cell healthy. If $A$ is the initial set of infected cells, let $\overline A$ be the set of cells which can eventually be infected. By the above, we should output SICK if and only if $\overline A$ contains a cycle. We can compute $\overline A$ using a BFS. No cell with coordinates outside of $[0, 500) x [0, 500)$ will ever become infected, so we can safely make the BFS linear in the number of cells in this region. Let's assume that $\overline A$ contains no cycle. We want to determine the minimum possible number of expensive operations used during the process of making all the cells healthy. We can consider each connected component of $\overline A$ separately and add the results, so let's suppose $\overline A$ is connected. The very last operation must be expensive, since it changes the state of an infected cell with three healthy neighbors. There is one other case where we need an expensive operation: if there's no way to make a cheap operation at the very beginning. In fact, this can happen only if $A$ consists of three cells arranged in a 'triforce'. 
It turns out that we never need any other expensive operations. So, assuming $\overline A$ contains no cycle, we can compute the longest possible recovery period as follows. Compute the number $P$ of pairs of neighboring cells with different states. Subtract twice the number of connected components of $\overline A$. Subtract twice the number of connected components of $\overline A$ for which the corresponding cells in $A$ are just three cells arranged in a triforce. Note that the observation that the bad beginning component is only a triforce is actually not necessary to code a working solution (one can just check if there's a cheap move at the beginning, just remember that we're inspecting components of $\overline A$), but it will help us to prove the solution formally. Let's now prove that we never need any other expensive operations than the ones described. Suppose $A$ is a set of at least 2 infected cells, such that (i) $\overline A$ is cycle-free, (ii) $\overline A$ is connected, and (iii) $A$ is not a triforce. We claim that there is a some cheap operation, such that the resulting set $A'$ of infected cells still satisfies (i), (ii) and (iii). We will always have (i), since $\overline{A'} \subseteq \overline A$, so we just need to worry about (ii) and (iii), i.e. we need to ensure that $\overline{A'}$ is connected and that $A'$ is not a triforce. We need to consider a few different cases. Suppose there is a cheap infection operation. Then just perform it. The resulting set $A'$ will have $\overline{A'} = \overline A$ still connected, and $A'$ will not be a triforce since it has adjacent infected cells. Suppose there is a cheap infection operation. Then just perform it. The resulting set $A'$ will have $\overline{A'} = \overline A$ still connected, and $A'$ will not be a triforce since it has adjacent infected cells. Suppose there is no cheap infection operation, but there is a cheap operation turning a cell $c$ from infected to healthy. 
Consider what happens if we perform it. Since there is no cheap infection operation, any cell in $\overline A \setminus A$ must have three neighbors in $A$. So it has at least two neighbors in $A \setminus {c}$. So it lies in $\overline{A \setminus {c}}$. So $\overline{A\setminus{c}} \supseteq \overline A \setminus{c}$. We claim that (ii) is satisfied, i.e. $\overline{A\setminus{c}}$ is connected. If $c$ is a leaf in $\overline A$, this is clear, since removing a leaf from a tree does not disconnect it. If $c$ is not a leaf in $\overline A$, then it has at least two neighbors in $\overline{A\setminus{c}}$, so $\overline{A\setminus{c}} = \overline A$ is again connected. So the only possible issue is with (iii), i.e. $A \setminus {c}$ might be a triforce. In this case, $A$ must be a triforce with an extra cell added, and we should just have chosen another $c$. Suppose there is no cheap infection operation, but there is a cheap operation turning a cell $c$ from infected to healthy. Consider what happens if we perform it. Since there is no cheap infection operation, any cell in $\overline A \setminus A$ must have three neighbors in $A$. So it has at least two neighbors in $A \setminus {c}$. So it lies in $\overline{A \setminus {c}}$. So $\overline{A\setminus{c}} \supseteq \overline A \setminus{c}$. We claim that (ii) is satisfied, i.e. $\overline{A\setminus{c}}$ is connected. If $c$ is a leaf in $\overline A$, this is clear, since removing a leaf from a tree does not disconnect it. If $c$ is not a leaf in $\overline A$, then it has at least two neighbors in $\overline{A\setminus{c}}$, so $\overline{A\setminus{c}} = \overline A$ is again connected. So the only possible issue is with (iii), i.e. $A \setminus {c}$ might be a triforce. In this case, $A$ must be a triforce with an extra cell added, and we should just have chosen another $c$. Suppose there is no cheap operation. 
This means that no elements of $A$ can be adjacent (since a tree with more than one vertex has a leaf). Moreover, any element of $\overline A \setminus A$ is surrounded by elements of $A$. Because $A$ has at least 2 cells and $\overline A$ is connected, we must somewhere have three cells in $A$ arranged in a triforce. See the picture below. The green cells are not in $A$ since no elements of $A$ are adjacent, and the red cells are not in $A$ since $\overline A$ is cycle-free. So $A$ cannot have any cells other than these $3$, since $\overline A$ is connected. Suppose there is no cheap operation. This means that no elements of $A$ can be adjacent (since a tree with more than one vertex has a leaf). Moreover, any element of $\overline A \setminus A$ is surrounded by elements of $A$. Because $A$ has at least 2 cells and $\overline A$ is connected, we must somewhere have three cells in $A$ arranged in a triforce. See the picture below. The green cells are not in $A$ since no elements of $A$ are adjacent, and the red cells are not in $A$ since $\overline A$ is cycle-free. So $A$ cannot have any cells other than these $3$, since $\overline A$ is connected. This covers all cases, finishing the proof.
[ "constructive algorithms", "dfs and similar" ]
3,500
null
1446
F
Line Distance
You are given an integer $k$ and $n$ distinct points with integer coordinates on the Euclidean plane, the $i$-th point has coordinates $(x_i, y_i)$. Consider a list of all the $\frac{n(n - 1)}{2}$ pairs of points $((x_i, y_i), (x_j, y_j))$ ($1 \le i < j \le n$). For every such pair, write out the distance from the line through these two points to the origin $(0, 0)$. Your goal is to calculate the $k$-th smallest number among these distances.
Binary search is your friend. To determine if the answer is bigger or smaller than $r$, we need to count the number of pairs of points $A$, $B$ such that $d(O, AB) > r$. We need to reformulate the condition $d(O, AB) > r$ to make the counting easier. Draw a circle of radius $r$ centered on $O$, and consider two points $A$, $B$ strictly outside the circle. Note that $d(O, AB) > r$ if and only if $AB$ does not intersect this circle. Now draw the tangents from $A$, $B$ to the circle. Let the corresponding points on the circle be $A_1, A_2, B_1, B_2$. Observation: the line segments $A_1 A_2, B_1 B_2$ intersect if and only if $AB$ does not intersect the circle. Moreover, if we pick polar arguments $a_1, a_2, b_1, b_2 \in [0, 2\pi)$, for the points in the circle, such that $a_1 < a_2$ and $b_1 < b_2$, then the line segments intersect if the intervals $[a_1, a_2], [b_1, b_2]$ overlap. We don't need to worry too much about precision errors, because if $r$ is close to the real answer, then it doesn't matter what our calculations return. (To compute $a_1$, note that $\cos \angle AOA_1 = r / \left| OA \right|$. So we get $a_1, a_2$ by adding / subtracting $\cos^{-1} \frac r {\left|OA\right|}$ from the argument of $A$.) We can prove the observation by considering three different cases, shown in pictures. One circle segment contains the other, and the line $AB$ intersects the circle outside the segment $AB$. The circle segments are disjoint, and the line segment $AB$ intersects the circle. The circle segments partially intersect, and the line $AB$ does not intersect the circle. Now we have computed some intervals and want to find the number of pairs of intervals which partially overlap. To do this, first sort the endpoints of the intervals. Do some coordinate compression on the left endpoints. Build a Fenwick tree on the compressed coordinates. Sweep through the endpoints. Whenever you see a left endpoint, add it to the tree. 
Whenever you see a right endpoint, remove it from the tree, query the tree for the number of points greater than the corresponding left endpoint, and add the result to the answer.
[ "binary search", "data structures", "geometry" ]
3,200
null
1447
A
Add Candies
There are $n$ bags with candies, initially the $i$-th bag contains $i$ candies. You want all the bags to contain an equal amount of candies in the end. To achieve this, you will: - Choose $m$ such that $1 \le m \le 1000$ - Perform $m$ operations. In the $j$-th operation, you will pick one bag and add $j$ candies to all bags apart from the chosen one. Your goal is to find a valid sequence of operations after which all the bags will contain an equal amount of candies. - It can be proved that for the given constraints such a sequence always exists. - You \textbf{don't} have to minimize $m$. - If there are several valid sequences, you can output \textbf{any}.
We're only interested in differences between the elements. Is there another way to express the operation? The operation in the $i$-th turn is equivalent to selecting one element and subtracting $i$ from it. The sequence $1$, $2$, ..., $n$ satisfies task's constraints. After the additions all positions will contain $\frac{n\cdot(n+1)}{2}$ candies.
[ "constructive algorithms", "math" ]
800
null
1447
B
Numbers Box
You are given a rectangular grid with $n$ rows and $m$ columns. The cell located on the $i$-th row from the top and the $j$-th column from the left has a value $a_{ij}$ written in it. You can perform the following operation any number of times (possibly zero): - Choose any two adjacent cells and multiply the values in them by $-1$. Two cells are called adjacent if they share a side. Note that you can use a cell more than once in different operations. You are interested in $X$, the \textbf{sum} of all the numbers in the grid. What is the maximum $X$ you can achieve with these operations?
We want to minimize the number of negative numbers as much as we can by applying the operations. What is the minimum possible number of those negatives? Let $X$ be the number of non-zero numbers in the grid, and let's see what happens in different scenarios. both cells have negative numbers, then $X$ goes down by $2$. both cells have positive numbers, then $X$ goes up by $2$. one cell has a positive number while the other one has a negative number, then $X$ stays the same. It is important to notice that we can apply this operation not only for the two neighboring cells, but for any two - to achieve this effect we apply this operation on any path between the cells consecutively. The parity of $X$ never changes. So, for even $X$ the answer is the sum of the absolute value of all numbers, $S$. Otherwise, one element will not be positive in the end -- so it's best to choose the one with minimum absolute value, $V$, and subtract $2\cdot V$ from the sum. The existence of zeroes doesn't really change anything, both formulas output the same value in such a case. This gives us a solution in $O(N\cdot M)$
[ "greedy", "math" ]
1,000
null
1450
A
Avoid Trygub
A string $b$ is a subsequence of a string $a$ if $b$ can be obtained from $a$ by deletion of several (possibly, zero or all) characters. For example, "xy" is a subsequence of "xzyw" and "xy", but not "yx". You are given a string $a$. Your task is to reorder the characters of $a$ so that "trygub" is not a subsequence of the resulting string. In other words, you should find a string $b$ which is a permutation of symbols of the string $a$ and "trygub" is not a subsequence of $b$. We have a truly marvelous proof that any string can be arranged not to contain "trygub" as a subsequence, but this problem statement is too short to contain it.
The string "trygub" is not sorted alphabetically, and a subsequence of a sorted string is necessarily sorted. So, if we sort the input string, it will be a solution. Complexity is $O(n)$ with counting sort.
[ "constructive algorithms", "sortings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Rearrange the string so that "trygub" cannot appear as a subsequence.
// Every subsequence of a sorted string is itself sorted, and "trygub" is
// not in alphabetical order, so sorting the input is always a valid answer.
static void solveCase() {
    int len;
    string essay;
    cin >> len >> essay;
    sort(essay.begin(), essay.end());
    cout << essay << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int cases;
    cin >> cases;
    for (int i = 0; i < cases; ++i) solveCase();
}
1450
B
Balls of Steel
You have $n$ \textbf{distinct} points $(x_1, y_1),\ldots,(x_n,y_n)$ on the plane and a non-negative integer parameter $k$. Each point is a microscopic steel ball and $k$ is the attract power of a ball when it's charged. The attract power is the same for all balls. In one operation, you can select a ball $i$ to charge it. Once charged, \textbf{all} balls with Manhattan distance at most $k$ from ball $i$ move to the position of ball $i$. Many balls may have the same coordinate after an operation. More formally, for all balls $j$ such that $|x_i - x_j| + |y_i - y_j| \le k$, we assign $x_j:=x_i$ and $y_j:=y_i$. \begin{center} An example of an operation. After charging the ball in the center, two other balls move to its position. On the right side, the red dot in the center is the common position of those balls. \end{center} Your task is to find the minimum number of operations to move all balls to the same position, or report that this is impossible.
We claim the answer is always $-1$ or $1$. In fact, suppose in the first operation of a solution we select a point $p$. If we aren't done, there will be at least one point with distance more than $k$ from $p$. However, there will be no point within distance $k$ of $p$, no matter how we perform future operations. So it is impossible for $p$ to merge with a new point, and a solution with more than $1$ operation will be impossible. To see if the answer is $1$, we should check if there is some point $p$ within distance $k$ from all other points. Otherwise, the answer is $-1$. Complexity is $O(n^2)$ to compute pairwise distances.
[ "brute force", "geometry", "greedy" ]
1,000
#include <bits/stdc++.h>
using namespace std;

// The answer is 1 if some ball lies within Manhattan distance k of every
// other ball (charging it once gathers all balls), otherwise -1: after a
// failed first charge no remaining ball can ever get close enough again.
static void solveCase() {
    int n, k;
    cin >> n >> k;
    vector<int> x(n), y(n);
    for (int i = 0; i < n; ++i) cin >> x[i] >> y[i];
    int answer = -1;
    for (int i = 0; i < n; ++i) {
        bool reachesAll = true;
        for (int j = 0; j < n; ++j) {
            if (abs(x[i] - x[j]) + abs(y[i] - y[j]) > k) {
                reachesAll = false;
                break;  // ball i cannot attract ball j, try the next center
            }
        }
        if (reachesAll) answer = 1;
    }
    cout << answer << "\n";
}

int main() {
    ios_base::sync_with_stdio(0), cin.tie(0);
    int t;
    cin >> t;
    while (t--) solveCase();
}