contest_id
stringlengths
1
4
index
stringclasses
43 values
title
stringlengths
2
63
statement
stringlengths
51
4.24k
tutorial
stringlengths
19
20.4k
tags
listlengths
0
11
rating
int64
800
3.5k
code
stringlengths
46
29.6k
1747
E
List Generation
For given integers $n$ and $m$, let's call a pair of arrays $a$ and $b$ of integers \textbf{good}, if they satisfy the following conditions: - $a$ and $b$ have the same length, let their length be $k$. - $k \ge 2$ and $a_1 = 0, a_k = n, b_1 = 0, b_k = m$. - For each $1 < i \le k$ the following holds: $a_i \geq a_{i - 1}$, $b_i \geq b_{i - 1}$, and $a_i + b_i \neq a_{i - 1} + b_{i - 1}$. Find the sum of $|a|$ over all good pairs of arrays $(a,b)$. Since the answer can be very large, output it modulo $10^9 + 7$.
Change your point of view from array to grid. Think of pair of arrays as paths in grid of size $(n + 1) \times (m + 1)$. First try counting number of good pair of arrays. Number of good pairs of arrays comes out to be $\sum\limits_{k = 0}^{\min(n,m)} \binom{n}{k} \cdot \binom{m}{k} \cdot 2^{n + m - k - 1}$ Given problem is equivalent to: You are currently at cell $(0,0)$. From any cell $(x,y)$ you can jump to cell $(x',y')$ such that $x \leq x' \leq n$ , $y \leq y' \leq m$ and $(x,y) \neq (x',y')$. Find sum of number of visited cells over all paths starting from $(0,0)$ and ending at $(n,m)$. Denote the required value by $f(n,m)$. Directly thinking in $2$ dimensions is difficult, so let's first solve for the case when $n = 0$ or $m = 0$. WLOG, assuming $m = 0$. We can solve this case using some binomials. $f(n,0) = 2^{n - 1} \cdot \frac{n + 3}{2}$, $n \gt 0$. Now, we can divide all possible paths from $(0,0)$ to $(n,m)$ into several classes of one dimensional paths. These classes are defined by what I call breakpoints. When we pass a breakpoint, we turn right. Hence we can group paths by fixing the number of breakpoints. WLOG, assuming $n \geq m$. For $k$ breakpoints there are $\binom{n}{k} \cdot \binom{m}{k}$ ways to select for $0 \leq k \leq m$. For a path with $k$ breakpoints, $n + m - k$ points are optional, that is there will exist $2^{n + m - k}$ paths with $k$ breakpoints. It is not difficult to see that the sum of the number of visited cells over paths with $k$ breakpoints turns out to be $f(n + m - k,0) + 2^{n + m - k - 1}\cdot k$. Hence we can write $f(n,m) = \sum\limits_{k = 0}^{m} \binom{n}{k} \cdot \binom{m}{k} \cdot (f(n + m - k,0) + 2^{n + m - k - 1}\cdot k)$ Time complexity of the solution would be $\mathcal{O}(\min(n,m))$
[ "combinatorics", "dp", "math" ]
2,900
// Codeforces 1747E - List Generation
// Computes the sum of |a| over all good pairs (a, b) modulo 1e9+7 via the
// editorial's closed form: iterate over the number of breakpoints k and add
// C(n,k) * C(m,k) * 2^(n+m-k-1) * (k + (n+m-k+1)/2 + 1).
#include <iostream>
#include <vector>
#include <algorithm>
#include <utility>
#include <cstdint>
#include <cassert>
using namespace std;

using ll = long long;

const ll MOD = 1000000007;

// Modular-arithmetic wrapper for the fixed modulus MOD = 1e9+7.
struct mod_int {
    ll val;

    mod_int(long long v = 0) {
        if (v < 0) v = v % MOD + MOD;
        if (v >= MOD) v %= MOD;
        val = v;
    }

    // Modular inverse of a modulo m via the extended Euclidean algorithm.
    static ll mod_inv(ll a, ll m = MOD) {
        ll g = m, r = a, x = 0, y = 1;
        while (r != 0) {
            ll q = g / r;
            g %= r; swap(g, r);
            x -= q * y; swap(x, y);
        }
        return x < 0 ? x + m : x;
    }

    explicit operator ll() const { return val; }

    mod_int& operator+=(const mod_int &rhs) {
        val += rhs.val;
        if (val >= MOD) val -= MOD;
        return *this;
    }

    mod_int& operator-=(const mod_int &rhs) {
        val -= rhs.val;
        if (val < 0) val += MOD;
        return *this;
    }

    // 64-by-32 modulo; on 32-bit Windows a single `divl` beats a generic
    // 64-bit `%`, everywhere else the plain operator is used.
    static unsigned fast_mod(uint64_t x, unsigned m = MOD) {
#if !defined(_WIN32) || defined(_WIN64)
        return x % m;
#endif
        unsigned x_high = x >> 32, x_low = (unsigned) x;
        unsigned quot, rem;
        asm("divl %4\n"
            : "=a" (quot), "=d" (rem)
            : "d" (x_high), "a" (x_low), "r" (m));
        return rem;
    }

    mod_int& operator*=(const mod_int &rhs) {
        val = fast_mod((uint64_t) val * rhs.val);
        return *this;
    }

    mod_int& operator/=(const mod_int &rhs) { return *this *= rhs.inv(); }

    friend mod_int operator+(const mod_int &a, const mod_int &b) { return mod_int(a) += b; }
    friend mod_int operator-(const mod_int &a, const mod_int &b) { return mod_int(a) -= b; }
    friend mod_int operator*(const mod_int &a, const mod_int &b) { return mod_int(a) *= b; }
    friend mod_int operator/(const mod_int &a, const mod_int &b) { return mod_int(a) /= b; }

    mod_int& operator++() { val = (val == MOD - 1 ? 0 : val + 1); return *this; }
    mod_int& operator--() { val = (val == 0 ? MOD - 1 : val - 1); return *this; }
    mod_int operator++(int) { mod_int before = *this; ++*this; return before; }
    mod_int operator--(int) { mod_int before = *this; --*this; return before; }
    mod_int operator-() const { return val == 0 ? 0 : MOD - val; }

    bool operator==(const mod_int &rhs) const { return val == rhs.val; }
    bool operator!=(const mod_int &rhs) const { return val != rhs.val; }

    mod_int inv() const { return mod_inv(val); }

    // Binary exponentiation; exponent must be non-negative.
    mod_int pow(long long p) const {
        assert(p >= 0);
        mod_int base = *this, result = 1;
        while (p > 0) {
            if (p & 1) result *= base;
            base *= base;
            p >>= 1;
        }
        return result;
    }

    friend ostream& operator<<(ostream &stream, const mod_int &m) { return stream << m.val; }
    friend istream& operator>>(istream &stream, mod_int &m) { return stream >> m.val; }
};

const ll N = 5e6 + 5;

// fact[i] = i!, inv[i] = (i!)^{-1}, invv[i] = i^{-1}, all modulo MOD.
mod_int fact[N], inv[N], invv[N];

// Precomputes factorials and inverse factorials up to N in O(N) using the
// linear recurrence i^{-1} = -(MOD / i) * (MOD % i)^{-1} (mod MOD).
void init() {
    fact[0] = inv[0] = inv[1] = 1;
    invv[0] = invv[1] = 1;
    for (ll i = 1; i < N; i++) fact[i] = i * fact[i - 1];
    for (ll i = 2; i < N; i++) {
        invv[i] = (MOD - MOD / i) * invv[MOD % i];
        inv[i] = invv[i] * inv[i - 1];
    }
}

// Binomial coefficient C(n, r) modulo MOD; 0 when r is out of range.
mod_int C(ll n, ll r) {
    if (r > n || r < 0) return 0;
    return fact[n] * inv[n - r] * inv[r];
}

// Reads one test case (n, m) and prints the answer.
void solve() {
    ll n, m;
    cin >> n >> m;
    if (m > n) swap(n, m);

    // O((n*m)^2) reference dp over grid jumps, kept for stress testing only.
    auto brute = [&]() {
        vector<vector<mod_int>> ways(n + 1, vector<mod_int>(m + 1));
        vector<vector<mod_int>> visited(n + 1, vector<mod_int>(m + 1));
        ways[0][0] = 1;
        visited[0][0] = 0;
        for (ll i = 0; i <= n; i++) {
            for (ll j = 0; j <= m; j++) {
                if (i + j == 0) continue;
                for (ll x = 0; x <= i; x++) {
                    for (ll y = 0; y <= j; y++) {
                        if (x + y == i + j) continue;  // a jump must change x+y
                        ways[i][j] += ways[x][y];
                        visited[i][j] += visited[x][y] + ways[x][y];
                    }
                }
            }
        }
        return visited[n][m] + ways[n][m];
    };
    (void)brute;  // intentionally unused; the closed form below is the answer

    // Closed form over the number of breakpoints k.
    auto correct = [&]() {
        mod_int half = mod_int(2).inv();
        // d(x) = (x + 1) / 2, the f(x, 0) / 2^{x-1} factor from the editorial.
        auto d = [&](ll x) { return mod_int(x + 1) * half; };
        mod_int ans = 0;
        mod_int pw = mod_int(2).pow(n + m);  // equals 2^{n+m-k-1} inside the loop
        for (ll k = 0; k <= m; k++) {
            pw *= half;
            ans += C(n, k) * C(m, k) * pw * (k + d(n + m - k) + 1);
        }
        return ans;
    };

    cout << correct() << "\n";
}

int main() {
    ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);
    init();
    int t = 1;
    cin >> t;
    while (t--) solve();
    return 0;
}
1748
A
The Ultimate Square
You have $n$ rectangular wooden blocks, which are numbered from $1$ to $n$. The $i$-th block is $1$ unit high and $\lceil \frac{i}{2} \rceil$ units long. Here, $\lceil \frac{x}{2} \rceil$ denotes the result of division of $x$ by $2$, rounded \textbf{up}. For example, $\lceil \frac{4}{2} \rceil = 2$ and $\lceil \frac{5}{2} \rceil = \lceil 2.5 \rceil = 3$. For example, if $n=5$, then the blocks have the following sizes: $1 \times 1$, $1 \times 1$, $1 \times 2$, $1 \times 2$, $1 \times 3$. \begin{center} {\small The available blocks for $n=5$} \end{center} Find the maximum possible side length of a square you can create using these blocks, \textbf{without rotating any of them}. Note that you don't have to use all of the blocks. \begin{center} {\small One of the ways to create $3 \times 3$ square using blocks $1$ through $5$} \end{center}
If $n$ is odd, it is possible to create a square using all $n$ blocks. If $n$ is even, it is possible to create a square using only the first $n-1$ blocks, since $n-1$ is odd. Can we use the last block to create a larger square? If $n$ is odd, let $k=\frac{n+1}{2}$ be the width of the last block. It is possible to create a square of side length $k$ using every block as follows: Line $1$ contains a $1 \times k$ block; Line $2$ contains a $1 \times 1$ block and a $1 \times (k-1)$ block; Line $3$ contains a $1 \times 2$ block and a $1 \times (k-2)$ block; $\ldots$ Line $i$ contains a $1 \times (i-1)$ block and a $1 \times (k-i+1)$ block; $\ldots$ Line $k$ contains a $1 \times (k-1)$ block and a $1 \times 1$ block. Since the area of this square is $k^2$, and the $n+1$-th block has a width of $k$ tiles, the total area of the first $n+1$ blocks is equal to $k^2+k \lt (k+1)^2$. Therefore, the answer for $n+1$ is also $k$. In conclusion, the answer for each testcase is $\lfloor \frac{n+1}{2} \rfloor$. Time complexity per testcase: $O(1)$.
[ "math" ]
800
// Codeforces 1748A - The Ultimate Square
// The largest square side achievable with blocks 1..n is floor((n+1)/2).
#include <iostream>

// Reads t test cases; for each n prints floor((n+1)/2).
int main() {
    long long t;
    std::cin >> t;
    for (long long tc = 0; tc < t; ++tc) {
        long long n;
        std::cin >> n;
        std::cout << (n + 1) / 2 << '\n';
    }
    return 0;
}
1748
B
Diverse Substrings
A non-empty digit string is diverse if the number of occurrences of each character in it doesn't exceed the number of distinct characters in it. For example: - string "7" is diverse because 7 appears in it $1$ time and the number of distinct characters in it is $1$; - string "77" is \textbf{not} diverse because 7 appears in it $2$ times and the number of distinct characters in it is $1$; - string "1010" is diverse because both 0 and 1 appear in it $2$ times and the number of distinct characters in it is $2$; - string "6668" is \textbf{not} diverse because 6 appears in it $3$ times and the number of distinct characters in it is $2$. You are given a string $s$ of length $n$, consisting of only digits $0$ to $9$. Find how many of its $\frac{n(n+1)}{2}$ substrings are diverse. A string $a$ is a substring of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters from the beginning and several (possibly, zero or all) characters from the end. Note that if the same diverse string appears in $s$ multiple times, each occurrence should be counted independently. For example, there are two diverse substrings in "77" both equal to "7", so the answer for the string "77" is $2$.
What is the maximum number of distinct characters in a diverse string? What is the maximum frequency of a character in a diverse string? What is the maximum possible length of a diverse string? In a diverse string, there are at most $10$ distinct characters: '0', '1', $\ldots$, '9'. Therefore, each of these characters can appear at most $10$ times in a diverse string. With all this in mind, the maximum possible length of a diverse string is $10^2=100$. To solve this problem, we only need to check whether each substring of length $l \le 100$ is diverse. Time complexity per testcase: $O(n \cdot 10^2)$
[ "brute force", "implementation", "strings" ]
1,400
// Codeforces 1748B - Diverse Substrings
// A diverse string has at most 10 distinct digits, each appearing at most 10
// times, so its length never exceeds 100. It therefore suffices to scan, from
// every start position, only until some digit's frequency exceeds 10.
#include <iostream>
#include <string>
#include <algorithm>
using namespace std;
typedef long long ll;

// Counts the diverse substrings of one input string and prints the count.
static void solve_case() {
    ll n;
    string s;
    cin >> n >> s;
    ll answer = 0;
    for (ll start = 0; start < (ll)s.size(); ++start) {
        int cnt[10] = {};   // digit frequencies within s[start..end]
        int distinct = 0;   // digits with cnt > 0
        int highest = 0;    // maximum frequency seen so far
        for (ll end = start; end < (ll)s.size(); ++end) {
            int digit = s[end] - '0';
            if (++cnt[digit] > 10) break;  // no longer extendable to diverse
            highest = max(highest, cnt[digit]);
            if (cnt[digit] == 1) ++distinct;
            if (distinct >= highest) ++answer;  // current substring is diverse
        }
    }
    cout << answer << '\n';
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    ll t;
    cin >> t;
    while (t--) solve_case();
    return 0;
}
1748
C
Zero-Sum Prefixes
The score of an array $v_1,v_2,\ldots,v_n$ is defined as the number of indices $i$ ($1 \le i \le n$) such that $v_1+v_2+\ldots+v_i = 0$. You are given an array $a_1,a_2,\ldots,a_n$ of length $n$. You can perform the following operation multiple times: - select an index $i$ ($1 \le i \le n$) such that $a_i=0$; - then replace $a_i$ by an arbitrary integer. What is the maximum possible score of $a$ that can be obtained by performing a sequence of such operations?
What is the answer if $a_1=0$ and $a_i \neq 0$ for all $2 \le i \le n$? What is the answer if $a_i=0$ and $a_j \neq 0$ for all $1 \le j \le n$, $j \neq i$? What is the answer if there are only two indices $i$ and $j$ for which $a_i=a_j=0$? Let's consider the prefix sum array $s=[a_1,a_1+a_2,\ldots,a_1+a_2+\ldots+a_n]$. For every index $i$ such that $a_i=0$, if we change the value of $a_i$ to $x$, then every element from the suffix $[s_i,s_{i+1},\ldots,s_n]$ will be increased by $x$. Therefore, if $a_{i_1}=a_{i_2}=\ldots=a_{i_k}=0$, we'll partition array $s$ into multiple subarrays: $[s_1,s_2,\ldots,s_{i_1-1}]$; $[s_{i_1},s_{i_1+1},\ldots,s_{i_2-1}]$; $[s_{i_2},s_{i_2+1},\ldots,s_{i_3-1}]$; $\ldots$ $[s_{i_k},s_{i_k+1},\ldots,s_n]$; Since none of the elements from the first subarray can be changed, it will contribute with the number of occurrences of $0$ in $[s_1,s_2,\ldots,s_{i_1-1}]$ towards the final answer. For each of the other subarrays $[s_l,s_{l+1},\ldots,s_r]$, let $x$ be the most frequent element in the subarray, appearing $fr[x]$ times. Since $a_l=0$, we can change the value of $a_l$ to $-x$. In this case, every $x$ in this subarray will become equal to $0$, and our current subarray will contribute with $fr[x]$ towards the final answer. Time complexity per testcase: $O(N \log N)$
[ "brute force", "data structures", "dp", "greedy", "implementation" ]
1,600
// Codeforces 1748C - Zero-Sum Prefixes
// Split the prefix-sum array at every a[i] == 0. The first segment can only
// contribute prefix sums that are already zero; each later segment starts
// with a wildcard, so we can shift its whole suffix to turn the segment's
// most frequent prefix-sum value into zero.
#include <iostream>
#include <map>
#include <algorithm>
using namespace std;
typedef long long ll;

// Frequencies of the prefix sums inside the current segment.
static map<ll, ll> seen;

// Handles one test case: reads the array and prints the maximal score.
static void solve_case() {
    ll n;
    cin >> n;
    ll best = 0;             // highest prefix-sum frequency in this segment
    ll prefix = 0;           // running prefix sum
    ll score = 0;
    bool have_zero = false;  // has any wildcard (a[i] == 0) been seen yet?
    seen.clear();
    for (ll i = 0; i < n; ++i) {
        ll value;
        cin >> value;
        if (value == 0) {
            // Close the previous segment before this wildcard takes effect.
            score += have_zero ? best : seen[0];
            have_zero = true;
            best = 0;
            seen.clear();
        }
        prefix += value;
        best = max(best, ++seen[prefix]);
    }
    score += have_zero ? best : seen[0];  // close the final segment
    cout << score << '\n';
}

int main() {
    ios_base::sync_with_stdio(false), cin.tie(0);
    ll t;
    cin >> t;
    while (t--) solve_case();
    return 0;
}
1748
D
ConstructOR
You are given three integers $a$, $b$, and $d$. Your task is to find any integer $x$ which satisfies all of the following conditions, or determine that no such integers exist: - $0 \le x \lt 2^{60}$; - $a|x$ is divisible by $d$; - $b|x$ is divisible by $d$. Here, $|$ denotes the bitwise OR operation.
If at least one of $a$ and $b$ is odd, and $d$ is even, then there are no solutions. Without loss of generality, we will only consider the case when $a$ is odd and $d$ is even. Since the last bit of $a$ is $1$, then the last bit of $a|x$ must also be $1$. Therefore, $a|x$ cannot be a multiple of $d$, as $a|x$ is odd, while $d$ is even. Note that there are more cases in which no solutions exist, however they are more generalised versions of this case. If a triplet ($a,b,d$) has no solutions, then ($a\cdot 2,b\cdot 2,d\cdot 2$) has no solutions as well. Combined with the first hint, we can say that a triplet ($a,b,d$) has no solutions if $min(\text{lsb}(a),\text{lsb}(b)) \lt \text{lsb}(d)$. Here, $\text{lsb}(x)$ represents the least significant bit of $x$. Since both $a|x$ and $b|x$ must be divisible by $d$, it's better to choose an $x$ such that $a|x=b|x=x$. If $d$ is odd, since $a,b \lt 2^{30}$, we can ensure that $a|x=b|x=x$ by setting the last $30$ bits of $c$ to $1$. If $d$ is even, then the last $\text{lsb}(d)$ bits of $x$ should be set to $0$, while the other bits from the last $30$ bits should be set to $1$. Here, $\text{lsb}(x)$ represents the least significant bit of $x$. Let $k=\text{lsb}(d)$, where $\text{lsb}(d)$ represents the least significant bit of $d$. Since $a|x$ and $b|x$ are multiples of $d$, the last $k$ bits of $a$ and $b$ (and also $x$) must be equal to $0$. Otherwise, there are no solutions and we can print $-1$. To simplify the construction process, we will try to find some $x$ such that $a|x=b|x=x$. 
Since we already know that the last $k$ bits of $a$, $b$ and $x$ are $0$, we will consider that the other $30-k$ of the $30$ least significant bits of $x$ are equal to $1$: $x_{(2)}=p\text{ }1\text{ }1\text{ }1\text{ }\ldots\text{ }1\text{ }0\text{ }0\text{ }\ldots\text{ }0$ This gives the following general formula for $x$: $x=2^k \cdot (p \cdot 2^{30-k} + (2^{30-k} -1))$ Now, we'll try to find some $p$ for which $x$ is a multiple of $d=2^k \cdot d'$: $x=2^k \cdot (p \cdot 2^{30-k} + (2^{30-k} -1) \equiv 0 \mod 2^k \cdot d' \Leftrightarrow$ $\Leftrightarrow (p \cdot 2^{30-k} + (2^{30-k} -1) \equiv 0 \mod d' \Leftrightarrow$ $\Leftrightarrow (p+1) \cdot 2^{30-k} \equiv 1 \mod d' \Leftrightarrow$ $\Leftrightarrow p+1 \equiv 2^{k-30} \mod d' \Leftrightarrow$ $\Leftrightarrow p+1 \equiv (2^{-1})^{30-k} \mod d' \Leftrightarrow$ $\Leftrightarrow p+1 \equiv (\frac{d'+1}{2})^{30-k} \mod d' \Rightarrow$ $\Rightarrow p = ((\frac{d'+1}{2})^{30-k}+d'-1) \mod d'$ Time complexity per testcase: $O(log d)$ Note that if $a|b$ is already a multiple of $d$, we can consider $x=a|b$.
[ "bitmasks", "chinese remainder theorem", "combinatorics", "constructive algorithms", "math", "number theory" ]
2,100
// Codeforces 1748D - ConstructOR
// Let x = a|b. If x is already a multiple of d, output it. Otherwise strip k
// shared factors of two from x and d; if x is then odd while d is still even
// there is no answer. Otherwise d is odd, so 2 is invertible modulo d and we
// can build a multiple of the original d whose bits k..29 are all ones,
// which guarantees a|x == b|x == x.
#include <iostream>
using namespace std;
typedef long long ll;

// Solves one test case: reads a, b, d and prints a valid x or -1.
static void solve_case() {
    ll a, b, d;
    cin >> a >> b >> d;
    ll merged = a | b;
    if (merged % d == 0) {
        cout << merged << '\n';
        return;
    }
    ll k = 0;  // shared trailing zero bits stripped from merged and d
    while (merged % 2 == 0 && d % 2 == 0) {
        merged /= 2;
        d /= 2;
        ++k;
    }
    if (merged % 2 == 1 && d % 2 == 0) {
        // a|b has a set bit below the lowest set bit of d: impossible.
        cout << "-1\n";
        return;
    }
    ll half = (d + 1) / 2;  // inverse of 2 modulo the (now odd) d
    ll inv_pow = 1;         // becomes 2^{-(30-k)} modulo d
    for (ll i = 0; i < 30 - k; ++i) inv_pow = inv_pow * half % d;
    // (inv_pow << (30-k)) - 1 is divisible by the odd d and has its low
    // 30-k bits all set; shifting by k restores the original d and covers
    // every set bit of a|b (both fit in 30 bits with k trailing zeros).
    cout << ((inv_pow << (30 - k)) - 1) * (1ll << k) << '\n';
}

int main() {
    ios_base::sync_with_stdio(false), cin.tie(0);
    ll t;
    cin >> t;
    while (t--) solve_case();
}
1748
E
Yet Another Array Counting Problem
The position of the leftmost maximum on the segment $[l; r]$ of array $x = [x_1, x_2, \ldots, x_n]$ is the smallest integer $i$ such that $l \le i \le r$ and $x_i = \max(x_l, x_{l+1}, \ldots, x_r)$. You are given an array $a = [a_1, a_2, \ldots, a_n]$ of length $n$. Find the number of integer arrays $b = [b_1, b_2, \ldots, b_n]$ of length $n$ that satisfy the following conditions: - $1 \le b_i \le m$ for all $1 \le i \le n$; - for all pairs of integers $1 \le l \le r \le n$, the position of the leftmost maximum on the segment $[l; r]$ of the array $b$ is equal to the position of the leftmost maximum on the segment $[l; r]$ of the array $a$. Since the answer might be very large, print its remainder modulo $10^9+7$.
Let $m$ be the position of the leftmost maximum on the segment $[1;n]$. If $l \le m \le r$, then the position of the leftmost maximum on the segment $[l;r]$ is equal to $m$. If $l \le m \le r$, then the position of the leftmost maximum on the segment $[l;r]$ is equal to $m$. If $l \le r \lt m$, then the leftmost maximum on the segment $[l;r]$ is some element $a_p$, $p \lt m$. If $l \le r \lt m$, then the leftmost maximum on the segment $[l;r]$ is some element $a_p$, $p \lt m$. If $m \lt l \le r$, then the leftmost maximum on the segment $[l;r]$ is some element $a_{p_2}$, $p_2 \gt m$. If $m \lt l \le r$, then the leftmost maximum on the segment $[l;r]$ is some element $a_{p_2}$, $p_2 \gt m$. Let $m$ be the position of the leftmost maximum on the segment $[l;r]$. If $p$ is the position of the leftmost maximum on the segment $[l;m-1]$ and $p_2$ is the position of the leftmost maximum on the segment $[m+1;r]$, then $b_m \gt b_{p}$ and $b_m \ge b_{p_2}$. Using the idea from the previous hint, we can recursively build a binary tree where the children of node $m$ are nodes $p$ and $p_2$. Can this problem be boiled down to a tree dp? (Note that $n \cdot m \le 10^6$) Let $f(i,j)$ be the position of the leftmost maximum in the interval $(i;j)$, $1 \le i \le j \le n$. Let's consider an interval $(l;r)$ such that $f(l,r)=m$. For the sake of simplicity, let's assume that $l \lt m \lt r$. Let $p=f(l,m-1)$ and $p_2=f(m+1,r)$. Since $a_m$ is the leftmost maximum in $(l;r)$, $p \lt m$ and $p_2 \gt m$, the following conditions must hold for array $b$: $b_m \gt b_p$ $b_m \ge b_{p_2}$ Let's consider a binary tree where the children of node $u=f(l,r)$ are nodes $p=f(l,u-1)$ and $p_2=f(u+1,r)$, for every $1 \le u \le n$. Note that if $u=l$, $f(l,l-1)$ is not defined, and, as such, node $u$ will have no left child. Similarly, if $u=r$, then node $u$ will have no right child. 
Let $dp[u][x]$ be equal to the number of ways to assign values to every element $b_v$ from the subtree rooted in $u$, if $b_u=x$. If $u$ has a left child and $x=1$, then $dp[u][x]=0$; Otherwise, if $u$ has two children, then $dp[u][x]=(\sum_{i=1}^{x-1}dp[p][i]) \cdot(\sum_{i=1}^{x}dp[p_2][i])$; If $u$ only has a left child, then $dp[u][x]=\sum_{i=1}^{x-1}dp[p][i]$; If $u$ only has a right child, then $dp[u][x]=\sum_{i=1}^{x}dp[p_2][i]$; If $u$ has no children, then $dp[u][x]=1$. To optimise the transitions, we'll also need to compute $sum[u][x]=\sum_{i=1}^{x} dp[u][i]$ alongside our normal $dp$. Intended time complexity per testcase: $O(n \cdot m+n \cdot log(n))$ In order to construct the binary tree, we can use a recursive divide and conquer function $divide(l,r)$ to split our current interval $(l;r)$ into two new intervals $(l;m-1)$ and $(m+1;r)$. Additionally, we can also compute the values of $dp[m][x]$ and $sum[m][x]$ inside $divide(l,r)$ after calling $divide(l,m-1)$ and $divide(m+1,r)$. Range leftmost maximum queries can be answered in $O(1)$ using a sparse table, see the model solution for more information.
[ "binary search", "data structures", "divide and conquer", "dp", "flows", "math", "trees" ]
2,300
// Codeforces 1748E - Yet Another Array Counting Problem
// Build the leftmost-maximum Cartesian tree of the input with an O(1)
// sparse-table query, then count valid arrays b with a tree dp:
//   dp[u][x]  = #assignments of u's subtree when b[u] = x,
//   sum[u][x] = dp[u][1] + ... + dp[u][x]  (prefix sums for O(1) transitions).
// The left child must carry a strictly smaller value, the right child a
// value no larger than its parent's.
#include <iostream>
#include <vector>
using namespace std;
typedef long long ll;

const ll NMAX = 2e5 + 5, LGMAX = 18, MOD = 1e9 + 7;

int n, m;
int table_[LGMAX][NMAX];  // sparse table of leftmost-maximum positions
int msb[NMAX];            // msb[i] = floor(log2(i))
int v[NMAX];              // the input array
vector<vector<ll>> dp, sum;

// Returns the position of the leftmost maximum of v on [l, r] in O(1).
int query(int l, int r) {
    int bit = msb[r - l + 1];
    int left = table_[bit][l], right = table_[bit][r - (1 << bit) + 1];
    return v[left] >= v[right] ? left : right;  // >= keeps the leftmost one
}

// Recursively builds the Cartesian tree of [l, r], filling dp/sum for its
// root bottom-up. Returns the root position, or -1 for an empty interval.
int build(int l, int r) {
    if (l > r) return -1;
    int root = query(l, r);
    int left = build(l, root - 1), right = build(root + 1, r);
    for (int x = 1; x <= m; x++) {
        if (left != -1 && x == 1)
            dp[root][1] = 0;  // left child would need a value below 1
        else
            dp[root][x] = (left >= 0 ? sum[left][x - 1] : 1ll) *
                          (right >= 0 ? sum[right][x] : 1ll) % MOD;
        sum[root][x] = (sum[root][x - 1] + dp[root][x]) % MOD;
    }
    return root;
}

// One test case: read the array, build the sparse table, run the dp.
void solve_case() {
    cin >> n >> m;
    dp.resize(n), sum.resize(n);
    for (int i = 0; i < n; i++) {
        cin >> v[i];
        table_[0][i] = i;
        dp[i].assign(m + 1, 0);
        sum[i].assign(m + 1, 0);
    }
    for (int bit = 1; bit < LGMAX; bit++) {
        for (int i = 0; i + (1 << bit) <= n; i++) {
            int a = table_[bit - 1][i];
            int b = table_[bit - 1][i + (1 << (bit - 1))];
            table_[bit][i] = v[a] >= v[b] ? a : b;
        }
    }
    build(0, n - 1);
    cout << sum[query(0, n - 1)][m] << '\n';
}

int main() {
    // msb = floor(log2), built incrementally: it grows by one at powers of 2.
    for (int i = 2; i < NMAX; i++) msb[i] = msb[i - 1] + ((i & (i - 1)) == 0);
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cout.tie(0);
    int t;
    cin >> t;
    while (t--) solve_case();
    return 0;
}
1748
F
Circular Xor Reversal
You have an array $a_0, a_1, \ldots, a_{n-1}$ of length $n$. Initially, $a_i = 2^i$ for all $0 \le i \lt n$. Note that array $a$ is zero-indexed. You want to reverse this array (that is, make $a_i$ equal to $2^{n-1-i}$ for all $0 \le i \lt n$). To do this, you can perform the following operation no more than $250\,000$ times: - Select an integer $i$ ($0 \le i \lt n$) and replace $a_i$ by $a_i \oplus a_{(i+1)\bmod n}$. Here, $\oplus$ denotes the bitwise XOR operation. Your task is to find \textbf{any} sequence of operations that will result in the array $a$ being reversed. It can be shown that under the given constraints, a solution always exists.
How can we perform $a_i=a_i \oplus a_j$ for any $0 \le i,j \lt n$, $i \neq j$? We can swap the values of $a_i$ and $a_j$ by performing the following sequence of xor assignments: $a_i=a_i \oplus a_j$; $a_j=a_j \oplus a_i$; $a_i=a_i \oplus a_j$; Performing $a_i=a_i \oplus a_j$ on its own is pretty wasteful, as it requires $4 \cdot \text{dist}(i,j) - c$ operations, where $dist(i,j)=(j+n-i) \mod n$, and $c$ is a constant. If $j=i+3$, can we perform $a_i=a_i \oplus a_j$ and $a_{i+1}=a_{i+1} \oplus a_{j-1}$ simultaneously? Building on the idea presented in hint $3$, can we perform the following $\frac{n}{2}$ xor assignments simultaneously if $n$ is even? $a_0 = a_0 \oplus a_{n-1}$; $a_1 = a_1 \oplus a_{n-2}$; $a_2 = a_2 \oplus a_{n-3}$; $\ldots$ $a_{\frac{n}{2}-1} = a_{\frac{n}{2}-1} \oplus a_{\frac{n}{2}}$ The target number of operations for performing all $\frac{n}{2}$ assignments is $\frac{n^2}{2}$. Using hints $2$ and $4$, we can already solve the problem if $n$ is even. If $m=dist(i,j) \gt 0$ and $b_k=a_{(i+k) \mod n}$, let $f(i,j)$ be a sequence of operations that performs: $b_0 = b_0 \oplus b_{m}$; $b_1 = b_1 \oplus b_{m-1}$; $b_2 = b_2 \oplus b_{m-2}$; $\ldots$ $b_{\lfloor\frac{m-2}{2} \rfloor } = b_{\lfloor \frac{m-1}{2} \rfloor} \oplus b_{\lfloor \frac{m+2}{2} \rfloor}$ We can reverse array $a$ by performing $f(0,n-1)$, $f(\frac{n}{2},\frac{n}{2}-1)$ and $f(0,n-1)$, in this order. Otherwise, if $n$ is odd, we can perform $f(0,n-1)$, $f(\frac{n+1}{2},\frac{n-3}{2})$ and $f(0,n-1)$, in this order. The total number of operations is $3\cdot \frac{n^2}{2} \le 3 \cdot \frac{400^2}{2} = 240\,000 \lt 250\,000$. 
If $m=dist(i,j)=((j+n-i) \mod n)$, $m \gt 0$ and $b_k=a_{(i+k) \mod n}$, let $f(i,j)$ be a sequence of operations that performs: $b_0 = b_0 \oplus b_{m-1}$; $b_1 = b_1 \oplus b_{m-2}$; $b_2 = b_2 \oplus b_{m-3}$; $\ldots$ $b_{\lfloor\frac{m-1}{2} \rfloor } = b_{\lfloor \frac{m-1}{2} \rfloor} \oplus b_{\lfloor \frac{m+2}{2} \rfloor}$ If $n$ is even, we can reverse array $a$ by performing $f(0,n-1)$, $f(\frac{n}{2},\frac{n}{2}-1)$ and $f(0,n-1)$, in this order. Otherwise, if $n$ is odd, we can perform $f(0,n-1)$, $f(\frac{n+1}{2},\frac{n-3}{2})$ and $f(0,n-1)$, in this order. One possible way to construct $f(i,j)$ is as follows: Perform an operation on every $i$ from $m-1$ to $0$: $\Rightarrow b=[(0..m),(1..m),(2..m)\ldots,(i..m),\ldots,b_{m}]$ Perform an operation on every $i$ from $1$ to $m-1$: $\Rightarrow b=[(0..m),b_1,b_2,\ldots,b_i,\ldots,b_{m}]$ Perform an operation on every $i$ from $m-2$ to $1$: $\Rightarrow b=[(0..m),(1..m-1),(2..m-1),\ldots,(i..m-1),\ldots,b_{m-1},b_{m}]$ Perform an operation on every $i$ from $2$ to $m-2$: $\Rightarrow b=[(0..m),(1..m-1),b_2,b_3,\ldots,b_i,\ldots,b_{m-1},b_{m}]$ $\ldots$ $\Rightarrow b=[(0..m),(1..m-1),(2..m-2),\ldots, (i..m-i),(\lfloor\frac{m-1}{2}\rfloor..\lfloor\frac{m+2}{2}\rfloor),b_{\lfloor\frac{m+1}{2}\rfloor},\ldots,b_j,b_{m-1}]$ The last step is to perform an operation on every $i$ from $0$ to $\lfloor\frac{m-2}{2}\rfloor$: $\Rightarrow b=[a_0 \oplus a_{m},a_1 \oplus a_{m-1},\ldots,a_i \oplus a_{m-i},\ldots,b_{\lfloor\frac{m-1}{2}\rfloor} \oplus b_{\lfloor\frac{m+2}{2}\rfloor},b_{\lfloor\frac{m+1}{2}\rfloor},\ldots,b_j,\ldots,b_{m}]$ Here, $(l..r)$ denotes $b_l \oplus b_{l+1} \oplus \ldots \oplus b_r$. The number of operations needed for $f(i,j)$ is equal to $m+(m-1)+\ldots+1+(\frac{m}{2})=\frac{m\cdot(m+1)}{2}+\frac{m}{2}=\frac{m^2+2\cdot m}{2}$, therefore the total number of operations needed to reverse the array is $\frac{3}{2} \cdot (m^2+2\cdot m)$. 
Since $m \le n-1$, $\frac{3}{2} \cdot (m^2+2\cdot m) \le \frac{3}{2} \cdot (399^2 + 2\cdot 399) \lt 250\,000$. Time complexity per testcase: $O(N^2)$
[ "bitmasks", "constructive algorithms" ]
3,000
// Codeforces 1748F - Circular Xor Reversal
// fold(l, r) emits a zig-zag sequence of operations that xors the first half
// of the cyclic segment [l, r] with its mirrored second half. Applying it to
// the whole array, then to the opposite half, then to the whole array again
// reverses the array within the 250000-operation budget for n <= 400.
#include <iostream>
#include <vector>
using namespace std;

vector<int> ops;  // recorded operation indices, in order
int n;

// Records one operation on position pos (wrapped modulo n).
void record(int pos) { ops.push_back(pos % n); }

// Emits the operations realizing b[i] ^= b[len - i] over segment [l, r].
void fold(int l, int r) {
    if (r < l) r += n;  // unwrap a segment that crosses the array boundary
    int len = r - l, first = l;
    bool rightToLeft = true;  // alternate sweep direction each pass
    --r;
    while (l <= r) {
        if (rightToLeft) {
            for (int i = r; i >= l; i--) record(i);
            ++l;
        } else {
            for (int i = l; i <= r; i++) record(i);
            --r;
        }
        rightToLeft = !rightToLeft;
    }
    // Final sweep: combine each prefix element with its mirrored suffix.
    for (int i = first; i < first + len / 2; i++) record(i);
}

int main() {
    ios_base::sync_with_stdio(false), cin.tie(0);
    cin >> n;
    fold(0, n - 1);
    fold((n + 1) / 2, (n - 2) / 2);
    fold(0, n - 1);
    cout << ops.size() << '\n';
    for (int pos : ops) cout << pos << ' ';
}
1749
A
Cowardly Rooks
There's a chessboard of size $n \times n$. $m$ rooks are placed on it in such a way that: - no two rooks occupy the same cell; - no two rooks attack each other. A rook attacks all cells that are in its row or column. Is it possible to move \textbf{exactly one} rook (you can choose which one to move) into a different cell so that no two rooks still attack each other? A rook can move into any cell in its row or column if no other rook stands on its path.
First, note that $m$ is always less than or equal to $n$. If there were at least $n+1$ rooks on the board, at least two of them would share a row or a column (by pigeonhole principle). If $m < n$, then there is always at least one free row and at least one free column. You can move any rook into that row or column. Otherwise, all rows and columns are taken, so any move will make two rooks share a row or a column, which is prohibited. Thus, if $m = n$, then it's "NO". Otherwise, it's "YES". Overall complexity: $O(1)$ per testcase. Alternatively, you could check every rook and every possible move. Overall complexity: $O(m^2 \cdot n^2)$ per testcase.
[ "greedy", "implementation" ]
800
#include <bits/stdc++.h>
using namespace std;

// Cowardly Rooks, brute force: for every rook try every cell reachable by a
// rook move (exactly one coordinate shared) and test whether the resulting
// placement is still pairwise non-attacking.  Paths are always free because
// the initial placement is non-attacking.
int main() {
    int tests;
    scanf("%d", &tests);
    while (tests--) {
        int n, m;
        scanf("%d%d", &n, &m);
        vector<pair<int, int>> rook(m);
        for (auto &[r, c] : rook) {
            scanf("%d%d", &r, &c);
            --r, --c;                         // 0-based coordinates
        }
        bool movable = false;
        for (int i = 0; i < m; i++)
            for (int x = 0; x < n; x++)
                for (int y = 0; y < n; y++) {
                    // A rook move changes exactly one of row/column.
                    if (!((x == rook[i].first) ^ (y == rook[i].second)))
                        continue;
                    bool safe = true;
                    for (int j = 0; j < m; j++)
                        if (j != i)
                            safe &= x != rook[j].first && y != rook[j].second;
                    movable |= safe;
                }
        puts(movable ? "YES" : "NO");
    }
}
1749
B
Death's Blessing
You are playing a computer game. To pass the current level, you have to kill a big horde of monsters. In this horde, there are $n$ monsters standing in the row, numbered from $1$ to $n$. The $i$-th monster has $a_i$ health and a special "Death's Blessing" spell of strength $b_i$ attached to it. You are going to kill all of them one by one. It takes exactly $h$ seconds to kill a monster with health $h$. When the $i$-th monster dies, it casts its spell that increases the health of its neighbors by $b_i$ (the neighbors of the $j$-th monster in the row are the monsters on places $j - 1$ and $j + 1$. The first and the last monsters have only one neighbor each). After each monster is killed, the row shrinks, so its former neighbors become adjacent to each other (so if one of them dies, the other one is affected by its spell). For example, imagine a situation with $4$ monsters with health $a = [2, 6, 7, 3]$ and spells $b = [3, 6, 0, 5]$. One of the ways to get rid of the monsters is shown below: \begin{center} \begin{tabular}{|c||c||c||c||c||c||c||c||c||c||c||c||c||ccl} $2$ & $6$ & $7$ & $3$ & \multirow{2}{*}{$\xrightarrow{6\ s}$} & $8$ & $13$ & $3$ & \multirow{2}{*}{$\xrightarrow{13\ s}$} & $8$ & $3$ & \multirow{2}{*}{$\xrightarrow{8\ s}$} & $6$ & \multirow{2}{*}{$\xrightarrow{6\ s}$} & \multirow{2}{*}{$\{\}$} \ $3$ & $6$ & $0$ & $5$ & & $3$ & $0$ & $5$ & & $3$ & $5$ & & $5$ & & & \ \end{tabular} {\small The first row represents the health of each monster, the second one — the power of the spells.} \end{center} As a result, we can kill all monsters in $6 + 13 + 8 + 6$ $=$ $33$ seconds. Note that it's only an example and may not be the fastest way to get rid of the monsters. What is the minimum time required to kill all monsters in the row?
Note that whichever order you choose, the total time will always contain all initial health $a_i$; in other words, any answer will contain $\sum_{i=1}^{n}{a_i}$ as its part. So the lower the sum of $b_i$ you add to the answer — the better. Look at some monster $i$. If you kill it while it has both a left and a right neighbor, it will add $2 \cdot b_i$ to the answer. If it is the first or the last in the row, it will add just $b_i$. And if it is the last monster to be killed (no neighbors remain), it will add $0$. There can be only one such last monster, so any other will add at least $b_i$ to the answer. And for any chosen last monster $l$ you can find the order that gives exactly $b_i$ for all other monsters. For example, you can first kill monsters $1, 2, \dots, (l-1)$, then $n, (n-1), \dots, (l + 1)$ and, finally, monster $l$. In other words, if the last monster is the $l$-th one, the total answer will be equal to $\sum_{i=1}^{n}{a_i} + \sum_{i=1}^{n}{b_i} - b_l$. Since we need to minimize the answer, we can choose the monster with maximum $b_l$. So, the answer is $\sum_{i=1}^{n}{a_i} + \sum_{i=1}^{n}{b_i} - \max_{i=1}^{n}{b_i}$.
[ "greedy" ]
900
// Death's Blessing: total time = sum(a) + sum(b) - max(b), because every
// spell fires exactly once except the strongest one, which is saved for last.
fun main() {
    val tests = readLine()!!.toInt()
    repeat(tests) {
        readLine()!!.toInt()  // n — implied by the arrays, value unused
        val health = readLine()!!.split(' ').map(String::toLong)
        val spells = readLine()!!.split(' ').map(String::toLong)
        println(health.sum() + spells.sum() - spells.maxOrNull()!!)
    }
}
1749
C
Number Game
Alice and Bob are playing a game. They have an array of positive integers $a$ of size $n$. Before starting the game, Alice chooses an integer $k \ge 0$. The game lasts for $k$ stages, the stages are numbered from $1$ to $k$. During the $i$-th stage, Alice must remove an element from the array that is less than or equal to $k - i + 1$. After that, if the array is not empty, Bob must add $k - i + 1$ to an arbitrary element of the array. Note that both Alice's move and Bob's move are two parts of the same stage of the game. If Alice can't delete an element during some stage, she loses. If the $k$-th stage ends and Alice hasn't lost yet, she wins. Your task is to determine the maximum value of $k$ such that Alice can win if both players play optimally. Bob plays against Alice, so he tries to make her lose the game, if it's possible.
Note that if Bob has increased some element, then Alice can't remove it on the next stages. Obviously, it is more profitable for Bob to "prohibit" the smallest element of the array. Using this fact, we can iterate over the value of $k$, and then simulate the game process. To simulate the game, we can maintain the set of elements that Alice can remove. On the $i$-th stage, Alice removes the maximum element $x$, such that $x \le k - i + 1$, if there are no such elements, then Alice lost. Bob always removes the minimum element of the set. Thus, the complexity of the solution is $O(n^2\log{n})$ for each test case. There is another possible solution: we can notice that, if Alice wins, Bob will "prohibit" the elements on positions $1, 2, \dots, k-1$ of the sorted array. So, Alice has to delete the next $k$ elements. So, if the segment $[k \dots 2k-1]$ of the sorted array can be deleted by Alice during the game phases, she wins with this value of $k$.
[ "binary search", "data structures", "games", "greedy", "implementation" ]
1,400
#include <bits/stdc++.h>
using namespace std;

// Number Game: simulate the game for every candidate k.  On each stage Alice
// greedily removes the largest element not exceeding the stage limit; Bob
// then "locks" the smallest remaining element by adding the limit to it.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto &v : a) cin >> v;
        int best = 0;
        for (int k = 1; k <= n; ++k) {
            multiset<int> pool(a.begin(), a.end());
            for (int stage = 0; stage < k; ++stage) {
                const int limit = k - stage;          // k - i + 1 for 1-based stage i
                auto pick = pool.upper_bound(limit);
                if (pick == pool.begin()) break;      // Alice has no legal move
                pool.erase(prev(pick));
                if (!pool.empty()) {
                    // Bob boosts the minimum so Alice can never remove it.
                    int low = *pool.begin();
                    pool.erase(pool.begin());
                    pool.insert(low + limit);
                }
            }
            // All k stages succeeded iff exactly k elements were removed.
            if ((int)pool.size() + k == n) best = k;
        }
        cout << best << '\n';
    }
}
1749
D
Counting Arrays
Consider an array $a$ of length $n$ with elements numbered from $1$ to $n$. It is possible to remove the $i$-th element of $a$ if $gcd(a_i, i) = 1$, where $gcd$ denotes the greatest common divisor. After an element is removed, the elements to the right are shifted to the left by one position. An array $b$ with $n$ integers such that $1 \le b_i \le n - i + 1$ is a \textbf{removal sequence for the array $a$} if it is possible to remove all elements of $a$, if you remove the $b_1$-th element, then the $b_2$-th, ..., then the $b_n$-th element. For example, let $a = [42, 314]$: - $[1, 1]$ is a removal sequence: when you remove the $1$-st element of the array, the condition $gcd(42, 1) = 1$ holds, and the array becomes $[314]$; when you remove the $1$-st element again, the condition $gcd(314, 1) = 1$ holds, and the array becomes empty. - $[2, 1]$ is not a removal sequence: when you try to remove the $2$-nd element, the condition $gcd(314, 2) = 1$ is false. An array is \textbf{ambiguous} if it has \textbf{at least two} removal sequences. For example, the array $[1, 2, 5]$ is ambiguous: it has removal sequences $[3, 1, 1]$ and $[1, 2, 1]$. The array $[42, 314]$ is not ambiguous: the only removal sequence it has is $[1, 1]$. You are given two integers $n$ and $m$. You have to calculate the number of \textbf{ambiguous} arrays $a$ such that the length of $a$ is from $1$ to $n$ and each $a_i$ is an integer from $1$ to $m$.
We will calculate the answer by subtracting the number of arrays which have only one removal sequence from the total number of arrays. The latter is very simple - it's just $m^1 + m^2 + \dots + m^n$. How do we calculate the number of unambiguous arrays? We can always delete the $1$-st element of an array; so, $[1, 1, 1, \dots, 1]$ is a removal sequence for each array. So, we have to calculate the number of arrays which have no other removal sequences. How do we check if the array has no removal sequences other than $[1, 1, \dots, 1]$? If, at any time, it's possible to remove some element other than the $1$-st from the array, it creates another removal sequence since we can always complete that sequence. Let's analyze the constraints on each element of the array. $a_1$ can be any integer from $1$ to $m$. $a_2$ should be divisible by $2$ (otherwise, we can remove it on the first step). $a_3$ should be divisible by $3$ (otherwise, we can remove it on the first step) and by $2$ (otherwise, we can remove it on the second step). $a_4$ should be divisible by $2$ and $3$, but not necessarily by $4$ since an element which is divisible by $2$ already has a common divisor with $4$. And so on - using induction, we can show that the $i$-th element should be divisible by $p_1 \cdot p_2 \cdot p_3 \cdot \dots \cdot p_k$, where $p_1, p_2, \dots, p_k$ are all of the primes in $[1, i]$. Obviously, the number of such elements is $\dfrac{m}{p_1 \cdot p_2 \cdot p_3 \cdot \dots \cdot p_k}$. So, we can easily calculate the number of possible elements for each index of the array, and that allows us to count all unambiguous arrays.
[ "combinatorics", "dp", "math", "number theory" ]
1,900
#include <bits/stdc++.h>
using namespace std;

// Counting Arrays (1749D): answer = (all arrays of length 1..n) minus the
// unambiguous ones.  Position i of an unambiguous array must be a multiple
// of the product of all primes <= i (the "primorial").
const long long MOD = 998244353;

// Modular exponentiation, O(log exp).
long long power_mod(long long base, long long exp) {
    long long res = 1;
    base %= MOD;
    while (exp > 0) {
        if (exp & 1) res = res * base % MOD;
        base = base * base % MOD;
        exp >>= 1;
    }
    return res;
}

// Trial-division primality test, O(sqrt(x)).  Note: returns true for x = 1,
// matching the original, which is harmless since multiplying by 1 is a no-op.
bool is_prime(int x) {
    for (long long d = 2; d * d <= x; ++d)
        if (x % d == 0) return false;
    return true;
}

int main() {
    int n;
    long long m;
    cin >> n >> m;

    // Total number of arrays: m^1 + m^2 + ... + m^n (mod MOD).
    long long answer = 0;
    for (int len = 1; len <= n; ++len)
        answer = (answer + power_mod(m, len)) % MOD;

    long long primorial = 1;  // exact product of primes <= i, until it exceeds m
    long long ways = 1;       // # unambiguous arrays of length i (mod MOD)
    for (int i = 1; i <= n; ++i) {
        if (primorial > m) continue;  // no valid longer arrays remain
        if (is_prime(i)) primorial *= i;
        ways = ways * ((m / primorial) % MOD) % MOD;
        answer = (answer - ways + MOD) % MOD;
    }
    cout << answer << endl;
}
1749
E
Cactus Wall
Monocarp is playing Minecraft and wants to build a wall of cacti. He wants to build it on a field of sand of the size of $n \times m$ cells. Initially, there are cacti in some cells of the field. \textbf{Note that, in Minecraft, cacti cannot grow on cells adjacent to each other by side — and the initial field meets this restriction}. Monocarp can plant new cacti (they must also fulfil the aforementioned condition). He can't chop down any of the cacti that are already growing on the field — he doesn't have an axe, and the cacti are too prickly for his hands. Monocarp believes that the wall is complete if there is no path from the top row of the field to the bottom row, such that: - each two consecutive cells in the path are adjacent by side; - no cell belonging to the path contains a cactus. Your task is to plant the minimum number of cacti to build a wall (or to report that this is impossible).
In order to block any path from the top row to the bottom row, you have to build a path from the left side to the right side consisting of '#'. Since two consecutive cacti in a path cannot be placed side by side, they should be placed diagonally (i.e $(x, y)$ should be followed by $(x \pm 1, y \pm 1)$ on the path). So we can rephrase the task as a shortest path problem. The edge weight is $0$ if cactus is already in the cell that corresponds to the end of the edge, and $1$ otherwise. Don't forget that some cells can't contain a cactus, thus be part of a path, because of the cacti initially placed. The shortest path can be found using Dijkstra's or 0-1 BFS algorithm.
[ "constructive algorithms", "dfs and similar", "graphs", "shortest paths" ]
2,400
// Codeforces 1749E "Cactus Wall" — 0-1 BFS over diagonal cactus chains.
// A wall is a diagonal chain of '#' from column 0 to column m-1.
// dx/dy indices 0..3 are side steps (used only by can() to validate that a
// cactus may stand on a cell: no side-adjacent '#'); indices 4..7 are the
// diagonal steps the BFS actually takes.
// d[x][y] = minimum number of new cacti needed to extend a chain to (x, y);
// an edge costs 0 if the target cell already holds '#', 1 otherwise, hence
// the deque (push_front for 0-edges, push_back for 1-edges).
// p[x][y] remembers the move index (always 4..7) used to reach the cell, so
// p == 0 can serve as the sentinel marking a starting cell in column 0 —
// that is what terminates the `while (true)` path reconstruction, which
// plants '#' along the recovered chain before printing the grid.
// After the BFS, the cheapest endpoint in the last column is chosen; INF
// there means no wall is possible ("NO").
#include <bits/stdc++.h> using namespace std; const int INF = 1e9; int dx[] = {0, 1, 0, -1, 1, 1, -1, -1}; int dy[] = {1, 0, -1, 0, -1, 1, -1, 1}; int main() { ios::sync_with_stdio(false); cin.tie(0); int t; cin >> t; while (t--) { int n, m; cin >> n >> m; vector<string> s(n); for (auto &it : s) cin >> it; auto in = [&](int x, int y) { return 0 <= x && x < n && 0 <= y && y < m; }; auto can = [&](int x, int y) { if (!in(x, y)) return false; for (int i = 0; i < 4; ++i) { int nx = x + dx[i], ny = y + dy[i]; if (in(nx, ny) && s[nx][ny] == '#') return false; } return true; }; vector<vector<int>> d(n, vector<int>(m, INF)), p(n, vector<int>(m)); deque<pair<int, int>> q; for (int i = 0; i < n; ++i) { if (s[i][0] == '#') { d[i][0] = 0; q.push_front({i, 0}); } else if (can(i, 0)) { d[i][0] = 1; q.push_back({i, 0}); } } while (!q.empty()) { auto [x, y] = q.front(); q.pop_front(); for (int i = 4; i < 8; ++i) { int nx = x + dx[i], ny = y + dy[i]; if (!can(nx, ny)) continue; int w = (s[nx][ny] != '#'); if (d[nx][ny] > d[x][y] + w) { d[nx][ny] = d[x][y] + w; p[nx][ny] = i; if (w) q.push_back({nx, ny}); else q.push_front({nx, ny}); } } } int x = 0, y = m - 1; for (int i = 0; i < n; ++i) if (d[x][y] > d[i][y]) x = i; if (d[x][y] == INF) { cout << "NO\n"; continue; } while (true) { s[x][y] = '#'; int i = p[x][y]; if (!i) break; x -= dx[i]; y -= dy[i]; } cout << "YES\n"; for (auto it : s) cout << it << '\n'; } }
1749
F
Distance to the Path
You are given a tree consisting of $n$ vertices. Initially, each vertex has a value $0$. You need to perform $m$ queries of two types: - You are given a vertex index $v$. Print the value of the vertex $v$. - You are given two vertex indices $u$ and $v$ and values $k$ and $d$ ($d \le 20$). You need to add $k$ to the value of each vertex such that the distance from that vertex to the path from $u$ to $v$ is less than or equal to $d$. The distance between two vertices $x$ and $y$ is equal to the number of edges on the path from $x$ to $y$. For example, the distance from $x$ to $x$ itself is equal to $0$. The distance from the vertex $v$ to some path from $x$ to $y$ is equal to the minimum among distances from $v$ to any vertex on the path from $x$ to $y$.
For the purpose of solving the task, let's choose some root in the tree and introduce another operation to the tree: add $k$ to all vertices that are in the subtree of the given vertex $v$ and on the distance $d$ from $v$. For example, if $d = 0$, it's $v$ itself or if $d = 1$ then it's all children of $v$. Let's $p[v]$ be the parent of vertex $v$, $p^2[v] = p[p[v]]$, $p^3[v] = p[p[p[v]]]$ and so on ($p^0[v] = v$). So, how to perform this operation? Instead of adding $k$ to all vertices in the subtree, we can add $k$ only to the vertex $v$. And when we need to get the answer for some vertex $u$, we will get it from $ans[p^d[u]]$. Of course, since there are different $d$-s, we'll create different arrays $ans_d$ for each possible $d$. So, the answer for the vertex will be equal to $\sum_{i=0}^{d}{ans_i[p^i[d]]}$. Now, let's discuss how to use the introduced operation to perform the given one. We can make the given operation "$u$ $v$ $k$ $d$" using ours in the following way: Let's find $l = lca(u, v)$ using any standard algorithm (binary lifting, for example). Let's split all affected vertices in three groups: subtrees of path $[v, l)$ ($v$ inclusive, $l$ exclusive), subtrees of path $[u, l)$ and subtrees of path $l, p[l], p^2[l], \dots, p^d[l]$. Note that in such way all affected vertices belong to at least one group. Let's look at group of path $[v, l)$. The lowest vertices are on distance $d$ from $v$, the next "level" are on distance $d$ from $p[v]$, the next "level" are on distance $d$ from $p^2[v]$ and so on. The last "level" we'll consider in this group is the vertices in the subtree of the child of $l$ on distance $d$ from it. In such a way, all we need to do is add $k$ to all $ans_d$ on the path from $[v, l)$. The group of the path $[u, l)$ is handled in the same way. What's left? 
It's vertices in subtree of $l$ on distances $d, (d-1), \dots, 0$; in subtree of $p[l]$ on distances $(d-1), (d-2), \dots, 0$; in subtree of $p^i[l]$ on distances $(d-i), (d-i-1), \dots, 0$; in subtree of $p^d[l]$ on distance $0$. Note that vertices in subtree of $l$ on distance $d-2$ are included in vertices in subtree of $p[l]$ on distance $d-1$. Analogously, vertices on distance $d-3$ from $l$ are included in vertices on distance $d-2$ from $p[l]$. Moreover, vertices on distance $d-4$ from $l$ are included in "$d-3$ from $p[l]$", which are included in "$d-2$ from $p^2[l]$", and so on. In other words, all we need to process are the vertices: in subtree of $l$ on distances $d$ and $(d-1)$, in subtree of $p[l]$ on distances $(d-1)$ and $(d-2)$, ..., in subtree of $p^i[l]$ on distances $(d-i)$ and $(d-i-1)$. In total, it's at most $2d$ operations of the form "add $k$ to some vertex $x$". As a result, all we need to do is: add $k$ on the path from $v$ to some ancestor $l$ of $v$; add $k$ in some vertex $v$ (can be done as operation $1$ on path $[v, p[v])$); ask the value in some vertex $v$. In total, complexity is $O(n \log{n} + m d \log{n})$ time and $O(n (\log{n} + d))$ space. P.S.: the second operation can be further optimized to $O(d + \log{n})$, but it's not really necessary.
[ "data structures", "dfs and similar", "trees" ]
2,800
// Codeforces 1749F "Distance to the Path" — Euler tour + Fenwick trees.
// D = 21 because queries guarantee d <= 20.  solve() first grafts a chain of
// D extra vertices above vertex n-1 and roots the tree at its top, so that
// the i-th ancestor p^i[v] is defined for every vertex and every i < D.
// build() computes the Euler-tour intervals [tin, tout) and the binary-
// lifting table p[pw][v]; lca() uses inside() interval containment.
// t[i] is a DS over the Euler tour implementing, via a difference trick,
// "add k on a vertical path" (addPath / addVertex) and "sum over the
// ancestors' marks" (getVertex = subtree sum at tin..tout).  A query of
// type 1 answers with sum over i of t[i] evaluated at the i-th ancestor of
// v — exactly the editorial's sum ans_i[p^i[v]].
// A type-2 update adds k in t[d] along both path halves [u, l) and [v, l),
// then walks d+1 ancestors of l, marking each p^i[l] at distances d-i and
// d-i-1 (the editorial's "at most 2d single-vertex operations").
// NOTE(review): the #include / #ifdef directives appear on one collapsed
// line in this record; the code is byte-preserved as stored.
#include<bits/stdc++.h> using namespace std; #define fore(i, l, r) for(int i = int(l); i < int(r); i++) #define sz(a) int((a).size()) typedef long long li; typedef pair<int, int> pt; const int INF = int(1e9); const li INF64 = li(1e18); const int N = int(2e5) + 55; const int LOG = 18; int n; vector<int> g[N]; inline bool read() { if(!(cin >> n)) return false; fore (i, 0, n - 1) { int u, v; cin >> u >> v; u--, v--; g[u].push_back(v); g[v].push_back(u); } return true; } int p[LOG][N]; int tin[N], tout[N], T = 0; void build(int v, int pr) { tin[v] = T++; p[0][v] = pr; fore (pw, 1, LOG) p[pw][v] = p[pw - 1][p[pw - 1][v]]; for (int to : g[v]) { if (to == pr) continue; build(to, v); } tout[v] = T; } bool inside(int l, int v) { return tin[l] <= tin[v] && tout[v] <= tout[l]; } int lca(int u, int v) { if (inside(u, v)) return u; if (inside(v, u)) return v; for (int pw = LOG - 1; pw >= 0; pw--) { if (!inside(p[pw][u], v)) u = p[pw][u]; } return p[0][u]; } const int D = 21; struct Fenwick { int n; vector<int> F; void init(int nn) { n = nn; F.assign(n, 0); } void add(int pos, int val) { for (; pos < n; pos |= pos + 1) F[pos] += val; } int sum(int pos) { int ans = 0; for (; pos >= 0; pos = (pos & (pos + 1)) - 1) ans += F[pos]; return ans; } int getSum(int l, int r) { return sum(r - 1) - sum(l - 1); } }; struct DS { Fenwick f; void init(int n) { f.init(n); } void addPath(int v, int l, int k) { f.add(tin[v], +k); f.add(tin[l], -k); } int getVertex(int v) { return f.getSum(tin[v], tout[v]); } void addVertex(int v, int k) { f.add(tin[v], +k); if (p[0][v] != v) f.add(tin[p[0][v]], -k); } }; DS t[D]; inline void solve() { fore (i, 0, D) { g[n - 1 + i].push_back(n + i); g[n + i].push_back(n - 1 + i); } int root = n + D - 1; build(root, root); fore (i, 0, D) t[i].init(root + 1); int m; cin >> m; fore(_, 0, m) { int tp; cin >> tp; if (tp == 1) { int v; cin >> v; v--; int ans = 0; for (int i = 0, cur = v; i < D; i++, cur = p[0][cur]) ans += t[i].getVertex(cur); cout << ans << endl; } else 
{ assert(tp == 2); int u, v, k, d; cin >> u >> v >> k >> d; u--, v--; int l = lca(u, v); if (u != l) t[d].addPath(u, l, k); if (v != l) t[d].addPath(v, l, k); for (int i = 0; i <= d; i++, l = p[0][l]) { t[d - i].addVertex(l, k); if (d - i > 0) t[d - i - 1].addVertex(l, k); } } } } int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); int tt = clock(); #endif ios_base::sync_with_stdio(false); cin.tie(0), cout.tie(0); cout << fixed << setprecision(15); if(read()) { solve(); #ifdef _DEBUG cerr << "TIME = " << clock() - tt << endl; tt = clock(); #endif } return 0; }
1750
A
Indirect Sort
You are given a permutation $a_1, a_2, \ldots, a_n$ of size $n$, where each integer from $1$ to $n$ appears \textbf{exactly once}. You can do the following operation any number of times (possibly, zero): - Choose any three indices $i, j, k$ ($1 \le i < j < k \le n$). - If $a_i > a_k$, replace $a_i$ with $a_i + a_j$. Otherwise, swap $a_j$ and $a_k$. Determine whether you can make the array $a$ sorted in non-descending order.
We claim that we can sort the array if and only if $a_1 = 1$. Necessity We can notice that index $1$ cannot be affected by any swap operation. Let's see what happens to the value $1$. According to the definition of the operation, it can either increase or be swapped. In order to be increased, there must exist some $k$ such that $1 > a_k$, but since $1$ is the minimum possible value, it will never be true, as other values in array $a$ can only increase as well. Since index $1$ can not be affected by a swap operation and $a_1>1$, we conclude that if $a_1 \neq 1$, the answer is No. Sufficiency Let's focus on the second operation. Since we have $a_1 = 1$, we can always choose $i=1$ and the operation then turns into picking some pair $2 \le j < k \le n$ and swapping $a_j$ with $a_k$. It's trivial to see we can always sort with such an operation.
[ "constructive algorithms", "implementation", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// Indirect Sort: the permutation can be sorted iff a_1 == 1 — index 1 is
// never moved by a swap, and the value 1 can never be increased; conversely,
// with a_1 == 1 the operation degenerates into an arbitrary swap of a_j, a_k.
int main() {
    int t;
    scanf("%d", &t);
    while (t--) {
        int n;
        scanf("%d", &n);
        int first = 0;
        for (int i = 0; i < n; ++i) {
            int x;
            scanf("%d", &x);
            if (i == 0) first = x;   // only the first element decides the answer
        }
        puts(first == 1 ? "Yes" : "No");
    }
    return 0;
}
1750
B
Maximum Substring
A binary string is a string consisting only of the characters 0 and 1. You are given a binary string $s$. For some non-empty substring$^\dagger$ $t$ of string $s$ containing $x$ characters 0 and $y$ characters 1, define its cost as: - $x \cdot y$, if $x > 0$ and $y > 0$; - $x^2$, if $x > 0$ and $y = 0$; - $y^2$, if $x = 0$ and $y > 0$. Given a binary string $s$ of length $n$, find the maximum cost across all its non-empty substrings. $^\dagger$ A string $a$ is a substring of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters from the beginning and several (possibly, zero or all) characters from the end.
To maximize $x \cdot y$, it is best to take the whole string, since it contains the maximum possible number of $0$s and $1$s. Then consider $x \cdot x$ and $y \cdot y$: we need to compute the maximum length of a contiguous run of $0$s or $1$s, compare its square with the first value, and take the larger of the two as the answer.
[ "brute force", "greedy", "implementation" ]
800
#include <bits/stdc++.h>
using namespace std;

// Maximum Substring: the optimum is either (#zeros * #ones) over the whole
// string, or L^2 where L is the longest run of equal characters.
int main() {
    cin.tie(nullptr)->ios_base::sync_with_stdio(false);
    int q;
    cin >> q;
    while (q--) {
        long long n;
        string s;
        cin >> n >> s;
        long long zeros = count(s.begin(), s.end(), '0');
        long long ones = (long long)s.size() - zeros;
        long long best = zeros * ones;
        long long run = 1;   // length of the current run of equal chars
        for (size_t i = 1; i < s.size(); ++i) {
            if (s[i] == s[i - 1]) {
                ++run;
            } else {
                best = max(best, run * run);
                run = 1;
            }
        }
        best = max(best, run * run);   // close the final run
        cout << best << '\n';
    }
}
1750
C
Complementary XOR
You have two binary strings $a$ and $b$ of length $n$. You would like to make all the elements of both strings equal to $0$. Unfortunately, you can modify the contents of these strings using only the following operation: - You choose two indices $l$ and $r$ ($1 \le l \le r \le n$); - For every $i$ that respects $l \le i \le r$, change $a_i$ to the opposite. That is, $a_i := 1 - a_i$; - For every $i$ that respects either $1 \le i < l$ or $r < i \le n$, change $b_i$ to the opposite. That is, $b_i := 1 - b_i$. Your task is to determine if this is possible, and if it is, to find such an appropriate chain of operations. The number of operations \textbf{should not exceed} $n + 5$. It can be proven that if such chain of operations exists, one exists with at most $n + 5$ operations.
For each operation, the interval changed by a sequence and b sequence is complementary, so you must judge whether all $[a_i=b_i]$ are the same at the beginning. If they are different, you can't have a solution. Now, if $a = \neg b$, we can do an operation on $a$ and have $a=b$. Now suppose $a_i=b_i=1$ for some $i$ and try to make $a_i=b_i=0$ without changing anything else. If $i>1$, then this is very simple, we can just do an operation with $(1,i)$ and an operation on $(1,i-1)$. If $i=1$ we can make $(1,n)$ and $(2,n)$. Since $n>1$, this can always be done, thus we found a solution using $2 \cdot n + O(1)$ operations. To optimize it to only use $n + O(1)$ operations, note that we only care about the parity of the number of operations did at any index.
[ "constructive algorithms", "implementation" ]
1,400
#include <bits/stdc++.h>
using namespace std;

// Complementary XOR: a solution exists iff a == b or a == NOT b everywhere.
// If a is the complement of b, one op on [1, n] makes them equal.  Then each
// '1' at position i > 1 is cleared by ops (1, i) and (1, i-1); since only
// the parity of uses of each prefix op matters, those are batched.  A '1'
// at position 1 needs the special pair (1, n), (2, n).
int main() {
    cin.tie(nullptr)->sync_with_stdio(false);
    int t;
    cin >> t;
    while (t--) {
        int n;
        string a, b;
        cin >> n >> a >> b;
        a = '$' + a;                   // 1-based indexing
        b = '$' + b;
        bool allOpposite = true;
        for (int i = 1; i <= n; ++i) {
            if (a[i] != char('1' - b[i] + '0')) {
                allOpposite = false;
                break;
            }
        }
        if (!allOpposite && a != b) {
            cout << "NO\n";
            continue;
        }
        vector<pair<int, int>> ops;
        if (a[1] != b[1]) {            // a == NOT b: one full flip makes a == b
            ops.push_back({1, n});
            a = b;
        }
        vector<int> prefixUses(n + 1); // parity counter for op (1, i)
        for (int i = 1; i <= n; ++i) {
            if (a[i] != '1') continue;
            if (i == 1) {
                ops.push_back({1, n});
                ops.push_back({2, n});
            } else {
                prefixUses[i]++;
                prefixUses[i - 1]++;
            }
        }
        for (int i = 1; i <= n; ++i)
            if (prefixUses[i] % 2 == 1) ops.push_back({1, i});
        cout << "YES\n" << (int)ops.size() << '\n';
        for (auto [l, r] : ops) cout << l << ' ' << r << '\n';
    }
}
1750
D
Count GCD
You are given two integers $n$ and $m$ and an array $a$ of $n$ integers. For each $1 \le i \le n$ it holds that $1 \le a_i \le m$. Your task is to count the number of different arrays $b$ of length $n$ such that: - $1 \le b_i \le m$ for each $1 \le i \le n$, and - $\gcd(b_1,b_2,b_3,...,b_i) = a_i$ for each $1 \le i \le n$. Here $\gcd(a_1,a_2,\dots,a_i)$ denotes the greatest common divisor (GCD) of integers $a_1,a_2,\ldots,a_i$. Since this number can be too large, print it modulo $998\,244\,353$.
We can notice that if for some $2 \le i \le n$, $a_{i-1}$ is not divisible by $a_{i}$, then the answer is $0$. Else, note that all the prime factors of $a_1$ are also prime factors in all the other values. Thus after factorizing $a_1$ we can quickly factorize every other value. Now let's find the number of ways we can select $b_i$ for every $i$. The answer will be the product of these values since each position is independent. It's easy to see that there is only one way to select $b_1$, that is $a_1$. Now for $i > 1$ we need to find the number of values $x$ such that $gcd(a_{i-1},x)=a_i$. Let $x=a_i \cdot k$, we can rephrase as $gcd( \frac{a_{i-1}}{a_i} \cdot a_i, a_i \cdot k) = a_i$, which is also equivalent to $gcd( \frac{a_{i-1}}{a_i}, k) = 1$. We have $k \le \frac{m}{a_i}$, thus the task reduces to a simple principle of inclusion-exclusion problem. Time complexity $O(2^9 \cdot 9 \cdot log + \sqrt{m})$ per testcase.
[ "combinatorics", "math", "number theory" ]
1,800
#include <bits/stdc++.h>
using namespace std;

const long long MOD = 998244353;

// Distinct prime factors of x by trial division, O(sqrt(x)).
vector<int> prime_factors(int x) {
    vector<int> primes;
    for (int d = 2; 1LL * d * d <= x; ++d) {
        if (x % d) continue;
        primes.push_back(d);
        while (x % d == 0) x /= d;
    }
    if (x > 1) primes.push_back(x);
    return primes;
}

// Count GCD (1750D): b_i must satisfy gcd(a_{i-1}, b_i) = a_i.  Writing
// b_i = a_i * k, this becomes counting k <= m / a_i coprime to
// a_{i-1} / a_i — inclusion-exclusion over the primes of a_1 (every a_i
// divides a_1, so its prime factors are among a_1's).  The final answer is
// the product of the per-position counts.
int main() {
    cin.tie(nullptr)->sync_with_stdio(false);
    int t;
    cin >> t;
    while (t--) {
        int n, m;
        cin >> n >> m;
        vector<int> a(n + 1);
        for (int i = 1; i <= n; ++i) cin >> a[i];

        // The sequence must be a divisibility chain, else no b exists.
        bool chainOk = true;
        for (int i = 2; i <= n && chainOk; ++i)
            if (a[i - 1] % a[i] != 0) chainOk = false;
        if (!chainOk) {
            cout << 0 << '\n';
            continue;
        }

        vector<int> basePrimes = prime_factors(a[1]);

        // Memoize the coprime count for each distinct (a_{i-1}, a_i) pair.
        map<pair<int, int>, int> counted;
        for (int i = 2; i <= n; ++i) counted[{a[i - 1], a[i]}] = 0;
        for (auto &entry : counted) {
            const int quotient = entry.first.first / entry.first.second;
            const int limit = m / entry.first.second;
            vector<int> rel;               // primes dividing the quotient
            for (int p : basePrimes)
                if (quotient % p == 0) rel.push_back(p);
            const int sz = (int)rel.size();
            int cnt = 0;
            // Inclusion-exclusion: +/- limit / (product of chosen primes).
            for (int mask = 0; mask < (1 << sz); ++mask) {
                int prod = 1, bits = 0;
                for (int j = 0; j < sz; ++j)
                    if (mask >> j & 1) {
                        prod *= rel[j];
                        ++bits;
                    }
                cnt += (bits % 2 == 0 ? limit / prod : -(limit / prod));
            }
            entry.second = cnt;
        }

        long long answer = 1;
        for (int i = 2; i <= n; ++i)
            answer = answer * counted[{a[i - 1], a[i]}] % MOD;
        cout << answer << '\n';
    }
}
1750
E
Bracket Cost
Daemon Targaryen decided to stop looking like a Metin2 character. He turned himself into the most beautiful thing, a bracket sequence. For a bracket sequence, we can do two kind of operations: - Select one of its substrings$^\dagger$ and cyclic shift it to the right. For example, after a cyclic shift to the right, "(())" will become ")(()"; - Insert any bracket, opening '(' or closing ')', wherever you want in the sequence. We define the cost of a bracket sequence as the \textbf{minimum} number of such operations to make it balanced$^\ddagger$. Given a bracket sequence $s$ of length $n$, find the sum of costs across all its $\frac{n(n+1)}{2}$ non-empty substrings. Note that for each substring we calculate the cost \textbf{independently}. $^\dagger$ A string $a$ is a substring of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters from the beginning and several (possibly, zero or all) characters from the end. $^\ddagger$ A sequence of brackets is called balanced if one can turn it into a valid math expression by adding characters $+$ and $1$. For example, sequences "(())()", "()", and "(()(()))" are balanced, while ")(", "(()", and "(()))(" are not.
Let $a_i=1$ if $s_i=($, $a_i=-1$ if $s_i=)$, and let $b_i$ be the prefix sum of $a_i$. Theorem: the cost of $s[l+1,r]$ is $\max(b_l,b_r)-\min(b_l,b_{l+1},\dots,b_r)$. Necessity: after one operation, $\max(b_l,b_r)-\min(b_l,b_{l+1},\dots,b_r)$ decreases by at most one. Sufficiency: if $b_l<b_r$, we can do operation 2 and add a right bracket at the end of the string. If $b_l>b_r$, we can do operation 2 and add a left bracket at the beginning of the string. If $b_l=b_r$, let $x$ be the largest index such that $b_x=\min(b_l,b_{l+1},\dots,b_r)$; then $s_{x+1}=($, so we can do operation 1 and cyclically shift $s[l,x+1]$ to the right. In every case, $\max(b_l,b_r)-\min(b_l,b_{l+1},\dots,b_r)$ decreases by one after the operation. We can use a binary indexed tree to calculate the sums of $\max(b_l,b_r)$ and $\min(b_l,b_{l+1},\dots,b_r)$ over all substrings.
[ "binary search", "data structures", "divide and conquer", "dp", "greedy", "strings" ]
2,400
"#include <bits/stdc++.h>\n#define int long long\nusing namespace std;\nstruct bit\n{\n\tvector<int> a;\n\tvoid resize(int n)\n\t{\n\t\ta = vector<int>(n + 1);\n\t}\n\tvoid update(int pos, int val)\n\t{\n\t\tint n = (int)a.size() - 1;\n\t\tfor (int i = pos; i <= n; i += i & (-i))\n\t\t{\n\t\t\ta[i] += val;\n\t\t}\n\t}\n\tint query(int pos)\n\t{\n\t\tint ans = 0;\n\t\tfor (int i = pos; i; i -= i & (-i))\n\t\t{\n\t\t\tans += a[i];\n\t\t}\n\t\treturn ans;\n\t}\n\tint query(int st, int dr)\n\t{\n\t\treturn query(dr) - query(st - 1);\n\t}\n};\nint32_t main()\n{\n\tcin.tie(nullptr)->ios_base::sync_with_stdio(false);\n\tint q;\n\tcin >> q;\n\twhile (q--)\n\t{\n\t\tint n;\n\t\tstring s;\n\t\tcin >> n >> s;\n\t\ts = '$' + s;\n\t\tvector<int> pref(n + 1);\n\t\tfor (int i = 1; i <= n; ++i)\n\t\t{\n\t\t\tpref[i] = pref[i - 1] + (s[i] == ')' ? -1 : 1);\n\t\t}\n\t\tvector<int> dp(n + 2);\n\t\tstack<int> paranteze;\n\t\tfor (int i = n; i >= 1; --i)\n\t\t{\n\t\t\tif (s[i] == '(')\n\t\t\t{\n\t\t\t\tif (!paranteze.empty())\n\t\t\t\t{\n\t\t\t\t\tdp[i] = dp[paranteze.top() + 1];\n\t\t\t\t\tparanteze.pop();\n\t\t\t\t}\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tdp[i] = dp[i + 1] + (n - i + 1);\n\t\t\t\tparanteze.push(i);\n\t\t\t}\n\t\t}\n\t\tint ans = 0;\n\t\tfor (int i = 1; i <= n; ++i)\n\t\t{\n\t\t\tans += dp[i];\n\t\t}\n\t\tmap<int, int> norm;\n\t\tvector<int> lesgo = pref;\n\t\tsort(lesgo.begin(), lesgo.end());\n\t\tint p = 1;\n\t\tfor (int i = 0; i <= n; ++i)\n\t\t{\n\t\t\tif (norm.find(lesgo[i]) == norm.end())\n\t\t\t{\n\t\t\t\tnorm[lesgo[i]] = p++;\n\t\t\t}\n\t\t}\n\t\tfor (int i = 0; i <= n; ++i)\n\t\t{\n\t\t\tlesgo[i] = norm[pref[i]];\n\t\t}\n\t\tp--;\n\t\tbit tree;\n\t\ttree.resize(p);\n\t\tfor (int i = 0; i <= n; ++i)\n\t\t{\n\t\t\tans += tree.query(1, lesgo[i]) * pref[i];\n\t\t\ttree.update(lesgo[i], 1);\n\t\t}\n\t\ttree.resize(p);\n\t\tfor (int i = n; i >= 0; --i)\n\t\t{\n\t\t\tans += tree.query(lesgo[i], p) * -pref[i];\n\t\t\ttree.update(lesgo[i], 1);\n\t\t}\n\t\tcout << ans 
<< '\\n';\n\t}\n}"
1750
F
Majority
Everyone was happy coding, until suddenly a power shortage happened and the best competitive programming site went down. Fortunately, a system administrator bought some new equipment recently, including some UPSs. Thus there are some servers that are still online, but we need all of them to be working in order to keep the round rated. Imagine the servers being a binary string $s$ of length $n$. If the $i$-th server is online, then $s_i = 1$, and $s_i = 0$ otherwise. A system administrator can do the following operation called electricity spread, that consists of the following phases: - Select two servers at positions $1 \le i < j \le n$ such that both are online (i.e. $s_i=s_j=1$). The spread starts only from online servers. - Check if we have enough power to make the spread. We consider having enough power if the number of turned on servers in range $[i, j]$ is at least the number of turned off servers in range $[i, j]$. More formally, check whether $2 \cdot (s_i + s_{i+1} + \ldots + s_j) \ge j - i + 1$. - If the check is positive, turn on all the offline servers in range $[i, j]$. More formally, make $s_k := 1$ for all $k$ from $i$ to $j$. We call a binary string $s$ of length $n$ rated if we can turn on all servers (i.e. make $s_i = 1$ for $1 \le i \le n$) using the electricity spread operation any number of times (possibly, $0$). Your task is to find the number of rated strings of length $n$ modulo $m$.
First off, let's try to reduce the given operation to a simpler form. We claim that if it is possible to make the string $111...111$ using the specified operation, we can make it $111...111$ by doing the following new operation: Select two indices $(i,j)$ such that the substring $s_{i...j}$ looks like $111...100...001...111$ and make it all ones. The proof is just to show that if we can perform operation $(i,j)$, then there must exist some substring of $s_{i...j}$ respecting the property of the new operation. Let $dp_{i,j}$ be the number of binary strings of length $i$ that in the final form (after no more operations can be made) begin with a prefix full of ones of length $j$. For transitions, we have to iterate through the length of the next $0$ sequence and the length of the fixable prefix right after it (such that we cannot perform an operation to make a bigger prefix, because that would break the definition), but there is one problem -- we cannot compute $dp_{i,i}$ using any recurrence relation. Fortunately, we can compute it by subtracting the ways we get roadblocks, because, by definition, $dp_{i,i}$ can be turned into $111...111$, which doesn't have any roadblocks at all. This is $O(n^4)$ if implemented naively, but one can optimize it to $O(n^2)$ by keeping prefix sums over prefix sums.
[ "combinatorics", "dp", "math", "strings" ]
2,700
"#include <bits/stdc++.h>\nusing namespace std;\nint p2[5001], dp[5001][5001], sum[5001][5001], sum2[5001][5001];\nint32_t main()\n{\n int n, mod;\n cin >> n >> mod;\n p2[0] = 1;\n for (int i = 1; i <= n; ++i)\n {\n p2[i] = p2[i - 1] + p2[i - 1];\n if (p2[i] >= mod)\n {\n p2[i] -= mod;\n }\n }\n dp[1][1] = 1;\n for (int j = 1; j <= n; ++j)\n {\n sum[1][j] = 1;\n sum2[1][j] = 1;\n }\n for (int i = 2; i <= n; ++i)\n {\n for (int j = 1; j <= i; ++j)\n {\n if (i == j)\n {\n dp[i][j] = p2[i - 2];\n for (int k = 1; k < i; ++k)\n {\n dp[i][j] -= dp[i][k];\n if (dp[i][j] < 0)\n {\n dp[i][j] += mod;\n }\n }\n continue;\n }\n int lg = j + 1;\n if (i - j - lg >= 0)\n {\n dp[i][j] = (1ll * dp[j][j] * sum2[i - j - lg][lg - j - 1]) % mod;\n }\n }\n for (int j = 1; j <= i; ++j)\n {\n sum[i][j] = sum[i][j - 1] + dp[i][j];\n if (sum[i][j] >= mod)\n {\n sum[i][j] -= mod;\n }\n }\n for (int j = i + 1; j <= n; ++j)\n {\n sum[i][j] = sum[i][j - 1];\n }\n for (int j = 0; j <= n; ++j)\n {\n sum2[i][j] = sum[i][j];\n if (j + 1 <= n)\n {\n sum2[i][j] += sum2[i - 1][j + 1];\n if (sum2[i][j] >= mod)\n {\n sum2[i][j] -= mod;\n }\n }\n }\n }\n cout << dp[n][n];\n}"
1750
G
Doping
We call an array $a$ of length $n$ fancy if for each $1 < i \le n$ it holds that $a_i = a_{i-1} + 1$. Let's call $f(p)$ applied to a permutation$^\dagger$ of length $n$ as the \textbf{minimum} number of subarrays it can be partitioned such that each one of them is fancy. For example $f([1,2,3]) = 1$, while $f([3,1,2]) = 2$ and $f([3,2,1]) = 3$. Given $n$ and a permutation $p$ of length $n$, we define a permutation $p'$ of length $n$ to be $k$-special if and only if: - $p'$ is lexicographically smaller$^\ddagger$ than $p$, and - $f(p') = k$. Your task is to count for each $1 \le k \le n$ the number of $k$-special permutations modulo $m$. $^\dagger$ A permutation is an array consisting of $n$ distinct integers from $1$ to $n$ in arbitrary order. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array) and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array). $^\ddagger$ A permutation $a$ of length $n$ is lexicographically smaller than a permutation $b$ of length $n$ if and only if the following holds: in the first position where $a$ and $b$ differ, the permutation $a$ has a smaller element than the corresponding element in $b$.
Consider a permutation $p$ that is lexicographically smaller than the given permutation, and assume the first different position is $k$; if we fix $p_k$, the remaining numbers $a_{k+1},a_{k+2},...,a_n$ can be arranged in any order. Denote $S$ as the set of remaining numbers. Let $m$ be the number of consecutive pairs, $m=|\{x \mid x \in S, (x-1) \in S\cup\{p_k\}\}|$; $p_k$ is included because it is possible that $p_{k+1}=p_k+1$. Fix the number of positions where $p_i=p_{i-1}+1$; this consumes some consecutive pairs, and the remaining consecutive pairs should not increase the number of such positions. Define a sequence $p$ to be good if and only if it has no $i$ with $p_i=p_{i-1}+1$. Consider $dp[i][j]$ = number of good arrangements of $i$ values with $j$ consecutive pairs. Consider one consecutive pair: if it is the only wrong pair, the number of values decreases by one and this situation should be excluded; otherwise this pair can be ignored, so $dp[i][j]=dp[i][j-1]-dp[i-1][j-1]$, with boundary $dp[i][0]=i!$. After enumerating $k$, let $S'=\{a_k,a_{k+1},...,a_n\}$; $p_k$ is chosen from $S'$ and $p_k<a_k$. Assume $p_k=x$: if $x-1 \in S'$, the number of consecutive pairs decreases by one, otherwise it stays the same. Additionally, if $x \ne p_{k-1}+1$, the number of subarrays increases by one. For every situation, count the number of valid $p_k$ and then calculate the answer. Time complexity: $O(n^2)$. Since we have to deal with the lexicographically smaller condition, it's natural to fix the common prefix $i$. Now, we can mismatch $p_{i+1}$ with some of the left values. After that, we are left with permuting the other elements. Let's say we are trying to find the number of ways to permute the remaining values $\{a_1,a_2,a_3,...,a_k\}$ such that $a_i \neq a_{i+1}-1$ for $1 \le i < k$. For each $i$, we define $b_i$ to be a boolean value denoting the existence of $a_i - 1$ in set $a$.
We can formulate the following 2 claims: The number of ways to permute array $a$ only depends on $k$ and sum of $b$ For some pair $(i,j)$, the number of ways to permute $a$ starting with $a_i$ is equal to the number of ways to permute $a$ starting with $a_j$ if and only if $b_i = b_j$ With these observations, we can formulate a $dp$ solution for counting permutations. Let $dp_{i,j,flag}$ be the number of ways to permute if $k=i$, $j$ is the sum of $b$, starting with a value with $b$ value equal to $flag$. Let's conclude transitions: $dp_{i,1,true} = dp_{i-1,1,false}$ $dp_{i,1,false} = (dp_{i-1,1,true} + dp_{i-1,1,false}) \cdot (i-1)$ $-$ By claim 2, we can think of ending in the minimum value and we are left with another permutation we can transition 2. $dp_{i,j>1,true} = (dp_{i-1,j-1,true}+dp_{i-1,j-1,false}) \cdot j$ $-$ By claim 1, we can think of an equivalent array $a$ in which all values with $b_i = 1$ are left alone. Combining with claim 2, all distributions are equal, and selecting a single value makes the permutation have $j-1$ values with $b_i=1$. $dp_{i,j>1,false} = (dp_{i-1,j,false} + dp_{i-1,j,true}) \cdot (i-j)$ $-$ By claim 2, we can select any value with $b_i=0$ and combining with claim $2$, we can select the first value in the last chain. Let $sum$ be the sum of $b$.The number of ways to permute $a$ such that it respects the given demands, is $dp_{k,sum,true} + dp_{k,sum,false}$. Now the number of ways to permute $a$ with $t$ positions such that $a_i \neq a_{i+1}-1$ is $\binom{k-sum}{t-sum} \cdot (dp_{k,t,true} + dp_{k,t,false})$. This is because we can choose the values that contribute, $sum$ of them are always contributing, so we are left with choosing $t-sum$ values out of $k-sum$ possibilities. Now thinking about the values we missmatch $p_{i+1}$ with, looking at claim $2$, a lot of values are equivalent. We can count each value type separately since there are $O(1)$ types, and have a $O(n)$ complexity to update the results accordingly. 
Also be careful with the case where a past range of consecutive values is continued after the mismatch. It's easy to handle this because $p'_{i+1}$ is fixed and we can count it independently in $O(n)$. Thus we have achieved $O(n^2)$ total time complexity.
[ "combinatorics", "dp", "math" ]
3,300
"#include <bits/stdc++.h>\nusing namespace std;\nint mod;\nclass Mint\n{\npublic:\n int val;\n Mint(int _val = 0)\n {\n val = _val % mod;\n }\n Mint(long long _val)\n {\n val = _val % mod;\n }\n Mint operator+(Mint oth)\n {\n return val + oth.val;\n }\n Mint operator*(Mint oth)\n {\n return 1LL * val * oth.val;\n }\n Mint operator-(Mint oth)\n {\n return val - oth.val + mod;\n }\n void operator+=(Mint oth)\n {\n val = (Mint(val) + oth).val;\n }\n void operator-=(Mint oth)\n {\n val = (Mint(val) - oth).val;\n }\n void operator*=(Mint oth)\n {\n val = (Mint(val) * oth).val;\n }\n};\nint main()\n{\n cin.tie(nullptr)->sync_with_stdio(false);\n int n;\n cin >> n >> mod;\n vector<int> a(n + 1);\n for (int i = 1; i <= n; ++i)\n {\n cin >> a[i];\n }\n a[0] = -1;\n vector<vector<Mint>> pas(n + 1, vector<Mint>(n + 1));\n pas[0][0] = 1;\n for (int i = 1; i <= n; ++i)\n {\n for (int j = 0; j <= i; ++j)\n {\n if (j == 0 || j == i)\n {\n pas[i][j] = 1;\n continue;\n }\n pas[i][j] = pas[i - 1][j] + pas[i - 1][j - 1];\n }\n }\n function<Mint(int, int)> combi = [&](int i, int j)\n {\n if (i < j)\n {\n return Mint(0);\n }\n return pas[i][j];\n };\n function<Mint(int, int)> sb = [&](int n, int k)\n {\n return combi(n - 1, n - k);\n };\n vector<vector<vector<Mint>>> dp(n + 1, vector<vector<Mint>>(n + 1, vector<Mint>(2)));\n vector<vector<vector<Mint>>> coef(n + 1, vector<vector<Mint>>(n + 1, vector<Mint>(2)));\n dp[1][1][true] = 1;\n coef[1][1][true] = 1;\n for (int i = 2; i <= n; ++i)\n {\n dp[i][1][true] = dp[i - 1][1][false];\n coef[i][1][true] = dp[i - 1][1][false];\n dp[i][1][false] = (dp[i - 1][1][true] + dp[i - 1][1][false]) * (i - 1);\n coef[i][1][false] = dp[i - 1][1][true] + dp[i - 1][1][false];\n for (int j = 2; j <= i; ++j)\n {\n dp[i][j][true] = (dp[i - 1][j - 1][false] + dp[i - 1][j - 1][true]) * j;\n coef[i][j][true] = dp[i - 1][j - 1][false] + dp[i - 1][j - 1][true];\n dp[i][j][false] = (dp[i - 1][j][false] + dp[i - 1][j][true]) * (i - j);\n coef[i][j][false] = dp[i - 
1][j][false] + dp[i - 1][j][true];\n }\n }\n vector<bool> exista(n + 2, true);\n exista[0] = false;\n exista[n + 1] = false;\n int cnt = 0;\n vector<Mint> ans(n + 1);\n for (int i = 1; i < n; ++i)\n {\n int last = -1;\n int m = 0;\n int sz = 0;\n for (int j = 1; j <= n; ++j)\n {\n if (exista[j])\n {\n m += j != (last + 1);\n last = j;\n sz++;\n }\n }\n function<void(int, int, int)> count = [&](int freq, int m, bool type)\n {\n if (freq == 0)\n {\n return;\n }\n if (type)\n {\n for (int k = 1; k <= n; ++k)\n {\n int need = k - cnt;\n if (need >= m)\n {\n ans[k] += combi(sz - m - 1, need - m) * (dp[need][m][false] + dp[need][m][true] - coef[need][m][true]) * freq;\n }\n if (need >= m - 1)\n {\n ans[k] += combi(sz - m - 1, need - m + 1) * coef[need + 1][m][true] * freq;\n }\n }\n }\n else\n {\n for (int k = 1; k <= n; ++k)\n {\n int need = k - cnt;\n if (need >= m)\n {\n ans[k] += combi(sz - m - 1, need - m) * (dp[need][m][true] + dp[need][m][false]) * freq;\n }\n }\n }\n };\n if (m)\n {\n if (exista[a[i - 1] + 1] && a[i - 1] + 1 < a[i])\n {\n if (exista[a[i - 1] + 2])\n {\n count(1, m, true);\n }\n else\n {\n count(1, m - 1, false);\n }\n }\n cnt++;\n int cnt1 = 0, cnt2 = 0, cnt3 = 0, cnt4 = 0;\n for (int j = 1; j < a[i]; ++j)\n {\n if (j != (a[i - 1] + 1) && exista[j])\n {\n if (exista[j - 1] && exista[j + 1])\n {\n cnt1++;\n continue;\n }\n if (!exista[j - 1] && !exista[j + 1])\n {\n cnt2++;\n continue;\n }\n if (!exista[j - 1])\n {\n cnt3++;\n continue;\n }\n if (!exista[j + 1])\n {\n cnt4++;\n continue;\n }\n }\n }\n count(cnt1, m + 1, true);\n count(cnt2, m - 1, false);\n count(cnt3, m, true);\n count(cnt4, m, false);\n cnt--;\n }\n exista[a[i]] = false;\n cnt += a[i] != (a[i - 1] + 1);\n }\n for (int i = 1; i <= n; ++i)\n {\n cout << ans[i].val << ' ';\n }\n}"
1750
H
BinaryStringForces
You are given a binary string $s$ of length $n$. We define a maximal substring as a substring that cannot be extended while keeping all elements equal. For example, in the string $11000111$ there are three maximal substrings: $11$, $000$ and $111$. In one operation, you can select two maximal adjacent substrings. Since they are maximal and adjacent, it's easy to see their elements must have different values. Let $a$ be the length of the sequence of ones and $b$ be the length of the sequence of zeros. Then do the following: - If $a \ge b$, then replace $b$ selected zeros with $b$ ones. - If $a < b$, then replace $a$ selected ones with $a$ zeros. As an example, for $1110000$ we make it $0000000$, for $0011$ we make it $1111$. We call a string being good if it can be turned into $1111...1111$ using the aforementioned operation any number of times (possibly, zero). Find the number of good substrings among all $\frac{n(n+1)}{2}$ non-empty substrings of $s$.
We call a maximal sequence of $0$s a $0$ block and a maximal sequence of $1$s a $1$ block. For each $i$, store some disjoint intervals with the following property: for each $j$ such that $i \le j$ and $s[j]=1$, $j$ is in one of the intervals if and only if $(i,j)$ is a good substring. We can prove the number of intervals for each $i$ is $O(\log n)$. Let's start from $(i,i)$ to $(i,n)$, and consider when a good substring becomes bad and a bad substring becomes good. Assume we are at $(i,j)$. If it is a good substring ending with $1$, it becomes bad when it meets a longer $0$ block, so we get one interval and go to a bad substring ending with $0$. If it is a bad substring ending with $0$, suppose the next $1$ is at $k$; then the next good intervals start at the smallest $j'$ with three properties: (1) $(k,j')$ is good, (2) $s[j']=1$, (3) $(k,j')$ is not shorter than the last $0$ block. Proof: Property 2 is obvious because we only care about substrings ending with $1$. If $(k,j')$ is shorter than the last $0$ block, it is impossible to change this $0$ block, so $(i,j')$ is bad. If $(k,j')$ is bad and it starts with a $1$ block $(k,j'')$ not shorter than the last $0$ block after some operations, then $j''$ is smaller than $j'$ and has those three properties; otherwise it is impossible to change the last $0$ block. If it has all three properties, then in substring $(i,j')$, $(k,j')$ is good and can change the last $0$ block to $1$, and then change $(i,j')$ to all $1$s, so we go to a good substring ending with $1$. When a good substring becomes bad, the length is doubled, so the number of intervals for each $i$ is $O(\log n)$. To know when a good substring becomes bad: for every $0$ block $(j,k)$, if $(j,k)$ is longer than $(i,j-1)$, then $(i,k)$ is bad; we can preprocess those $i$ in $O(n)$. To know when a bad substring becomes good, we go with $i$ from $n$ to $1$ and for each $i$, search in $O(\log n)$ intervals $O(\log n)$ times. So we get those intervals in $O(n\log^2 n)$. Now we can check whether each substring ending with $1$ is good or not.
Do the above algorithm on the reversed string, so we can check whether each substring starting with $1$ is good or not. But we cannot check substrings starting and ending with $0$. Suppose $(i',j')$ is the longest $0$ block in $(i,j)$; then $(i,j)$ is good if at least one of $(i,i'-1)$ and $(j'+1,j)$ is good and not shorter than $(i',j')$. Proof: After changing the longest $0$ block, we can change all other $0$ blocks. So for any substring, it is good if and only if we can change its longest $0$ block. If $(i,i'-1)$ is good and not shorter, it can change $(i',j')$, so $(i,j)$ is good. If $(i,j)$ is good, assume the longest $0$ block is changed from the left. So after some operations in $(i,i'-1)$, there is a substring $(k,i'-1)$ that becomes all $1$s and is not shorter than $(i',j')$. Then $(k,i'-1)$ can make $(i,i'-1)$ good as there is no longer $0$ block. Similarly, we can prove it for $(j'+1,j)$. So for each substring, we consider its longest $0$ block (substrings with no $0$ are always good). For convenience, if there are multiple maxima, we take the rightmost. The longest $0$ block in substrings can be either a $0$ block in the original string or part of a $0$ block in the original string. We can ignore substrings inside one $0$ block, so the longest $0$ block can only be a prefix or suffix, and the number of possible longest $0$ blocks is $O(n)$. For each possible $0$ block $(i,j)$, assume the longest substring in which $(i,j)$ is the longest $0$ block is $(i',j')$; we need to count the answer for substrings $(l,r)$ with $i' \le l \le i,j \le r \le j'$, which is equivalent to counting the number of $l$ such that $i'\le l \le i-(j-i+1)$ and $(l,i-1)$ is good, and the number of $r$ such that $j+(j-i+1) \le r \le j'$ and $(j+1,r)$ is good. Notice that $s[i-1]=s[j+1]=1$, so we can calculate them using the above intervals. We can use a persistent segment tree directly, or use a segment tree or binary indexed tree after sorting queries. We need $O(n\log n)$ modifications and $O(n)$ queries. Thus we have solved our problem in $O(n\log^2 n)$.
[ "constructive algorithms", "data structures", "dp" ]
3,500
"#include <bits/stdc++.h>\nusing namespace std;\nmt19937_64 rng(chrono::steady_clock::now().time_since_epoch().count());\nint random(int st, int dr)\n{\n uniform_int_distribution<mt19937::result_type> gen(st, dr);\n return gen(rng);\n}\nvector<int> lg;\nstruct bit\n{\n vector<int> b;\n void resize(int n)\n {\n b.resize(n + 1);\n }\n void update(int pos, int val)\n {\n int n = (int)b.size() - 1;\n for (int i = pos; i <= n; i += i & (-i))\n {\n b[i] += val;\n }\n }\n int query(int pos)\n {\n int ans = 0;\n for (int i = pos; i; i -= i & (-i))\n {\n ans += b[i];\n }\n return ans;\n }\n int query(int st, int dr)\n {\n return query(dr) - query(st - 1);\n }\n};\nstruct stack_rmq\n{\n vector<vector<int>> rmq;\n void insert(int val)\n {\n rmq.push_back({val});\n int sz = rmq.size() - 1;\n for (int i = 1; sz - (1 << i) + 1 >= 0; ++i)\n {\n rmq[sz].push_back(\n max(rmq[sz][i - 1], rmq[sz - (1 << (i - 1))][i - 1]));\n }\n }\n void update(int val)\n {\n int sz = (int)rmq.size() - 1;\n int cine = val + rmq[sz][0];\n rmq.pop_back();\n insert(cine);\n }\n int query(int st, int dr)\n {\n if (st > dr)\n {\n return 0;\n }\n int pow_2 = lg[dr - st + 1];\n return max(rmq[dr][pow_2], rmq[st + (1 << pow_2) - 1][pow_2]);\n }\n};\nstruct maximal\n{\n stack_rmq lesgo;\n vector<pair<int, int>> ranges;\n vector<int> qui;\n string s;\n int n;\n void build(string _s, int _n)\n {\n s = _s;\n n = _n;\n qui = vector<int>(n + 1);\n bool este = false;\n for (int i = 1; i <= n; ++i)\n {\n if (s[i] == '0')\n {\n if (este)\n {\n lesgo.update(1);\n ranges[(int)ranges.size() - 1].second++;\n }\n else\n {\n lesgo.insert(1);\n ranges.push_back({i, i});\n este = true;\n }\n }\n else\n {\n este = false;\n }\n qui[i] = (int)ranges.size() - 1;\n }\n }\n int query(int st, int dr)\n {\n if (s[st] == '1')\n {\n int l = qui[st] + 1;\n int r = (s[dr] == '0' ? qui[dr] - 1 : qui[dr]);\n int partial = (s[dr] == '0' ? 
dr - ranges[qui[dr]].first + 1 : 0);\n return max(lesgo.query(l, r), partial);\n }\n if (s[dr] == '1')\n {\n int r = qui[dr];\n int l = qui[st] + 1;\n int partial = (s[st] == '0' ? ranges[qui[st]].second - st + 1 : 0);\n return max(lesgo.query(l, r), partial);\n }\n }\n int next(int lg, int pos, bool fata)\n {\n assert(s[pos] == '1');\n if (fata)\n {\n int st = pos, dr = n;\n int ans = 0;\n while (st <= dr)\n {\n int mid = (st + dr) / 2;\n if (query(pos, mid) <= lg)\n {\n ans = mid;\n st = mid + 1;\n }\n else\n {\n dr = mid - 1;\n }\n }\n return ans;\n }\n else\n {\n int st = 1, dr = pos;\n int ans = 0;\n while (st <= dr)\n {\n int mid = (st + dr) / 2;\n if (query(mid, pos) < lg)\n {\n ans = mid;\n dr = mid - 1;\n }\n else\n {\n st = mid + 1;\n }\n }\n return ans;\n }\n }\n};\nvector<vector<pair<int, int>>> find_relevant_ranges(string s, int n)\n{\n vector<int> sp(n + 1);\n for (int i = 1; i <= n; ++i)\n {\n sp[i] = sp[i - 1] + (s[i] == '1');\n }\n function<int(int, int)> query = [&](int st, int dr)\n {\n return sp[dr] - sp[st - 1];\n };\n vector<vector<pair<int, int>>> cine(n + 1);\n stack_rmq lesgo;\n vector<pair<int, int>> secv;\n bool este = false;\n for (int i = n; i >= 1; --i)\n {\n if (s[i] == '0')\n {\n if (!este)\n {\n lesgo.insert(1);\n secv.push_back({i, i});\n este = true;\n }\n else\n {\n lesgo.update(1);\n secv[(int)secv.size() - 1].first--;\n }\n }\n else\n {\n este = false;\n }\n int cnt = 0;\n int p = i;\n while (p <= n)\n {\n int st = 0, dr = (int)secv.size() - 1;\n int rep = -1;\n while (st <= dr)\n {\n int mid = (st + dr) / 2;\n if (lesgo.query(mid, (int)secv.size() - 1) > cnt)\n {\n rep = mid;\n st = mid + 1;\n }\n else\n {\n dr = mid - 1;\n }\n }\n if (rep == -1)\n {\n cine[i].push_back({p, n});\n break;\n }\n st = secv[rep].first, dr = secv[rep].second;\n\n int l = (dr - st + 1);\n cnt += (secv[rep].first - p);\n if (st != p)\n {\n cine[i].push_back({p, st - 1});\n }\n p = secv[rep].first;\n if (l > cnt)\n {\n if (dr == n)\n {\n break;\n }\n 
int save = dr + 1;\n bool ok = false;\n for (int j = 0; j < cine[save].size(); ++j)\n {\n if (cine[save][j].second - save + 1 >= l)\n {\n pair<int, int> interv = {max(save + l - 1, cine[save][j].first), cine[save][j].second};\n if (query(interv.first, interv.second))\n {\n int low = interv.first, high = interv.second;\n int qui = -1;\n while (low <= high)\n {\n int mid = (low + high) / 2;\n if (query(interv.first, mid))\n {\n qui = mid;\n high = mid - 1;\n }\n else\n {\n low = mid + 1;\n }\n }\n cine[i].push_back({qui, qui});\n cnt += (qui - p + 1);\n p = qui + 1;\n ok = true;\n break;\n }\n }\n }\n if (!ok)\n {\n break;\n }\n }\n else\n {\n cine[i].push_back({p, dr});\n cnt += dr - p + 1;\n p = dr + 1;\n }\n }\n }\n return cine;\n}\n\nvector<vector<pair<int, int>>> cine1, cine2;\nint n;\nstring s;\nvoid smart()\n{\n long long ans = 0;\n vector<vector<pair<int, int>>> events1(n + 1);\n vector<vector<pair<int, int>>> events2(n + 1);\n bit tree1;\n bit tree2;\n maximal cine;\n cine.build(s, n);\n tree1.resize(n + 1);\n tree2.resize(n + 1);\n int p1 = 1;\n int p2 = 1;\n for (int i = 1; i <= n; ++i)\n {\n for (auto j : cine1[i])\n {\n events1[j.first].push_back({i, 1});\n if (j.second + 1 <= n)\n {\n events1[j.second + 1].push_back({i, -1});\n }\n }\n }\n for (int i = 1; i <= n; ++i)\n {\n for (auto j : cine2[i])\n {\n events2[j.first].push_back({i, 1});\n if (j.second + 1 <= n)\n {\n\n events2[j.second + 1].push_back({i, -1});\n }\n }\n }\n function<void(int)> move1 = [&](int pos)\n {\n while (p1 <= pos)\n {\n for (auto i : events1[p1])\n {\n tree1.update(i.first, i.second);\n }\n p1++;\n }\n };\n function<void(int)> move2 = [&](int pos)\n {\n while (p2 <= pos)\n {\n for (auto i : events2[p2])\n {\n tree2.update(i.first, i.second);\n }\n p2++;\n }\n };\n for (auto x : cine.ranges)\n {\n int lg = x.second - x.first + 1;\n int l = 0, r = 0;\n int ways1 = 0, ways2 = 0;\n if (x.first - 1 >= 1)\n {\n move1(x.first - 1);\n for (int i = 1; i < lg; ++i)\n {\n int qui = 
cine.next(i, x.first - 1, false);\n int last = x.first - i;\n if (qui <= last)\n {\n ans += tree1.query(qui, last);\n }\n }\n l = cine.next(lg, x.first - 1, false);\n int last = x.first - lg;\n ways1 = x.first - l + 1;\n if (l <= last)\n {\n ways1 -= tree1.query(l, last);\n }\n }\n else\n {\n l = 1;\n ways1 = 1;\n }\n if (x.second + 1 <= n)\n {\n move2(x.second + 1);\n for (int i = 1; i < lg; ++i)\n {\n int qui = cine.next(i, x.second + 1, true);\n int first = x.second + i;\n\n if (first <= qui)\n {\n ans += tree2.query(first, qui);\n }\n }\n r = cine.next(lg, x.second + 1, true);\n int first = x.second + lg;\n ways2 = r - x.second + 1;\n if (first <= r)\n {\n ways2 -= tree2.query(first, r);\n }\n }\n else\n {\n r = n;\n ways2 = 1;\n }\n ans += 1ll * (r - x.second + 1ll) * (x.first - l + 1ll) - 1ll * ways1 * ways2;\n }\n int lg = 0;\n for (int i = 1; i <= n; ++i)\n {\n if (s[i] == '1')\n {\n lg++;\n }\n else\n {\n ans += 1ll * lg * (lg + 1) / 2;\n lg = 0;\n }\n }\n ans += 1ll * lg * (lg + 1) / 2;\n cout << ans << '\\n';\n}\nint main()\n{\n cin.tie(nullptr)->sync_with_stdio(false);\n int q;\n cin >> q;\n while (q--)\n {\n cin >> n >> s;\n lg = vector<int>(n + 1);\n for (int i = 2; i <= n; ++i)\n {\n lg[i] = lg[i / 2] + 1;\n }\n s = '$' + s;\n cine1 = find_relevant_ranges(s, n);\n reverse(s.begin() + 1, s.end());\n cine2 = find_relevant_ranges(s, n);\n reverse(s.begin() + 1, s.end());\n for (int i = 1; i <= n; ++i)\n {\n for (int j = 0; j < (int)cine2[i].size(); ++j)\n {\n cine2[i][j].first = n - cine2[i][j].first + 1;\n cine2[i][j].second = n - cine2[i][j].second + 1;\n swap(cine2[i][j].first, cine2[i][j].second);\n }\n reverse(cine2[i].begin(), cine2[i].end());\n }\n for (int i = 1; i <= n; ++i)\n {\n if (i < (n - i + 1))\n {\n swap(cine2[i], cine2[n - i + 1]);\n }\n }\n smart();\n }\n}"
1753
A1
Make Nonzero Sum (easy version)
\textbf{This is the easy version of the problem. The difference is that in this version the array can not contain zeros. You can make hacks only if both versions of the problem are solved.} You are given an array $[a_1, a_2, \ldots a_n]$ consisting of integers $-1$ and $1$. You have to build a partition of this array into the set of segments $[l_1, r_1], [l_2, r_2], \ldots, [l_k, r_k]$ with the following property: - Denote the alternating sum of all elements of the $i$-th segment as $s_i$: $s_i$ = $a_{l_i} - a_{l_i+1} + a_{l_i+2} - a_{l_i+3} + \ldots \pm a_{r_i}$. For example, the alternating sum of elements of segment $[2, 4]$ in array $[1, 0, -1, 1, 1]$ equals to $0 - (-1) + 1 = 2$. - The sum of $s_i$ over all segments of partition should be equal to zero. Note that each $s_i$ does \textbf{not} have to be equal to zero, this property is about sum of $s_i$ over all segments of partition. The set of segments $[l_1, r_1], [l_2, r_2], \ldots, [l_k, r_k]$ is called a partition of the array $a$ of length $n$ if $1 = l_1 \le r_1, l_2 \le r_2, \ldots, l_k \le r_k = n$ and $r_i + 1 = l_{i+1}$ for all $i = 1, 2, \ldots k-1$. In other words, each element of the array must belong to exactly one segment. You have to build a partition of the given array with properties described above or determine that such partition does not exist. Note that it is \textbf{not} required to minimize the number of segments in the partition.
If the sum of all elements of the array is odd, the partition does not exist because partitioning does not affect the parity of the sum. Otherwise the answer exists. Let's build such a construction. As the sum of all elements is even, $n$ is even too. Consider pairs of elements with indices $(1, 2)$, $(3, 4)$, ..., $(n - 1, n)$. Consider the pair $(2i - 1, 2i)$. If $a_{2i - 1} = a_{2i}$, add the segment $[2i - 1, 2i]$ to the answer. In this case the alternating sum of elements of this segment will be equal to $a_{2i - 1} - a_{2i} = 0$. Otherwise we will add two segments to the answer: $[2i - 1, 2i - 1]$ and $[2i, 2i]$. The sum of the first segment is $a_{2i - 1}$, and the sum of the second segment is $a_{2i}$. The sum of two sums will be equal to zero. So the sum of all alternating sums will be equal to zero.
[ "constructive algorithms", "dp", "greedy" ]
1,300
null
1753
A2
Make Nonzero Sum (hard version)
\textbf{This is the hard version of the problem. The difference is that in this version the array contains zeros. You can make hacks only if both versions of the problem are solved.} You are given an array $[a_1, a_2, \ldots a_n]$ consisting of integers $-1$, $0$ and $1$. You have to build a partition of this array into the set of segments $[l_1, r_1], [l_2, r_2], \ldots, [l_k, r_k]$ with the following property: - Denote the alternating sum of all elements of the $i$-th segment as $s_i$: $s_i$ = $a_{l_i} - a_{l_i+1} + a_{l_i+2} - a_{l_i+3} + \ldots \pm a_{r_i}$. For example, the alternating sum of elements of segment $[2, 4]$ in array $[1, 0, -1, 1, 1]$ equals to $0 - (-1) + 1 = 2$. - The sum of $s_i$ over all segments of partition should be equal to zero. Note that each $s_i$ does \textbf{not} have to be equal to zero, this property is about sum of $s_i$ over all segments of partition. The set of segments $[l_1, r_1], [l_2, r_2], \ldots, [l_k, r_k]$ is called a \textbf{partition} of the array $a$ of length $n$ if $1 = l_1 \le r_1, l_2 \le r_2, \ldots, l_k \le r_k = n$ and $r_i + 1 = l_{i+1}$ for all $i = 1, 2, \ldots k-1$. In other words, each element of the array must belong to exactly one segment. You have to build a partition of the given array with properties described above or determine that such partition does not exist. Note that it is \textbf{not} required to minimize the number of segments in the partition.
If the sum of all numbers in the array is odd, then splitting is impossible, because splitting does not affect the evenness of the sum. Otherwise, we will build the answer constructively. Suppose we have considered some kind of array prefix. Let's keep going until we get exactly $2$ non-zero numbers. We want to make these two non-zero numbers add up to $0$. Then if on the last segment the sum is already equal to $0$, then just take it as an answer. Otherwise, consider a few cases: If the length of the segment is even, then we simply separate the last number (it will be non-zero) into a separate segment. Then its sign will change and in total these two numbers will give $0$. The same can be done if the length of the segment is odd, but its first element is equal to $0$. Separate this $0$ and repeat the algorithm above. If the length of the segment is odd and the first element is not equal to $0$, then we separate it. Then the value of the first element will not change, and the last will change to the opposite, and then their sum will be equal to $0$.
[ "constructive algorithms", "dp", "greedy" ]
1,500
null
1753
B
Factorial Divisibility
You are given an integer $x$ and an array of integers $a_1, a_2, \ldots, a_n$. You have to determine if the number $a_1! + a_2! + \ldots + a_n!$ is divisible by $x!$. Here $k!$ is a factorial of $k$ — the product of all positive integers less than or equal to $k$. For example, $3! = 1 \cdot 2 \cdot 3 = 6$, and $5! = 1 \cdot 2 \cdot 3 \cdot 4 \cdot 5 = 120$.
Let's create an array $[cnt_1, cnt_2, \ldots, cnt_x]$ where $cnt_i$ equals to number of elements equals to $i$ in the initial array. Note that $a_1!\ + a_2!\ +\ \ldots\ +\ a_n!$ equals to sum of $k! \cdot cnt_k$ over all $k$ from $1$ to $x - 1$, $cnt_x$ does not affect anything because $x!$ divides $x!$ itself. We have to check if this sum is divisible by $x!$. Suppose there exists some $k < x$ such that $cnt_k \geq k + 1$. In this case we can make two transformations: $cnt_k \mathrel{-}= (k + 1); cnt_{k+1} \mathrel{+}= 1$ and the sum of $k! \cdot cnt_k$ will not change because $(k+1) \cdot k! = (k+1)!$. Let's perform this operation until it is possible for all numbers from $1$ to $x - 1$. After all operations the sum of $k! \cdot cnt_k$ will not change and for each $k < x$ the inequality $cnt_k \leq k$ will be satisfied because if $cnt_k \geq k+1$ we could perform an operation with this element. Let's see what is the maximum value of sum of $k \cdot cnt_k$ over all $k$ from $1$ to $x - 1$ after all operations. We know that $cnt_k \leq k$ for all $k$, so the maximum value of the sum is the sum of $k \cdot k!$ over all $k$. Note that $k \cdot k! = ((k + 1) - 1) \cdot k! = (k + 1) \cdot k! - k! = (k + 1)! - k!$. It means that the sum of such values over all $k$ from $1$ to $x - 1$ equals to $(2! - 1!) + (3! - 2!) + \ldots + (x! - (x-1)!)$. Each factorial from $2$ to $x - 1$ will be added and subtracted from the sum. So the result is $x! - 1$. So the only one case when this sum is divisible by $x!$ is when the sum equals to $0$. It means that $cnt_k$ equals to zero for all $k$ from $1$ to $x - 1$ after performing all operations. Time complexity: $\mathcal{O}(n + x)$.
[ "math", "number theory" ]
1,600
null
1753
C
Wish I Knew How to Sort
You are given a binary array $a$ (all elements of the array are $0$ or $1$) of length $n$. You wish to sort this array, but unfortunately, your algorithms teacher forgot to teach you sorting algorithms. You perform the following operations until $a$ is sorted: - Choose two random indices $i$ and $j$ such that $i < j$. Indices are chosen equally probable among all pairs of indices $(i, j)$ such that $1 \le i < j \le n$. - If $a_i > a_j$, then swap elements $a_i$ and $a_j$. What is the expected number of such operations you will perform before the array becomes sorted? It can be shown that the answer can be expressed as an irreducible fraction $\frac{p}{q}$, where $p$ and $q$ are integers and $q \not \equiv 0 \pmod{998\,244\,353}$. Output the integer equal to $p \cdot q^{-1} \bmod 998\,244\,353$. In other words, output such an integer $x$ that $0 \le x < 998\,244\,353$ and $x \cdot q \equiv p \pmod{998\,244\,353}$.
Let the number of zeros in the array be $g$. Let $dp[k]$ be the expected number of swaps needed when there are $k$ zeros in the first $g$ positions. Then, we know that $dp[g] = 0$, and we can write down the recurrence equations for $dp[k]$ by considering the case where some element equals to one from the first $g$ positions and some element equals to zero from the last $(n-g)$ positions are swapped. This is the only case where the $dp$ value will change. Thus, our recurrence is as follows. Let $p = \frac{2 \cdot (g-k) \cdot (g-k)}{n \cdot (n-1)}$. Then $dp[k] = 1 + dp[k] \cdot (1 - p) + dp[k+1] \cdot p$. The answer is $dp[o]$, where $o$ is the initial number of zeros in the first $g$ positions.
[ "dp", "math", "probabilities" ]
2,000
null
1753
D
The Beach
Andrew loves the sea. That's why, at the height of the summer season, he decided to go to the beach, taking a sunbed with him to sunbathe. The beach is a rectangular field with $n$ rows and $m$ columns. Some cells of the beach are free, some have roads, stones, shops and other non-movable objects. Some of two adjacent along the side cells can have sunbeds located either horizontally or vertically. Andrew hopes to put his sunbed somewhere, but that's a bad luck, there may no longer be free places for him! That's why Andrew asked you to help him to find a free place for his sunbed. Andrew's sunbed also should be places on two adjacent cells. If there are no two adjacent free cells, then in order to free some place for a sunbed, you will have to disturb other tourists. You can do the following actions: - Come to some sunbed and, after causing $p$ units of discomfort to its owner, lift the sunbed by one of its sides and rotate it by $90$ degrees. One half of the sunbed must remain in the same cell and another half of the sunbed must move to the free cell. At the same time, anything could be on the way of a sunbed during the rotation .\begin{center} \begin{tabular}{ccc} & & \ \end{tabular} {\small Rotation of the sunbed by $90$ degrees around cell $(1, 2)$.} \end{center} - Come to some sunbed and, after causing $q$ units of discomfort to its owner, shift the sunbed along its long side by one cell. One half of the sunbed must move to the place of another, and another — to the free cell.\begin{center} \begin{tabular}{ccc} & & \ \end{tabular} {\small Shift of the sunbed by one cell to the right.} \end{center} In any moment each sunbed occupies two adjacent free cells. You cannot move more than one sunbed at a time. Help Andrew to free a space for his sunbed, causing the minimum possible number of units of discomfort to other tourists, or detect that it is impossible.
Let's paint our field in a chess coloring. Now let's consider our operations not as the movement of sunbeds, but as the movement of free cells. Then, a free cell adjacent to the long side of the sunbed can move to a cell of the sunbed that is not adjacent to this one, for $p$ units of discomfort. A free cell adjacent to the short side of the sunbed can move to a cell of the sunbed that is not adjacent to this one, for $q$ units of discomfort. Note that in this cases, the free cell does not change its color (in chess coloring). Since each sunbed should occupy one black and one white cell, then some two free cells of different colors should move to neighboring ones using operations. It can be shown that in the optimal answer we use no more than one operation with each sunbed. Then, for each position, looking at the adjacent sunbeds, we will determine where the free cell can move if it turns out to be in this position. Let's construct a weighted oriented graph on the cells of the field. Edge $(x_1, y_1) \rightarrow (x_2, y_2)$ of weight $w$ (equal to $p$ or $q$) will mean that there is a sunbed such that by moving it with an operation that brings $w$ discomfort, we will free the cell $(x_2, y_2)$ and block the cell $(x_1, y_1)$. Note that the graphs on the black and white cells are not connected. Let's run Dijkstra's algorithm from all free cells at once. Then, for each cell $d_{x, y}$ - the minimum distance in this graph from a free cell is equal to the minimum amount of discomfort that must be used to free this cell. The answer to the problem is the minimum for all pairs $(x_1, y_1)$, $(x_2, y_2)$ neighboring cells, $d_{x_1, y_1} + d_{x_2, y_2}$. Or $-1$ if there is no pair of adjacent cells, both of which are reachable from the free ones. Asymptotics of the solution: $\mathcal{O}(n m \cdot\log{(nm))}$
[ "constructive algorithms", "dfs and similar", "graphs", "shortest paths" ]
2,400
null
1753
E
N Machines
You have been invited as a production process optimization specialist to some very large company. The company has $n$ machines at its factory, standing one behind another in the production chain. Each machine can be described in one of the following two ways: $(+,~a_i)$ or $(*,~a_i)$. If a workpiece with the value $x$ is supplied to the machine of kind $(+,~a_i)$, then the output workpiece has value $x + a_i$. If a workpiece with the value $x$ is supplied to the machine of kind $(*,~a_i)$, then the output workpiece has value $x \cdot a_i$. The whole production process is as follows. The workpiece with the value $1$ is supplied to the first machine, then the workpiece obtained after the operation of the first machine is supplied to the second machine, then the workpiece obtained after the operation of the second machine is supplied to the third machine, and so on. The company is not doing very well, so now the value of the resulting product does not exceed $2 \cdot 10^9$. The directors of the company are not satisfied with the efficiency of the production process and have given you a budget of $b$ coins to optimize it. To optimize production you can change the order of machines in the chain. Namely, by spending $p$ coins, you can take any machine of kind $(+,~a_i)$ and move it to any place in the chain without changing the order of other machines. Also, by spending $m$ coins, you can take any machine of kind $(*,~a_i)$ and move it to any place in the chain. What is the maximum value of the resulting product that can be achieved if the total cost of movements that are made should not exceed $b$ coins?
Let $C$ be the maximum value of the resulting product before any movements. The problem statement says that it is guaranteed that $C \le 2 \cdot 10^9$. Observation 0 - after each movement the value of the resulting product is not greater than $\frac{C^2}{4}$. Observation 1 - each machine of kind $(*, a_i)$ should be moved to the end of the sequence, and each machine of kind $(+, a_i)$ - to the beginning of the sequence, and the order of movements does not make sense. Observation 2 - there are at most $\log_2{C}$ non-trivial machines of kind $(*, a_i)$ (such machines that $a_i \neq 1$). We will need some stronger observation for machines of kind $(*, a_i)$, but this will be useful too. Observation 3 - if there are two machines $(*, a_i)$, $(*, a_j)$, where $i < j$ and $a_i \ge a_j$, then in the optimal answer machine $j$ may be moved if and only if machine $i$ is moved too. It is true because we could increase the answer otherwise, by moving machine $i$ instead of machine $j$. The last two observations say that there are not many subsets of machines of kind $(*, a_i)$ (that satisfy the property from observation 3). Let's say that there are $F(C)$ such subsets, in the end of the editorial we will estimate this value. Let's pick out subsegments of machines of kind $(+, a_i)$ between machines of kind $(*, a_i)$, sort them and count prefix sums. There will be not more than $\log_2{C} + 1$ such segments. In the optimal answer some maximums will be moved from each of the segments. Let's fix some subset of machines of kind $(*, a_i)$, that will be moved to the end, and count the current value of the output product. Consider some element $(+, a_j)$ in the array. Let the product of machines $(*, a_i)$ to the left of it be $lmul$, and to the right of it - to be $rmul$. Now if we move this element to the beginning of the array, the value of the resulting product will increase by $\frac{lmul-1}{lmul} \cdot rmul \cdot a_j$. Let's call this $profit_j$. 
Now we have to find the sum of some numbers of maximum values $profit_j$. Let's use binary search to find some "critical" value $profit$: such value that all elements with $profit_j \ge profit$ will be moved to the beginning. $profit_j$ of each element is not greater than $C^2/4$. Inside the binary search we have to iterate over all segments of elements $(+, a_i)$ and find the number of elements with $profit_j \ge profit$ inside this segment using binary search. We have to check if we can move the selected amount of elements to the beginning of the array to understand how to move the borders of the external binary search. After we find the critical value $profit$, let's iterate over all segments $(+, a_i)$ and add the sum of elements with $profit_j \ge profit$ to the answer. Separately let's consider elements with $profit_j$ = $profit - 1$. We could move some of them to the beginning too. Let's update the answer with this value. Time complexity: $O(F(C) \log^3_2(C) + n \cdot \log_2(n))$. It should be noted that this estimate is actually higher than in fact. Let's estimate the value $F(C)$ now: Consider some sequence $b_1, b_2, \dots, b_k$, such that $b_1 \cdot b_2 \cdot$ $\dots$ $\cdot b_k \le C$ and $2 \le b_i$. Sort it in ascending order, $b_1 \le b_2 \le \dots \le b_k$ - the product of elements will not change and the number of "interesting" subsets will not become smaller. Replace all the smallest elements of the sequence with $2$, the second minimums with $3$ and so on. If there is a smaller number of elements equal to $x$ than elements equal to $y$ and $x < y$, let's swap their numbers. Now the number of interesting subsets is not changed, $b_1 \cdot b_2 \cdot$ $\dots$ $\cdot b_k$ is not increased. The sequence looks like $2, 2, \dots, 3, \dots, 4, \dots$ now. The number of interesting subsets in the new sequence equals to $(cnt_2 + 1) \cdot (cnt_3 + 1) \cdot \dots$, where $cnt_x$ is the number of elements in the sequence equal to $x$. 
(Let's run the code that will brute-force over all sequences of such kind and see that the number of interesting subsets is $4608$, which is achieved on sequence $2, 2, 2, 3, 3, 4, 4, 5, 6, 7, 8, 9, 10, 11$) Let's continue estimating this value "fairly": the elements of the sequence do not exceed $12$ because $13! > C$. Let's replace each number with a prime number corresponding to it by order: $p_2 = 2, p_3 = 3, p_4 = 5, p_5 = 7, p_6 = 11, ...$ and replace all elements $x$ with $p_x$. The product of elements will increase by at most $\max\limits_{x}{((\frac{p_x}{x})^{\log_x(C)})}$ times, so the product will not exceed $\max\limits_{x}{(C^{\log_x{(p_x)}})}$. It is easy to check that the maximum is achieved at $x=6$, so the product is not greater than $C^{\log_6{11}}$ $\le 3 \cdot 10^{12}$. The number of interesting subsets of our sequence does not exceed the number of divisors of the received numbers, which can be estimated as $(3 \cdot 10^{12})^{(1/3)}$ $\le 15000$
[ "binary search", "brute force", "greedy" ]
3,300
null
1753
F
Minecraft Series
Little Misha goes to the programming club and solves nothing there. It may seem strange, but when you find out that Misha is filming a Minecraft series, everything will fall into place... Misha is inspired by Manhattan, so he built a city in Minecraft that can be imagined as a table of size $n \times m$. $k$ students live in a city, the $i$-th student lives in the house, located at the intersection of the $x_i$-th row and the $y_i$-th column. Also, each student has a degree of his aggressiveness $w_i$. Since the city turned out to be very large, Misha decided to territorially limit the actions of his series to some square $s$, which sides are parallel to the coordinate axes. The length of the side of the square should be an integer from $1$ to $\min(n, m)$ cells. According to the plot, the main hero will come to the city and accidentally fall into the square $s$. Possessing a unique degree of aggressiveness $0$, he will be able to show his leadership qualities and assemble a team of calm, moderate and aggressive students. In order for the assembled team to be versatile and close-knit, degrees of aggressiveness of all students of the team must be pairwise distinct and must form a single segment of consecutive integers. Formally, if \textbf{there exist} students with degrees of aggressiveness $l, l+1, \ldots, -1, 1, \ldots, r-1, r$ inside the square $s$, where $l \le 0 \le r$, the main hero will be able to form a team of $r-l+1$ people (of course, he is included in this team). \textbf{Notice}, that it is not required to take all students from square $s$ to the team. Misha thinks that the team should consist of at least $t$ people. That is why he is interested, how many squares are there in the table in which the main hero will be able to form a team of at least $t$ people. Help him to calculate this.
Let's formalize the problem condition. It is required to calculate the number of squares $s$ in the table for which we have inequality $A+B \geq T$, where $A$ is a $\text{MEX}$ of positive integers in the square and $B$ is a $\text{MEX}$ of absolute values of all negative integers in the square. Then we denote the cost of a square as $A+B$. Note that when the square is expanded, its value cannot decrease. Let's fix the diagonal that contains the upper left and lower right sides of the square. Now, with a fixed lower right cell, we want to maintain the upper left cell of the square that is maximally removed from it so that its cost does not exceed $T-1$. Note that this upper-left boundary can only shift in the direction of moving the right lower one, which means we can use the two pointers technique. We will also need to maintain a set of numbers that are contained in a square. To do this, we will process separately each cell that is added to or removed from our square. Note that for each cell there are no more than $\min \{ N, M \}$ diagonals on which it is possible to construct a square containing this cell, and also note that due to the structure of our solution for each such diagonal, our cell will be added to the set no more than $1$ time. Thus, the total number of additions of cells to our set does not exceed $M N \cdot \min \{N, M \}$, and accordingly the total number of additions of numbers to the set does not exceed $K \cdot \min \{ N, M \}$. We will also need to find out the $\text{MEX}$ of all positive integers in the square, as well as the $\text{MEX}$ of absolute values of negative integers in the square. Here we need to make another observation about our algorithm. The number of $\text{MEX}$ queries will not exceed $2MN$. That is, you can use the square roots technique to add and remove integers in $O(1)$ time, and find out the $\text{MEX}$ values in $O(\sqrt K)$ time. 
To summarize, our algorithm will work in asymptotic time: $O((NM + K) \cdot \min \{ N, M \} + NM \sqrt{K})$
[ "brute force", "two pointers" ]
3,500
null
1754
A
Technical Support
You work in the quality control department of technical support for a large company. Your job is to make sure all client issues have been resolved. Today you need to check a copy of a dialog between a client and a technical support manager. According to the rules of work, each message of the client must be followed by \textbf{one or several} messages, which are the answer of a support manager. However, sometimes clients ask questions so quickly that some of the manager's answers to old questions appear after the client has asked some new questions. Due to the privacy policy, the full text of messages is not available to you, only the order of messages is visible, as well as the type of each message: a customer question or a response from the technical support manager. \textbf{It is guaranteed that the dialog begins with the question of the client.} You have to determine, if this dialog may correspond to the rules of work described above, or the rules are certainly breached.
Let's process each character of the string from left to right and store the number of unanswered questions $cnt$. Initially this value equals zero. Consider the $i$-th character of the string. If it equals "Q", increase $cnt$ by one. If it equals "A", decrease $cnt$ by one. If $cnt$ has become negative, it means that one of the questions was answered several times. In this case let's assign zero to $cnt$. If $cnt$ equals zero after processing the whole string, then all questions were answered, and the answer is "Yes". Otherwise, the answer is "No". Time complexity: $\mathcal{O}(n)$ for each test case.
[ "greedy" ]
800
null
1754
B
Kevin and Permutation
For his birthday, Kevin received the set of pairwise distinct numbers $1, 2, 3, \ldots, n$ as a gift. He is going to arrange these numbers in a way such that the minimum absolute difference between two consecutive numbers be maximum possible. More formally, if he arranges numbers in order $p_1, p_2, \ldots, p_n$, he wants to maximize the value $$\min \limits_{i=1}^{n - 1} \lvert p_{i + 1} - p_i \rvert,$$ where $|x|$ denotes the absolute value of $x$. Help Kevin to do that.
Let's prove that the minimum difference of consecutive elements is not greater than $\lfloor \frac{n}{2} \rfloor$. To do it, let's prove that a larger value is not achievable. Consider the element of a permutation with value $\lfloor \frac{n}{2} \rfloor + 1$. It will have at least one adjacent element in the constructed permutation. And the maximum absolute difference of this element with the adjacent elements is at most $\lfloor \frac{n}{2} \rfloor$. Now we will construct the permutation with the minimum absolute difference of consecutive elements equal to $\lfloor \frac{n}{2} \rfloor$. Assign $x = \lfloor \frac{n}{2} \rfloor + 1$. Now we can construct such a permutation: $x, 1, x + 1, 2, x + 2, \ldots$. It's easy to see that the minimum absolute difference of consecutive elements equals $x - 1$.
[ "constructive algorithms", "greedy", "math" ]
800
null
1758
A
SSeeeeiinngg DDoouubbllee
A palindrome is a string that reads the same backward as forward. For example, the strings $z$, $aaa$, $aba$, and $abccba$ are palindromes, but $codeforces$ and $ab$ are not. The double of a string $s$ is obtained by writing each character twice. For example, the double of $seeing$ is $sseeeeiinngg$. Given a string $s$, rearrange its double to form a palindrome. Output the rearranged string. It can be proven that such a rearrangement always exists.
Output $s + \text{reverse}(s)$. It works, since each character in $s$ occurs exactly twice (once in $s$, once in $\text{reverse}(s)$), and the result is a palindrome.
[ "constructive algorithms", "strings" ]
800
for _ in range(int(input())): s = input() print(s + s[::-1])
1758
B
XOR = Average
You are given an integer $n$. Find a sequence of $n$ integers $a_1, a_2, \dots, a_n$ such that $1 \leq a_i \leq 10^9$ for all $i$ and $$a_1 \oplus a_2 \oplus \dots \oplus a_n = \frac{a_1 + a_2 + \dots + a_n}{n},$$ where $\oplus$ represents the bitwise XOR. It can be proven that there exists a sequence of integers that satisfies all the conditions above.
Let us consider the cases when $n$ is odd and when its even. $n$ is odd: We can see that printing $\underbrace{1,\dots,1}_{n\text{ times}}$ will lead to an average of $1$ and an XOR of $1$ (since $1 \oplus 1 = 0$). Similarly, you could print any integer $n$ times to pass this case. $n$ is even: We use a slight modification of the solution for odd $n$ here. Instead of printing the same number $n$ times, we print $1, 3, \underbrace{2, \dots, 2}_{n-2\text{ times}}$. Both the XOR and the average of $1$ and $3$ are $2$. Therefore the average of the total sequence remains $2$, and the XOR of the whole sequence is also $2$. Note that there are other possible solutions, but the simplest one is described here.
[ "constructive algorithms" ]
900
for _ in range(int(input())): n = int(input()) print(*[1] if n == 1 else [1 + n % 2] + [2]*(n-2) + [3 - n % 2])
1758
C
Almost All Multiples
Given two integers $n$ and $x$, a permutation$^{\dagger}$ $p$ of length $n$ is called funny if $p_i$ is a multiple of $i$ for all $1 \leq i \leq n - 1$, $p_n = 1$, and $p_1 = x$. Find the lexicographically minimal$^{\ddagger}$ funny permutation, or report that no such permutation exists. $^{\dagger}$ A permutation of length $n$ is an array consisting of each of the integers from $1$ to $n$ exactly once. $^{\ddagger}$ Let $a$ and $b$ be permutations of length $n$. Then $a$ is lexicographically smaller than $b$ if in the first position $i$ where $a$ and $b$ differ, $a_i < b_i$. A permutation is lexicographically minimal if it is lexicographically smaller than all other permutations.
We start by giving the answers for $n=12$ with $x=2$ and $x=3$: $[\color{red}{2}, \color{red}{4}, 3, \color{red}{12}, 5, 6, 7, 8, 9, 10, 11, \color{red}{1}]$ $[\color{red}{3}, 2, \color{red}{6}, 4, 5, \color{red}{12}, 7, 8, 9, 10, 11, \color{red}{1}].$ As you can see, the array is almost the identity permutation, with certain elements rotated to the left. In particular, these are the elements that you get when you find the longest sequence $a_i$ such that $x \mid a_1 \mid a_2 \mid \dots \mid n$ (recall $a \mid b$ means $a$ divides $b$). For example, $3 \mid 6 \mid 12$ and $2 \mid 4 \mid 12$. To find this longest sequence, you need to prime factorize $\frac{n}{x}$. The complexity is $\mathcal{O}(n \log n)$. The main idea is intuitive, but the proof is rather long. We include it below. The idea is to look at cycles in the permutation. Consider any cycle of length greater than $1$, say $c_1, c_2, \dots, c_k$ (that is, $p_{c_1}=c_2$, $p_{c_2}=c_3, \dots, p_{c_k}=c_1$). We claim that for at least one element $c_i$ of the cycle, $p_{c_i}$ is not a multiple of $c_i$. In fact, we'll show a more general claim: for one element of the cycle $p_{c_i} < c_i$, which implies that $p_{c_i}$ cannot be a multiple of $c_i$. Indeed, let's sum $p_{c_i} - c_i$ over all elements of the cycle. This sum is $0$, because each element appears once before the $-$ sign and once afterwards. Since none of these equal $0$, it follows that at least one of these terms is negative (and at least one is positive). If $p_{c_i} - c_i < 0$, then $p_{c_i} < c_i$, as desired. So in each cycle, we must have at least one element breaking the key claim in the problem. But this claim holds for all $1 \leq i \leq n-1$, so the only cycle we can have goes through $p_n$! Indeed, since $p_n=1$ and $p_1=x$, the cycle goes $n \to 1 \to x \to \dots \to n$. For all arrows except the first one, we use $a \mid b$ to write $a \to b$, because only $p_n$ can break the condition.
Since we want the permutation to be lexicographically minimal, we want the longest such chain. So we should find the longest sequence of numbers from $x$ to $n$, such that each number divides the previous. If there are multiple such sequences, we need to pick the one that puts smaller numbers earlier, since we want smaller elements earlier on in the sequence. To do this, we can just find the prime factorization of $\frac{n}{x}$ (it is the longest, since the primes cannot be broken up into smaller factors), sort it, and cycle it.
[ "greedy", "number theory" ]
1,400
# Codeforces 1758C — Almost All Multiples.
# A funny permutation exists iff x divides n.  Start from the identity,
# seed the cycle by placing x at position 1, 1 at position n and n at
# position x, then greedily move n forward to the smallest later index
# that keeps both divisibility conditions, which builds the
# lexicographically minimal chain x | ... | n.
# (Original code had its newlines collapsed onto one line; structure restored.)

def funny_permutation(n: int, x: int):
    """Return the lexicographically minimal funny permutation, or None."""
    if n % x:
        return None
    p = list(range(1, n + 1))
    if x == n:
        # Only the endpoints move: [n, 2, 3, ..., n-1, 1].
        p[0], p[-1] = p[-1], p[0]
        return p
    # Seed the cycle 1 -> x -> ... -> n -> 1.
    p[0], p[-1], p[x - 1] = x, 1, n
    cur = x - 1  # 0-based position currently holding n
    for i in range(1, n - 1):
        # Swap n forward whenever both affected positions remain valid
        # multiples of their (1-based) indices.
        if p[cur] % (i + 1) == 0 and p[i] % (cur + 1) == 0:
            p[i], p[cur] = p[cur], p[i]
            cur = i
    return p


def main() -> None:
    for _ in range(int(input())):
        n, x = map(int, input().split())
        p = funny_permutation(n, x)
        if p is None:
            print(-1)
        else:
            print(*p)


if __name__ == "__main__":
    main()
1758
D
Range = √Sum
You are given an integer $n$. Find a sequence of $n$ \textbf{distinct} integers $a_1, a_2, \dots, a_n$ such that $1 \leq a_i \leq 10^9$ for all $i$ and $$\max(a_1, a_2, \dots, a_n) - \min(a_1, a_2, \dots, a_n)= \sqrt{a_1 + a_2 + \dots + a_n}.$$ It can be proven that there exists a sequence of \textbf{distinct} integers that satisfies all the conditions above.
Let us consider the cases when $n$ is odd and when it is even. $n$ is odd: First, we can start with the $n$ consecutive distinct numbers centered at $n$. The minimum-maximum difference is $n - 1$, and the sum is $n^2$. If we add 2 to each number, the minimum-maximum difference remains the same, and the sum increases to $n^2 + 2n$. Now, we can decrease the minimum by 1 and increase the maximum by 1. The sum remains at $n^2 + 2n$, while the difference increases to $n + 1$. To make the sum equal $(n + 1)^2 = n^2 + 2n + 1$, we can increase the 2nd last number by 1, which we can do since we previously increased the maximum by 1. As an example, this sequence is followed for $n = 5$: [3, 4, 5, 6, 7] (centered at $5$) [5, 6, 7, 8, 9] (increase by $2$) [4, 6, 7, 8, 10] (shift min/max) [4, 6, 7, 9, 10] (shift 2nd last) $n$ is even: We can let $[a_1, \dots, a_n] = [n / 2, n / 2 + 1, \dots, n - 1, n + 1, \dots, 3n / 2]$. The difference between the minimum and maximum is $n$, and the sum of the numbers equals $n^2$, so this is valid. Other solutions exist, only one is described here.
[ "binary search", "brute force", "constructive algorithms", "math", "two pointers" ]
1,800
# Codeforces 1758D — Range = sqrt(Sum).
# Even n: take n/2 .. 3n/2 without n — range n, sum n^2.
# Odd n: take the n consecutive numbers starting at n//2 + 3 (sum n(n+2)),
# lower the minimum by one and raise the two largest by one — range n + 1,
# sum n(n+2) + 1 = (n+1)^2.
# (Original code had its newlines collapsed onto one line; structure restored.)

def sqrt_sum_sequence(n: int) -> list:
    """Return n distinct integers with max - min == sqrt(sum)."""
    if n % 2 == 0:
        return [i for i in range(n // 2, n // 2 + n + 1) if i != n]
    a = list(range(n // 2 + 3, n // 2 + 3 + n))
    a[0] -= 1   # lower the minimum
    a[-1] += 1  # raise the maximum
    a[-2] += 1  # raise the 2nd largest to fix the sum
    return a


def main() -> None:
    for _ in range(int(input())):
        print(*sqrt_sum_sequence(int(input())))


if __name__ == "__main__":
    main()
1758
E
Tick, Tock
Tannhaus, the clockmaker in the town of Winden, makes mysterious clocks that measure time in $h$ hours numbered from $0$ to $h-1$. One day, he decided to make a puzzle with these clocks. The puzzle consists of an $n \times m$ grid of clocks, and each clock always displays some hour exactly (that is, it doesn't lie between two hours). In one move, he can choose any row or column and shift all clocks in that row or column one hour forward$^\dagger$. The grid of clocks is called solvable if it is possible to make all the clocks display the same time. While building his puzzle, Tannhaus suddenly got worried that it might not be possible to make the grid solvable. Some cells of the grid have clocks already displaying a certain initial time, while the rest of the cells are empty. Given the partially completed grid of clocks, find the number of ways$^\ddagger$ to assign clocks in the empty cells so that the grid is solvable. The answer can be enormous, so compute it modulo $10^9 + 7$. $^\dagger$ If a clock currently displays hour $t$ and is shifted one hour forward, then the clock will instead display hour $(t+1) \bmod h$. $^\ddagger$ Two assignments are different if there exists some cell with a clock that displays a different time in both arrangements.
Notice that a relationship between two clocks with assigned values on the grid on different rows but the same column, that is, $g_{x, z}$ and $g_{y, z}$, can be represented as $g_{y, z} \equiv g_{x, z} + d \pmod{h}$, where $0 \le d < h$. Now, for every $1 \le i \le m$, $g_{y, i} \equiv g_{x, i} + d \pmod{h}$. Using these relationships, we can create a weighted directed graph using our rows as nodes. Obviously, if there are discrepancies in the graph $\pmod{h}$, no solution exists. Now, for each connected component, if there is an assigned value in one of the rows it contains, we can determine all of the other values for that column in the connected component. We can merge different connected components $i, j$ by choosing a common difference $d_{i, j}$ for these components. This needs to be done (connected components - 1) times, and there are $h$ different ways to choose a common difference when combining different components, resulting in $h^{\text{connected components} - 1}$ different ways to combine all components into one connected component. This leaves us with columns that are fully empty, i.e., they consist $\textit{only}$ of unassigned clocks. As all rows are in one connected component at this point, assigning a clock in one empty column results in all other clocks in that column becoming assigned values too. There are $h^{\text{empty columns}}$ different ways to assign clocks to these empty columns. Thus, overall, our solution is $h^{\text{connected components} + \text{empty columns} - 1}$.
[ "combinatorics", "dfs and similar", "dsu", "graphs" ]
2,500
# Codeforces 1758E — Tick, Tock.
# (Original code had its newlines collapsed onto one line; structure
# restored.  An adjacency list was built but never read — removed.  The
# fast-stdin setup is moved into main() so importing the module has no
# side effects.)
import io, os

MOD = 10**9 + 7


def count_ways(n, m, h, a):
    """Count assignments of the empty cells (value -1) making the grid solvable.

    Rows and columns act as nodes of a bipartite graph; every preset clock
    a[i][j] forces row_val[i] + col_val[j] + a[i][j] == 0 (mod h).  Values
    are propagated by DFS through preset cells; if any preset clock then
    contradicts the propagated values the answer is 0, otherwise every
    connected component beyond the first contributes a free choice of h
    offsets, giving h ** (components - 1) modulo 10^9 + 7.
    """
    visited = [False] * (n + m)
    row_val, col_val = [-1] * n, [-1] * m
    components = 0
    for start in range(n + m):
        if visited[start]:
            continue
        components += 1
        stack = [start]
        while stack:
            v = stack.pop()
            visited[v] = True
            if v < n:
                if row_val[v] == -1:
                    row_val[v] = 0  # anchor this component's row offset
                for j in range(m):
                    if a[v][j] != -1 and col_val[j] == -1:
                        col_val[j] = (-row_val[v] - a[v][j]) % h
                        stack.append(j + n)
            else:
                c = v - n
                if col_val[c] == -1:
                    col_val[c] = 0  # anchor this component's column offset
                for i in range(n):
                    if a[i][c] != -1 and row_val[i] == -1:
                        row_val[i] = (-col_val[c] - a[i][c]) % h
                        stack.append(i)
    # Verify every preset clock against the propagated offsets.
    for i in range(n):
        for j in range(m):
            if a[i][j] != -1 and (a[i][j] + row_val[i] + col_val[j]) % h:
                return 0
    return pow(h, components - 1, MOD)


def main() -> None:
    # Slurp all of stdin at once for speed.
    read_line = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline
    for _ in range(int(read_line())):
        n, m, h = map(int, read_line().split())
        grid = [list(map(int, read_line().split())) for _ in range(n)]
        print(count_ways(n, m, h, grid))


if __name__ == "__main__":
    main()
1758
F
Decent Division
A binary string is a string where every character is $0$ or $1$. Call a binary string decent if it has an equal number of $0$s and $1$s. Initially, you have an infinite binary string $t$ whose characters are all $0$s. You are given a sequence $a$ of $n$ updates, where $a_i$ indicates that the character at index $a_i$ will be flipped ($0 \leftrightarrow 1$). You need to keep and modify after each update a set $S$ of \textbf{disjoint} ranges such that: - for each range $[l,r]$, the substring $t_l \dots t_r$ is a decent binary string, and - for all indices $i$ such that $t_i = 1$, there exists $[l,r]$ in $S$ such that $l \leq i \leq r$. You only need to output the ranges that are added to or removed from $S$ after each update. You can only add or remove ranges from $S$ at most $\mathbf{10^6}$ times. More formally, let $S_i$ be the set of ranges after the $i$-th update, where $S_0 = \varnothing$ (the empty set). Define $X_i$ to be the set of ranges removed after update $i$, and $Y_i$ to be the set of ranges added after update $i$. Then for $1 \leq i \leq n$, $S_i = (S_{i - 1} \setminus X_i) \cup Y_i$. The following should hold for all $1 \leq i \leq n$: - $\forall a,b \in S_i, (a \neq b) \rightarrow (a \cap b = \varnothing)$; - $X_i \subseteq S_{i - 1}$; - $(S_{i-1} \setminus X_i) \cap Y_i = \varnothing$; - $\displaystyle\sum_{i = 1}^n {(|X_i| + |Y_i|)} \leq 10^6$.
After each update, we want to maintain the invariant that each interval is balanced, and additionally that there is a gap containing at least one zero in between each pair of consecutive intervals. Since every $\texttt{1}$ must be contained in an interval, this is equivalent to having non-empty gaps between consecutive intervals after an update. There are several cases we need to handle. Case 1: Bit $a_i$ is changed from $\texttt{0}$ to $\texttt{1}$. Case 1a: Bit $a_i$ is contained in an interval after the previous updateIf we are inside an interval, then we want to grow the interval that contains it by 2 zeros to maintain balance. If the interval containing $a_i$ is $[l, r]$, then we can expand it to $[l, r + 1]$. Since there is at least one $\texttt{0}$ after each interval, $a_{r + 1} = 0$, so this contains one of the two zeroes we need. If there is another interval $r + 2 \in [l', r']$, then increasing the right bound again by 1 would overlap with this interval. In this case, We know that $a_{r' + 1} = 0$ as well, so we can merge both intervals together into $[l, r' + 1]$ to get a total of two new zeros. $[l', r']$ was also previously balanced, so the interval is still balanced. In this case, we removed two intervals and added one interval, so a total of three operations were used. If there is no intervals where $r + 2 \in [l', r']$, then $a_{r + 2} = 0$, so we can simply expand the current interval once more to $[l, r + 2]$. One interval was removed and one interval was added, so a total of two operations were used. Case 1b: Bit $a_i$ is not contained in an interval.If there exists an interval where $i + 1 \in [i + 1, r]$, then we can expand it to $[k, r + 1]$. This interval is directly after $a_i$, so expanding it by one to the left will include an extra $\texttt{1}$. $a_{r + 1} = 0$ since it is part of the gap between two intervals, so including it balances $a_i$. One interval was removed and one interval was added, so a total of two operations were used. 
If there is no interval where $i + 1 \in [i + 1, r]$, then $a_{i + 1} = 0$. Therefore, we can simply add $[i, i + 1]$, which is balanced. One interval was added, so a total of one operation was used. In both cases, after adding a new interval to the set, we can merge with any adjacent intervals to the left or right. This will maintain the invariant that there is a gap between consecutive ranges as mentioned earlier. We merge at most once to the left and one to the right. Since the maximum number of operations done earlier is 3, the maximum number of operations in total is 5 in this case. Case 1a: Bit $a_i$ is contained in an interval after the previous updateIf we are inside an interval, then we want to grow the interval that contains it by 2 zeros to maintain balance. If the interval containing $a_i$ is $[l, r]$, then we can expand it to $[l, r + 1]$. Since there is at least one $\texttt{0}$ after each interval, $a_{r + 1} = 0$, so this contains one of the two zeroes we need. If there is another interval $r + 2 \in [l', r']$, then increasing the right bound again by 1 would overlap with this interval. In this case, We know that $a_{r' + 1} = 0$ as well, so we can merge both intervals together into $[l, r' + 1]$ to get a total of two new zeros. $[l', r']$ was also previously balanced, so the interval is still balanced. In this case, we removed two intervals and added one interval, so a total of three operations were used. If there is no intervals where $r + 2 \in [l', r']$, then $a_{r + 2} = 0$, so we can simply expand the current interval once more to $[l, r + 2]$. One interval was removed and one interval was added, so a total of two operations were used. If we are inside an interval, then we want to grow the interval that contains it by 2 zeros to maintain balance. If the interval containing $a_i$ is $[l, r]$, then we can expand it to $[l, r + 1]$. 
Since there is at least one $\texttt{0}$ after each interval, $a_{r + 1} = 0$, so this contains one of the two zeroes we need. If there is another interval $r + 2 \in [l', r']$, then increasing the right bound again by 1 would overlap with this interval. In this case, We know that $a_{r' + 1} = 0$ as well, so we can merge both intervals together into $[l, r' + 1]$ to get a total of two new zeros. $[l', r']$ was also previously balanced, so the interval is still balanced. In this case, we removed two intervals and added one interval, so a total of three operations were used. If there is no intervals where $r + 2 \in [l', r']$, then $a_{r + 2} = 0$, so we can simply expand the current interval once more to $[l, r + 2]$. One interval was removed and one interval was added, so a total of two operations were used. Case 1b: Bit $a_i$ is not contained in an interval.If there exists an interval where $i + 1 \in [i + 1, r]$, then we can expand it to $[k, r + 1]$. This interval is directly after $a_i$, so expanding it by one to the left will include an extra $\texttt{1}$. $a_{r + 1} = 0$ since it is part of the gap between two intervals, so including it balances $a_i$. One interval was removed and one interval was added, so a total of two operations were used. If there is no interval where $i + 1 \in [i + 1, r]$, then $a_{i + 1} = 0$. Therefore, we can simply add $[i, i + 1]$, which is balanced. One interval was added, so a total of one operation was used. If there exists an interval where $i + 1 \in [i + 1, r]$, then we can expand it to $[k, r + 1]$. This interval is directly after $a_i$, so expanding it by one to the left will include an extra $\texttt{1}$. $a_{r + 1} = 0$ since it is part of the gap between two intervals, so including it balances $a_i$. One interval was removed and one interval was added, so a total of two operations were used. If there is no interval where $i + 1 \in [i + 1, r]$, then $a_{i + 1} = 0$. 
Therefore, we can simply add $[i, i + 1]$, which is balanced. One interval was added, so a total of one operation was used. In both cases, after adding a new interval to the set, we can merge with any adjacent intervals to the left or right. This will maintain the invariant that there is a gap between consecutive ranges as mentioned earlier. We merge at most once to the left and one to the right. Since the maximum number of operations done earlier is 3, the maximum number of operations in total is 5 in this case. Case 2: Bit $a_i$ is changed from $\texttt{1}$ to $\texttt{0}$. Suppose that $i \in [l, r]$. In this case, we want to somehow split the interval into two balanced portions. Now, suppose we compute the prefix sums of the balance, where $\texttt{0}$ corresponds to $-1$ and $\texttt{1}$ corresponds to $+1$. If $x$ is the first location where the prefix sum equals $-2$, then we claim that we can split the interval into $[l, x - 2]$ and $[x + 1, r]$. To prove this, note that the balance of the empty prefix is 0, so before the prefix sum equals $-2$ for the first time, it must have gone $..., 0, -1, -2$. To have two decreases in a row, we must have $a_{x - 1} = a_x = 0$. In the interval $[l, x - 2]$, the final balance prefix sum is $0$, so the first interval is balanced. Since we changed a $\texttt{1}$ to a $\texttt{1}$ and removed two $\texttt{0}$s, the first interval being balanced implies that the second interval is balanced as well. In addition, since the original interval satisfied the separation invariant, and the new intervals are separated by two $\texttt{0}$s, the separation invariant is still satisfied. To compute the first time when the balance prefix sum equals -2, we can use binary search on the interval using a lazy segment tree. The segment tree represents a global balance prefix sum, and we can range query the minimum balance on an interval. We can binary search for the lowest index on the interval where the minimum prefix sum is less than -2. 
In this case, we removed one interval and added two new intervals, for a total of three operations. Suppose that $i \in [l, r]$. In this case, we want to somehow split the interval into two balanced portions. Now, suppose we compute the prefix sums of the balance, where $\texttt{0}$ corresponds to $-1$ and $\texttt{1}$ corresponds to $+1$. If $x$ is the first location where the prefix sum equals $-2$, then we claim that we can split the interval into $[l, x - 2]$ and $[x + 1, r]$. To prove this, note that the balance of the empty prefix is 0, so before the prefix sum equals $-2$ for the first time, it must have gone $..., 0, -1, -2$. To have two decreases in a row, we must have $a_{x - 1} = a_x = 0$. In the interval $[l, x - 2]$, the final balance prefix sum is $0$, so the first interval is balanced. Since we changed a $\texttt{1}$ to a $\texttt{1}$ and removed two $\texttt{0}$s, the first interval being balanced implies that the second interval is balanced as well. In addition, since the original interval satisfied the separation invariant, and the new intervals are separated by two $\texttt{0}$s, the separation invariant is still satisfied. To compute the first time when the balance prefix sum equals -2, we can use binary search on the interval using a lazy segment tree. The segment tree represents a global balance prefix sum, and we can range query the minimum balance on an interval. We can binary search for the lowest index on the interval where the minimum prefix sum is less than -2. In this case, we removed one interval and added two new intervals, for a total of three operations. In both cases, we use at most 5 operations in a single step, so we in total use at most $5n$ operations, which fits in our bound. In practice, this upper bound is quite loose. Because of the binary search on the lazy segment tree, the time complexity for this solution is $O(n \log^2 n)$. 
Note that this can be optimized to $O(n \log n)$ by optimizing the binary search, but this was not required.
[ "constructive algorithms", "data structures" ]
3,000
# Codeforces 1758F — Decent Division (official Python solution).
# NOTE(review): the original newlines of this program were collapsed during
# extraction, so the five physical lines below are not valid Python as-is;
# only these comments are added here — the code itself is left byte-identical.
#
# Logical structure of the program:
#   * LazySegmentTree — range-add / range-min segment tree over the global
#     prefix sums of the balance (a 0 contributes -1 and a 1 contributes +1);
#     update() adds a value on an inclusive range, query() returns the
#     minimum on an inclusive range.
#   * SortedList — Fenwick-indexed bucket list keeping the set S of disjoint
#     intervals sorted, with add / remove / bisect / __getitem__.
#   * Driver loop — for each flipped index x it distinguishes the editorial's
#     cases (bit set or unset; inside or outside an interval): on a 0 -> 1
#     flip it grows the touched interval and merges with adjacent ones; on a
#     1 -> 0 flip it binary-searches (via segment-tree range minima) the
#     first place the prefix sum drops by 2 and splits the containing
#     interval into two balanced halves.  After every update it prints the
#     removed intervals and then the added intervals.
class LazySegmentTree: def __init__(self, array): self.n = len(array) self.size = 1 << (self.n - 1).bit_length() self.func = min self.default = float("inf") self.data = [self.default] * (2 * self.size) self.lazy = [0] * (2 * self.size) self.process(array) def process(self, array): self.data[self.size : self.size+self.n] = array for i in range(self.size-1, -1, -1): self.data[i] = self.func(self.data[2*i], self.data[2*i+1]) def push(self, index): """Push the information of the root to it's children!""" self.lazy[2*index] += self.lazy[index] self.lazy[2*index+1] += self.lazy[index] self.data[2 * index] += self.lazy[index] self.data[2 * index + 1] += self.lazy[index] self.lazy[index] = 0 def build(self, index): """Build data with the new changes!""" index >>= 1 while index: self.data[index] = self.func(self.data[2*index], self.data[2*index+1]) + self.lazy[index] index >>= 1 def query(self, alpha, omega): """Returns the result of function over the range (inclusive)!""" res = self.default alpha += self.size omega += self.size + 1 for i in reversed(range(1, alpha.bit_length())): self.push(alpha >> i) for i in reversed(range(1, (omega - 1).bit_length())): self.push((omega-1) >> i) while alpha < omega: if alpha & 1: res = self.func(res, self.data[alpha]) alpha += 1 if omega & 1: omega -= 1 res = self.func(res, self.data[omega]) alpha >>= 1 omega >>= 1 return res def update(self, alpha, omega, value): """Increases all elements in the range (inclusive) by given value!""" alpha += self.size omega += self.size + 1 l, r = alpha, omega while alpha < omega: if alpha & 1: self.data[alpha] += value self.lazy[alpha] += value alpha += 1 if omega & 1: omega -= 1 self.data[omega] += value self.lazy[omega] += value alpha >>= 1 omega >>= 1 self.build(l) self.build(r-1) class SortedList: def __init__(self, iterable=[], _load=200): """Initialize sorted list instance.""" values = sorted(iterable) self._len = _len = len(values) self._load = _load self._lists = _lists = [values[i:i + _load] 
for i in range(0, _len, _load)] self._list_lens = [len(_list) for _list in _lists] self._mins = [_list[0] for _list in _lists] self._fen_tree = [] self._rebuild = True def _fen_build(self): """Build a fenwick tree instance.""" self._fen_tree[:] = self._list_lens _fen_tree = self._fen_tree for i in range(len(_fen_tree)): if i | i + 1 < len(_fen_tree): _fen_tree[i | i + 1] += _fen_tree[i] self._rebuild = False def _fen_update(self, index, value): """Update `fen_tree[index] += value`.""" if not self._rebuild: _fen_tree = self._fen_tree while index < len(_fen_tree): _fen_tree[index] += value index |= index + 1 def _fen_query(self, end): """Return `sum(_fen_tree[:end])`.""" if self._rebuild: self._fen_build() _fen_tree = self._fen_tree x = 0 while end: x += _fen_tree[end - 1] end &= end - 1 return x def _fen_findkth(self, k): """Return a pair of (the largest `idx` such that `sum(_fen_tree[:idx]) <= k`, `k - sum(_fen_tree[:idx])`).""" _list_lens = self._list_lens if k < _list_lens[0]: return 0, k if k >= self._len - _list_lens[-1]: return len(_list_lens) - 1, k + _list_lens[-1] - self._len if self._rebuild: self._fen_build() _fen_tree = self._fen_tree idx = -1 for d in reversed(range(len(_fen_tree).bit_length())): right_idx = idx + (1 << d) if right_idx < len(_fen_tree) and k >= _fen_tree[right_idx]: idx = right_idx k -= _fen_tree[idx] return idx + 1, k def _delete(self, pos, idx): """Delete value at the given `(pos, idx)`.""" _lists = self._lists _mins = self._mins _list_lens = self._list_lens self._len -= 1 self._fen_update(pos, -1) del _lists[pos][idx] _list_lens[pos] -= 1 if _list_lens[pos]: _mins[pos] = _lists[pos][0] else: del _lists[pos] del _list_lens[pos] del _mins[pos] self._rebuild = True def _loc_left(self, value): """Return an index pair that corresponds to the first position of `value` in the sorted list.""" if not self._len: return 0, 0 _lists = self._lists _mins = self._mins lo, pos = -1, len(_lists) - 1 while lo + 1 < pos: mi = (lo + pos) >> 1 if value 
<= _mins[mi]: pos = mi else: lo = mi if pos and value <= _lists[pos - 1][-1]: pos -= 1 _list = _lists[pos] lo, idx = -1, len(_list) while lo + 1 < idx: mi = (lo + idx) >> 1 if value <= _list[mi]: idx = mi else: lo = mi return pos, idx def _loc_right(self, value): """Return an index pair that corresponds to the last position of `value` in the sorted list.""" if not self._len: return 0, 0 _lists = self._lists _mins = self._mins pos, hi = 0, len(_lists) while pos + 1 < hi: mi = (pos + hi) >> 1 if value < _mins[mi]: hi = mi else: pos = mi _list = _lists[pos] lo, idx = -1, len(_list) while lo + 1 < idx: mi = (lo + idx) >> 1 if value < _list[mi]: idx = mi else: lo = mi return pos, idx def add(self, value): """Add `value` to sorted list.""" _load = self._load _lists = self._lists _mins = self._mins _list_lens = self._list_lens self._len += 1 if _lists: pos, idx = self._loc_right(value) self._fen_update(pos, 1) _list = _lists[pos] _list.insert(idx, value) _list_lens[pos] += 1 _mins[pos] = _list[0] if _load + _load < len(_list): _lists.insert(pos + 1, _list[_load:]) _list_lens.insert(pos + 1, len(_list) - _load) _mins.insert(pos + 1, _list[_load]) _list_lens[pos] = _load del _list[_load:] self._rebuild = True else: _lists.append([value]) _mins.append(value) _list_lens.append(1) self._rebuild = True def discard(self, value): """Remove `value` from sorted list if it is a member.""" _lists = self._lists if _lists: pos, idx = self._loc_right(value) if idx and _lists[pos][idx - 1] == value: self._delete(pos, idx - 1) def remove(self, value): """Remove `value` from sorted list; `value` must be a member.""" _len = self._len self.discard(value) if _len == self._len: raise ValueError('{0!r} not in list'.format(value)) def bisect_left(self, value): """Return the first index to insert `value` in the sorted list.""" pos, idx = self._loc_left(value) return self._fen_query(pos) + idx def bisect_right(self, value): """Return the last index to insert `value` in the sorted list.""" pos, idx 
= self._loc_right(value) return self._fen_query(pos) + idx def __getitem__(self, index): """Lookup value at `index` in sorted list.""" pos, idx = self._fen_findkth(self._len + index if index < 0 else index) return self._lists[pos][idx] # ---------------------------------------------------------------------------------------------------------------------- import io, os input = io.BytesIO(os.read(0, os.fstat(0).st_size)).readline maxRange = 400005 st = LazySegmentTree(list(range(0, -maxRange - 1, -1))) sl = SortedList([(-2, -2), (maxRange + 2, maxRange + 2)]) for __ in range(int(input())): x = int(input()) index = sl.bisect_left((x + 1, -1)) - 1 prevStart, prevEnd = sl.__getitem__(index) nextStart, nextEnd = sl.__getitem__(index + 1) to_add, to_remove = [], [] if prevEnd < x: # bit x is not set and no segment overlaps with x st.update(x, maxRange, 2) newStart, newEnd = x, x + 1 if prevEnd == x - 1: sl.remove((prevStart, prevEnd)) to_remove += [(prevStart, prevEnd)] newStart = prevStart if nextStart == x + 1: newEnd = nextEnd + 1 sl.remove((nextStart, nextEnd)) to_remove += [(nextStart, nextEnd)] start, end = sl.__getitem__(sl.bisect_left((newEnd + 1, newEnd + 1))) if start == newEnd + 1: newEnd = end sl.remove((start, end)) to_remove += [(start, end)] sl.add((newStart, newEnd)) to_add += [(newStart, newEnd)] else: isSet = st.query(x-1, x) != st.query(x, x) if not isSet: # bit x is not set and x is included in the segment st.update(x, maxRange, 2) sl.remove((prevStart, prevEnd)) to_remove += [(prevStart, prevEnd)] newStart, newEnd = prevStart, prevEnd + 2 if nextStart == prevEnd + 2: newEnd = nextEnd + 1 sl.remove((nextStart, nextEnd)) to_remove += [(nextStart, nextEnd)] start, end = sl.__getitem__(sl.bisect_left((newEnd + 1, newEnd + 1))) if start == newEnd + 1: newEnd = end sl.remove((start, end)) to_remove += [(start, end)] sl.add((newStart, newEnd)) to_add += [(newStart, newEnd)] else: # bit x is set st.update(x, maxRange, -2) target = st.query(prevStart - 1, 
prevStart - 1) - 2 alpha, omega = prevStart, prevEnd while alpha < omega: mid = (alpha + omega) // 2 if st.query(prevStart, mid) <= target: omega = mid else: alpha = mid + 1 sl.remove((prevStart, prevEnd)) to_remove += [(prevStart, prevEnd)] if alpha - 1 != prevStart: sl.add((prevStart, alpha - 2)) to_add += [(prevStart, alpha - 2)] if alpha != prevEnd: sl.add((alpha + 1, prevEnd)) to_add += [(alpha + 1, prevEnd)] print(len(to_remove)) for p, q in to_remove: print(p, q) print(len(to_add)) for p, q in to_add: print(p, q)
1759
A
Yes-Yes?
You talked to Polycarp and asked him a question. You know that when he wants to answer "yes", he repeats Yes many times in a row. Because of the noise, you only heard part of the answer — some substring of it. That is, if he answered YesYes, then you could hear esY, YesYes, sYes, e, but you couldn't Yess, YES or se. Determine if it is true that the given string $s$ is a substring of YesYesYes... (Yes repeated many times in a row).
Note that it is enough to consider the string $full =$YesYes...Yes, where Yes is written $18$ times, since $18 \cdot 3 = 54$, and our substring $s$ has size $|s| \le 50$. Then we just use the built-in function $find$ to find out if our string $s$ is a substring of the string $full$.
[ "implementation", "strings" ]
800
# Codeforces 1759A — Yes-Yes?
# |s| <= 50 and 18 * 3 = 54 >= 50, so s is a substring of the infinite
# repetition "YesYesYes..." iff it occurs in 'Yes' repeated 18 times.
# (Original code had its newlines collapsed onto one line; structure restored.)

FULL = 'Yes' * 18


def is_yes_substring(s: str) -> bool:
    """Return True iff s is a substring of 'YesYesYes...'."""
    return FULL.find(s) >= 0


def main() -> None:
    for _ in range(int(input())):
        print('YES' if is_yes_substring(input()) else 'NO')


if __name__ == "__main__":
    main()
1759
B
Lost Permutation
A sequence of $n$ numbers is called a permutation if it contains all integers from $1$ to $n$ exactly once. For example, the sequences [$3, 1, 4, 2$], [$1$] and [$2,1$] are permutations, but [$1,2,1$], [$0,1$] and [$1,3,4$] — are not. Polycarp lost his favorite permutation and found only some of its elements — the numbers $b_1, b_2, \dots b_m$. He is sure that the sum of the lost elements equals $s$. Determine whether one or more numbers can be appended to the given sequence $b_1, b_2, \dots b_m$ such that the sum of the added numbers equals $s$, and the resulting new array is a permutation?
Let us add to $s$ the sum of the elements of the array $b$ and try to find a suitable permutation. To do this, greedily add elements $1, 2, \dots, cnt$ while their sum is less than $s$. And at the end we will check that the sum has matched exactly. Also check that the maximal element from $b$ fits: $max(b) \le cnt$, and that at least one element is actually appended (since $s \ge 1$): $n < cnt$, where $n$ is the number of elements in $b$.
[ "math" ]
800
# Codeforces 1759B — Lost Permutation.
# (Original code had its newlines collapsed onto one line; structure
# restored and a stray semicolon removed.)

def can_complete(b, s):
    """Return True iff numbers summing to s can be appended to b so that
    the result is a permutation of 1..cnt.

    Greedily accumulate 1 + 2 + ... + cnt until it reaches s + sum(b);
    a valid completion exists iff the sums match exactly, max(b) fits
    inside 1..cnt, and at least one element is actually appended
    (cnt > len(b), since the lost sum s is positive).
    """
    total = s + sum(b)
    acc = 0
    cnt = 0
    for i in range(1, total + 1):
        if acc >= total:
            break
        acc += i
        cnt = i
    return acc == total and max(b) <= cnt and cnt > len(b)


def main() -> None:
    for _ in range(int(input())):
        _, s = map(int, input().split())
        b = [int(x) for x in input().split()]
        print("YES" if can_complete(b, s) else "NO")


if __name__ == "__main__":
    main()
1759
C
Thermostat
Vlad came home and found out that someone had reconfigured the old thermostat to the temperature of $a$. The thermostat can only be set to a temperature from $l$ to $r$ inclusive, the temperature cannot change by less than $x$. Formally, in one operation you can reconfigure the thermostat from temperature $a$ to temperature $b$ if $|a - b| \ge x$ and $l \le b \le r$. You are given $l$, $r$, $x$, $a$ and $b$. Find the minimum number of operations required to get temperature $b$ from temperature $a$, or say that it is impossible.
First let's consider the cases when the answer exists: If $a=b$, then the thermostat is already set up and the answer is $0$. Else if $|a - b| \ge x$, then it is enough to reconfigure the thermostat in $1$ operation. Else if there exists a temperature $c$ such that $|a - c| \ge x$ and $|b - c| \ge x$, then you can configure the thermostat in $2$ operations. If such $c$ exists between $l$ and $r$, we can choose one of the bounds: $a \rightarrow l \rightarrow b$ or $a \rightarrow r \rightarrow b$. We need to make $3$ operations if we cannot reconfigure through one of the boundaries as above, but we can through both: $a \rightarrow l \rightarrow r \rightarrow b$ or $a \rightarrow r \rightarrow l \rightarrow b$. If we can't get the temperature $b$ in one of these ways, the answer is $-1$.
[ "greedy", "math", "shortest paths" ]
1,100
def solve():
    """Minimum number of thermostat reconfigurations from a to b, or -1."""
    l, r, x = map(int, input().split())
    a, b = map(int, input().split())
    if a == b:
        return 0                      # already at the target
    lo, hi = min(a, b), max(a, b)
    if hi - lo >= x:
        return 1                      # one direct jump
    # One intermediate stop at a boundary reachable from both a and b.
    if r - hi >= x or lo - l >= x:
        return 2
    # Two stops: a -> one boundary -> the other boundary -> b.
    if (r - b >= x and a - l >= x) or (r - a >= x and b - l >= x):
        return 3
    return -1


t = int(input())
for _ in range(t):
    print(solve())
1759
D
Make It Round
Inflation has occurred in Berlandia, so the store needs to change the price of goods. The current price of good $n$ is given. It is allowed to increase the price of the good by $k$ times, with $1 \le k \le m$, k is an integer. Output the roundest possible new price of the good. That is, the one that has the maximum number of zeros at the end. For example, the number 481000 is more round than the number 1000010 (three zeros at the end of 481000 and only one at the end of 1000010). If there are several possible variants, output the one in which the new price is maximal. If it is impossible to get a rounder price, output $n \cdot m$ (that is, the maximum possible price).
The answer is $n \cdot k$. First, count two numbers: $cnt_2, cnt_5$ which denote the degree of occurrence of $2$ and $5$ in the number $n$ respectively, that is $n = 2^{cnt_2} \cdot 5^{cnt_5} \cdot d$. Where $d$ is not divisible by either $2$ or $5$. Now while $cnt_2 \neq cnt_5$ we will increase the corresponding value. For example, if $cnt_2 < cnt_5$, then as long as $cnt_2 \neq cnt_5$ and at that $k \cdot 2 \le m$ we will increase $cnt_2$ by $1$ and multiply $k$ by $2$. That way we can get the most round number possible by spending the least possible $k$. Now we either have $cnt_2 = cnt_5$, or $k \cdot 5 > m$ or $k \cdot 2 > m$. Then in the first case, we will multiply the number $k$ by $10$ as long as we can. That is, until $k \cdot 10 \le m$. Now in either case we have: $k \cdot 10 > m$. Then $\lfloor \frac{m}{k} \rfloor = x < 10$. Then we multiply $k$ by $x$ and get our desired answer. In the last step, we can no longer get a rounder number, but just find the maximal possible number.
[ "brute force", "number theory" ]
1,400
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

// Codeforces 1759D — Make It Round.
// Multiply n by some k (1 <= k <= m) so that n*k has the maximum number of
// trailing zeros; among such k, maximize the price itself.
void solve() {
    ll n, m;
    cin >> n >> m;
    const ll original = n;

    // Count the factors of 2 and 5 in n.
    int twos = 0, fives = 0;
    while (n > 0 && n % 2 == 0) { n /= 2; ++twos; }
    while (n > 0 && n % 5 == 0) { n /= 5; ++fives; }

    // Balance the exponents with the cheapest multiplications first:
    // each matched pair (2,5) contributes one trailing zero.
    ll k = 1;
    while (twos < fives && k * 2 <= m) { ++twos; k *= 2; }
    while (fives < twos && k * 5 <= m) { ++fives; k *= 5; }
    // Every further factor of 10 adds one more trailing zero.
    while (k * 10 <= m) k *= 10;

    if (k == 1) {
        // No extra zeros are obtainable: output the maximum possible price.
        cout << original * m << endl;
    } else {
        k *= m / k;  // final boost: 1 <= m/k < 10, zero count unchanged
        cout << original * k << endl;
    }
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1759
E
The Humanoid
There are $n$ astronauts working on some space station. An astronaut with the number $i$ ($1 \le i \le n$) has power $a_i$. An evil humanoid has made his way to this space station. The power of this humanoid is equal to $h$. Also, the humanoid took with him \textbf{two} green serums and \textbf{one} blue serum. In one second , a humanoid can do any of three actions: - to absorb an astronaut with power \textbf{strictly less} humanoid power; - to use green serum, if there is still one left; - to use blue serum, if there is still one left. When an astronaut with power $a_i$ is absorbed, this astronaut disappears, and power of the humanoid increases by $\lfloor \frac{a_i}{2} \rfloor$, that is, an integer part of $\frac{a_i}{2}$. For example, if a humanoid absorbs an astronaut with power $4$, its power increases by $2$, and if a humanoid absorbs an astronaut with power $7$, its power increases by $3$. After using the green serum, this serum disappears, and the power of the humanoid doubles, so it increases by $2$ times. After using the blue serum, this serum disappears, and the power of the humanoid triples, so it increases by $3$ times. The humanoid is wondering what the maximum number of astronauts he will be able to absorb if he acts optimally.
Let's make two obvious remarks: If we can absorb two astronauts with power $x \le y$, then we can always first absorb an astronaut with power $x$, and then an astronaut with power $y$; If we can absorb some astronaut, it is effective for us to do it right now. Let's sort the astronauts powers in increasing order. Now let's lock the sequence of serums we use. There are only three of them: blue serum can be the first, second or third. Let's absorb the astronauts in increasing order of their powers, and if we can't, then use the next serum in a locked sequence or stop. This solution works for $O(n)$.
[ "brute force", "dp", "sortings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1759E — The Humanoid.
// Sort astronauts ascending and absorb greedily; whenever absorption is
// blocked, branch on which serum (green x2 or blue x3) to spend next.

const int MAXN = 200200;
int n;
int arr[MAXN];

// How many astronauts from index i onward can be absorbed with power h,
// given `greens` doubling serums and `blues` tripling serums remaining.
// Branching happens only when blocked, so the recursion tree is tiny.
int solve(int i, long long h, int greens, int blues) {
    if (i == n) return 0;
    if (arr[i] < h)  // strictly weaker astronauts are always absorbed now
        return 1 + solve(i + 1, h + arr[i] / 2, greens, blues);
    int best = 0;
    if (greens) best = max(best, solve(i, h * 2, greens - 1, blues));
    if (blues)  best = max(best, solve(i, h * 3, greens, blues - 1));
    return best;
}

int main() {
    int t;
    cin >> t;
    while (t--) {
        long long h;
        cin >> n >> h;
        for (int i = 0; i < n; ++i) cin >> arr[i];
        sort(arr, arr + n);
        cout << solve(0, h, 2, 1) << endl;
    }
}
1759
F
All Possible Digits
A positive number $x$ of length $n$ in base $p$ ($2 \le p \le 10^9$) is written on the blackboard. The number $x$ is given as a sequence $a_1, a_2, \dots, a_n$ ($0 \le a_i < p$) — the digits of $x$ in order from left to right (most significant to least significant). Dmitry is very fond of all the digits of this number system, so he wants to see each of them at least once. In one operation, he can: - take any number $x$ written on the board, increase it by $1$, and write the new value $x + 1$ on the board. For example, $p=5$ and $x=234_5$. - Initially, the board contains the digits $2$, $3$ and $4$; - Dmitry increases the number $234_5$ by $1$ and writes down the number $240_5$. On the board there are digits $0, 2, 3, 4$; - Dmitry increases the number $240_5$ by $1$ and writes down the number $241_5$. Now the board contains all the digits from $0$ to $4$. Your task is to determine the minimum number of operations required to make all the digits from $0$ to $p-1$ appear on the board at least once.
If all digits from $0$ to $p-1$ are initially present in the number, then the answer is $0$. Each time we will increase the number by $1$. If the last digit is less than $p-1$, then only it will change. Otherwise, all digits equal to $p-1$ at the end will become equal to $0$, and the previous one will increase by $1$ (or a new digit equal to $1$ will be added if all digits were equal to $p-1$). For a $p-1$ operation, the last digit will run through all possible values. However, we can get all the numbers earlier. We will solve the problem using binary search, sorting through the number of operations. We can have 2 options: whether $0$ was at the end or not. Depending on this, one or two subsegments of the segment $[0, p-1]$ - a subsegment in the middle or a prefix and a suffix remained uncovered by the last digit of the number. They need to be completely covered with numbers that were already in positions, except for the last one - these are the original numbers and, in case there was $0$ at the end, the number into which the transfer was made. There are at most $n+1$ of them.
[ "binary search", "data structures", "greedy", "math", "number theory" ]
1,800
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1759F — All Possible Digits.
// Binary-search the number of +1 operations.  After m operations the last
// digit has swept a cyclic range of values; every digit outside that range
// must already be present among the higher positions (plus, after a carry,
// the digit the carry creates).
//
// Fixes vs. original: the digit set is now passed by const reference
// (it was copied on every binary-search probe), the unused pb_ds /
// ordered_set machinery is removed, and the size comparison is made
// signed-correct.

int newDigit = -1;  // digit produced in some position >= 1 by the first carry

// True if every digit in [l, r] is already on the board.  `useNewDigit`
// additionally accepts the carry-created digit.  The scan is effectively
// O(n): it returns at the first missing digit, and at most |digits| + 1
// values can match before that happens.
bool check(const set<int>& digits, int l, int r, bool useNewDigit) {
    for (int i = l; i <= r; ++i) {
        if (useNewDigit && i == newDigit) {
            continue;
        }
        if (!digits.count(i)) {
            return false;
        }
    }
    return true;
}

void solve() {
    int n, p;
    cin >> n >> p;
    vector<int> a(n + 1);  // a[0] == 0 acts as a sentinel leading digit
    set<int> digits;
    for (int i = 1; i <= n; ++i) {
        cin >> a[i];
        digits.insert(a[i]);
    }
    if ((int)digits.size() == p) {  // all p digits already present
        cout << "0\n";
        return;
    }
    // Digit created when the trailing run of (p-1)s carries into a higher
    // position; the a[0] sentinel covers the "all digits are p-1" case.
    for (int i = n - 1; i >= 0; --i) {
        if (a[i] < p - 1) {
            newDigit = a[i] + 1;
            break;
        }
    }
    int l = 0, r = p - 1;
    int x = a[n];  // last digit; m operations sweep it cyclically from x
    while (l < r) {
        int m = (l + r) >> 1;
        bool res;
        if (x + m >= p) {
            // Wrapped past p-1: the uncovered digits form one middle segment.
            res = check(digits, x + m + 1 - p, x - 1, true);
        } else {
            // No wrap: uncovered prefix [0, x-1] and suffix [x+m+1, p-1].
            res = check(digits, 0, x - 1, false) &&
                  check(digits, x + m + 1, p - 1, false);
        }
        if (res) {
            r = m;
        } else {
            l = m + 1;
        }
    }
    cout << l << '\n';
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    cout.tie(nullptr);
    int number_of_tests = 1;
    cin >> number_of_tests;
    while (number_of_tests--) {
        solve();
    }
    return 0;
}
1759
G
Restore the Permutation
A sequence of $n$ numbers is called permutation if it contains all numbers from $1$ to $n$ exactly once. For example, the sequences [$3, 1, 4, 2$], [$1$] and [$2,1$] are permutations, but [$1,2,1$], [$0,1$] and [$1,3,4$] — are not. For a permutation $p$ of even length $n$ you can make an array $b$ of length $\frac{n}{2}$ such that: - $b_i = \max(p_{2i - 1}, p_{2i})$ for $1 \le i \le \frac{n}{2}$ For example, if $p$ = [$2, 4, 3, 1, 5, 6$], then: - $b_1 = \max(p_1, p_2) = \max(2, 4) = 4$ - $b_2 = \max(p_3, p_4) = \max(3,1)=3$ - $b_3 = \max(p_5, p_6) = \max(5,6) = 6$ As a result, we made $b$ = $[4, 3, 6]$.For a given array $b$, find the \textbf{lexicographically minimal} permutation $p$ such that you can make the given array $b$ from it. If $b$ = [$4,3,6$], then the lexicographically minimal permutation from which it can be made is $p$ = [$1,4,2,3,5,6$], since: - $b_1 = \max(p_1, p_2) = \max(1, 4) = 4$ - $b_2 = \max(p_3, p_4) = \max(2, 3) = 3$ - $b_3 = \max(p_5, p_6) = \max(5, 6) = 6$ A permutation $x_1, x_2, \dots, x_n$ is lexicographically smaller than a permutation $y_1, y_2 \dots, y_n$ if and only if there exists such $i$ ($1 \le i \le n$) that $x_1=y_1, x_2=y_2, \dots, x_{i-1}=y_{i-1}$ and $x_i<y_i$.
First, let's check the $b$ array for correctness, that is, that it has no repeating elements. Then let's look at the following ideas: each number $b_i$ must be paired with another permutation element $p_j$, with $p_j \lt b_i$ by the definition of array $b$. Then, since we want a lexicographically minimal permutation, it is always more advantageous to put element $p_j$ before $b_i$. for the permutation to be lexicographically minimal, the smallest possible numbers must be placed at the beginning. Consequently, the largest numbers must be placed at the end. Let's proceed as follows: Let's select the set of $unused$ numbers that are not included in the $b$ array. For an element $b_{\frac{n}{2}}$, find the maximum number $k$ of the set $unused$ such that $b_{\frac{n}{2}} > k$ and put that number in front of the element $b_{\frac{n}{2}}$. moving from the end of the array to its beginning, each element $b_i$ will be matched with such an element. If at some point $k$ can not be matched - array $b$ is not composed correctly, and the answer to the query - "NO". Otherwise, print "YES" and the resulting permutation $p$.
[ "binary search", "constructive algorithms", "data structures", "greedy", "math" ]
1,900
#include "bits/stdc++.h" using namespace std; int n; void solve(){ cin >> n; vector<int>b(n / 2), p(n); vector<bool>isUsed(n + 1, false); set<int>unused; for(int i = 0; i < n / 2; i++){ cin >> b[i]; p[i * 2 + 1] = b[i]; isUsed[b[i]] = true; } for(int i = 1; i <= n; i++){ if(!isUsed[i]) unused.insert(i); } if(int(unused.size()) != n / 2){ cout << "-1\n"; return; } for(int i = n / 2 - 1; i >= 0; i--){ auto k = unused.upper_bound(p[2 * i + 1]); if(k == unused.begin()){ cout << "-1\n"; return; } k--; if(*k < p[2 * i + 1]){ p[2 * i] = *k; unused.erase(k); } else{ cout << "-1\n"; return; } } for(auto i : p) cout << i << ' '; cout << endl; } int main(){ ios_base::sync_with_stdio(false); cin.tie(nullptr); int t; cin >> t; while(t--){ solve(); } }
1760
A
Medium Number
Given three \textbf{distinct} integers $a$, $b$, and $c$, find the medium number between all of them. The medium number is the number that is neither the minimum nor the maximum of the given three numbers. For example, the median of $5,2,6$ is $5$, since the minimum is $2$ and the maximum is $6$.
Here are two ways to implement what's given in the problem: Take input as an array $[a_1, a_2, a_3]$, and sort it. Output the middle element. Write two if-statements. The first: if $(a>b \text{ and } a<c) \text{ or } (a<b \text{ and } a>c)$, output $a$. Else, if $(b>a \text{ and } b<c) \text{ or } (b<a \text{ and } b>c)$, output $b$. Else, output $c$.
[ "implementation", "sortings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1760A — Medium Number: of three distinct integers, print the
// one that is neither the minimum nor the maximum.
void solve() {
    int x, y, z;
    cin >> x >> y >> z;
    // The medium value equals the total minus the two extremes.
    cout << x + y + z - min({x, y, z}) - max({x, y, z}) << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) solve();
}
1760
B
Atilla's Favorite Problem
In order to write a string, Atilla needs to first learn all letters that are contained in the string. Atilla needs to write a message which can be represented as a string $s$. He asks you what is the minimum alphabet size required so that one can write this message. The alphabet of size $x$ ($1 \leq x \leq 26$) contains \textbf{only the first} $x$ Latin letters. For example an alphabet of size $4$ contains \textbf{only} the characters $a$, $b$, $c$ and $d$.
To solve the problem we need to find the character with the highest alphabetical order in our string, since Atilla will need at least that alphabet size and won't need more. To do this iterate through the string and find the character with the highest alphabetical order. Output the maximum alphabetical order found. The solution can be done in $O(n).$
[ "greedy", "implementation", "strings" ]
800
#include "bits/stdc++.h" using namespace std; using ll = long long; #define forn(i,n) for(int i=0;i<n;i++) #define all(v) v.begin(), v.end() #define rall(v) v.rbegin(),v.rend() #define pb push_back #define sz(a) (int)a.size() void solve() { int n; string s; cin >> n >> s; sort(all(s)); cout << s.back() - 'a' + 1 << "\n"; } int32_t main() { ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0); int t = 1; cin >> t; while(t--) { solve(); } }
1760
C
Advantage
There are $n$ participants in a competition, participant $i$ having a strength of $s_i$. Every participant wonders how much of an advantage they have over the other best participant. In other words, each participant $i$ wants to know the difference between $s_i$ and $s_j$, where $j$ is the strongest participant in the competition, not counting $i$ (a difference can be negative). So, they ask you for your help! For each $i$ ($1 \leq i \leq n$) output the difference between $s_i$ and the maximum strength of any participant other than participant $i$.
Make a copy of the array $s$: call it $t$. Sort $t$ in non-decreasing order, so that $t_1$ is the maximum strength and $t_2$ - the second maximum strength. Then for everyone but the best person, they should compare with the best person who has strength $t_1$. So for all $i$ such that $s_i \neq t_1$, we should output $s_i - t_1$. Otherwise, output $s_i - t_2$ - the second highest strength, which is the next best person.
[ "data structures", "implementation", "sortings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1760C — Advantage.
// Each participant's answer is s_i minus the best OTHER strength: the
// global maximum for everyone, and the second maximum for the maximum
// itself (duplicated maxima yield 0 either way).
//
// Fixes vs. original: only the top two values are needed, so a single
// O(n) pass replaces copying and fully sorting the array; fast IO is
// enabled (up to 2e5 numbers per test were read through synced streams).
int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (int& x : a) cin >> x;
        // Largest and second-largest strengths in one pass.
        int best = INT_MIN, second = INT_MIN;
        for (int x : a) {
            if (x > best) {
                second = best;
                best = x;
            } else if (x > second) {
                second = x;
            }
        }
        for (int x : a) {
            cout << x - (x == best ? second : best) << " ";
        }
        cout << endl;
    }
}
1760
D
Challenging Valleys
You are given an array $a[0 \dots n-1]$ of $n$ integers. This array is called a "valley" if there exists \textbf{exactly one} subarray $a[l \dots r]$ such that: - $0 \le l \le r \le n-1$, - $a_l = a_{l+1} = a_{l+2} = \dots = a_r$, - $l = 0$ or $a_{l-1} > a_{l}$, - $r = n-1$ or $a_r < a_{r+1}$. Here are three examples: The first image shows the array [$3, 2, 2, 1, 2, 2, 3$], it \textbf{is a valley} because only subarray with indices $l=r=3$ satisfies the condition. The second image shows the array [$1, 1, 1, 2, 3, 3, 4, 5, 6, 6, 6$], it \textbf{is a valley} because only subarray with indices $l=0, r=2$ satisfies the codition. The third image shows the array [$1, 2, 3, 4, 3, 2, 1$], it \textbf{is not a valley} because two subarrays $l=r=0$ and $l=r=6$ that satisfy the condition. You are asked whether the given array is a valley or not. Note that we consider the array to be indexed from $0$.
One possible solution is to represent each range of equal elements as a single element with that value. Construct this array $b$, loop through it, and check how many elements $b_i$ satisfy the conditions $i = 0$ or $b_{i-1} < b_i$ and $i = n-1$ or $b_i > b_{i+1}$. If exactly one index satisfies these conditions, print "YES" and otherwise "NO". Complexity: $O(n)$
[ "implementation", "two pointers" ]
1,000
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1760D — Challenging Valleys.
// Collapse runs of equal values, then count local minima of the collapsed
// sequence; the array is a valley iff exactly one exists.
void solve() {
    int n;
    cin >> n;
    vector<int> b;  // input with consecutive duplicates collapsed
    for (int i = 0; i < n; ++i) {
        int x;
        cin >> x;
        if (b.empty() || b.back() != x) b.push_back(x);
    }
    const int m = (int)b.size();
    int minima = 0;
    for (int i = 0; i < m; ++i) {
        bool dropsFromLeft = (i == 0) || b[i - 1] > b[i];
        bool risesToRight  = (i == m - 1) || b[i] < b[i + 1];
        if (dropsFromLeft && risesToRight) ++minima;
    }
    if (minima == 1) {
        cout << "YES" << endl;
    } else {
        cout << "NO" << endl;
    }
}

int main() {
    int t = 1;
    cin >> t;
    while (t--) solve();
}
1760
E
Binary Inversions
You are given a binary array$^{\dagger}$ of length $n$. You are allowed to perform one operation on it \textbf{at most once}. In an operation, you can choose any element and flip it: turn a $0$ into a $1$ or vice-versa. What is the maximum number of inversions$^{\ddagger}$ the array can have after performing \textbf{at most one} operation? $^\dagger$ A binary array is an array that contains only zeroes and ones. $^\ddagger$ The number of inversions in an array is the number of pairs of indices $i,j$ such that $i<j$ and $a_i > a_j$.
Let's find out how to count the number of binary inversions, without flips. This is the number of $1$s that appear before a $0$. To do this, iterate through the array and keep a running total $k$ of the number of $1$s seen so far. When we see a $0$, increase the total inversion count by $k$, since this $0$ makes $k$ inversions: one for each of the $1$s before it. Now let's see how to maximize the inversions. Consider the flip $0 \to 1$. We claim that it is best to always flip the earliest $0$ in the array. It's never optimal to flip a later $0$, since we have strictly fewer $0$s after it to form inversions. Similarly, we should flip the latest $1$ in the array. Now recalculate the answer for these two choices for flipping, and pick the maximum. The complexity is $\mathcal{O}(n)$.
[ "data structures", "greedy", "math" ]
1,100
#include "bits/stdc++.h" using namespace std; using ll = long long; #define forn(i,n) for(int i=0;i<n;i++) #define all(v) v.begin(), v.end() #define rall(v) v.rbegin(),v.rend() #define pb push_back #define sz(a) (int)a.size() ll calc(vector<int>& a) { ll zeroes = 0, ans = 0; for(int i = sz(a) - 1; i >= 0; --i) { if(a[i] == 0) ++zeroes; else ans += zeroes; } return ans; } void solve() { int n; cin >> n; vector<int> a(n); forn(i, n) cin >> a[i]; ll ans = calc(a); forn(i, n) { if(a[i] == 0) { a[i] = 1; ans = max(ans, calc(a)); a[i] = 0; break; } } for(int i = n - 1; i >= 0; --i) { if(a[i] == 1) { a[i] = 0; ans = max(ans, calc(a)); a[i] = 1; break; } } cout << ans << "\n"; } int32_t main() { ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0); int t = 1; cin >> t; while(t--) { solve(); } }
1760
F
Quests
There are $n$ quests. If you complete the $i$-th quest, you will gain $a_i$ coins. You can only complete at most one quest per day. However, once you complete a quest, you cannot do the same quest again for $k$ days. (For example, if $k=2$ and you do quest $1$ on day $1$, then you cannot do it on day $2$ or $3$, but you can do it again on day $4$.) You are given two integers $c$ and $d$. Find the maximum value of $k$ such that you can gain at least $c$ coins over $d$ days. If no such $k$ exists, output Impossible. If $k$ can be arbitrarily large, output Infinity.
Let's fix $k$ and find the maximum number of coins we can get. Here we can do a greedy solution: at every step, we should always take the most rewarding quest. (Intuitively, it makes sense, since doing more rewarding quests earlier allows us to do them again later.) If no quests are available, we do nothing. To implement this, sort the quests in decreasing order, and $0$-index them. On day $i$ we should do quest $i \bmod k$, provided that this value is less than $n$. This is because after every $k$ days, we cycle back to the first quest. Thus we solved the problem for a fixed $k$ in $\mathcal{O}(d)$ with $\mathcal{O}(n \log n)$ precomputation to sort the array. Now to solve the problem, we can binary search on the answer, since if some $k$ works, then all smaller $k$ work. The minimum value of $k$ is $0$, and the maximum value is $n$ (for larger $k$, we won't be able to do the same quest multiple times anyways, so it's useless to consider them). If we find that $k$ always goes towards the smaller end of our binary search and $k=0$ still fails, we output Impossible. If we find that $k$ always goes towards the larger end of our binary search and $k=n$ still fails, we output Infinity. Otherwise, just output $k$. The overall time complexity is $\mathcal{O}(n \log n + d \log n)$. Remark. It is not hard to improve the solution to $\mathcal{O}(n \log n)$. Originally, I proposed the problem this way, but we ended up removing this part of the problem because the implementation of this solution was tricky enough.
[ "binary search", "greedy", "sortings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1760F — Quests.
// For a fixed cooldown k the greedy optimum repeats the top quests
// cyclically: on day i (0-indexed) do quest i % (k+1) if it exists.
// Earnings are monotone non-increasing in k, so binary search the largest
// feasible k; internally m = k + 1 is the cycle length, and m = d + 2
// encodes "k arbitrarily large" (Infinity).
//
// Fixes vs. original: the non-standard variable-length array
// `long long a[n]` is replaced by std::vector, and the unused local
// `curr` is removed.
void solve() {
    int n, d;
    long long c;
    cin >> n >> c >> d;
    vector<long long> a(n);
    for (auto& x : a) cin >> x;
    sort(a.begin(), a.end(), greater<long long>());

    int l = 0, r = d + 2;
    while (l < r) {
        int m = l + (r - l + 1) / 2;  // candidate cycle length, always >= 1
        long long earned = 0;
        for (int i = 0; i < d; ++i) {
            if (i % m < n) earned += a[i % m];
        }
        if (earned >= c) {
            l = m;  // feasible: try a longer cooldown
        } else {
            r = m - 1;
        }
    }
    if (l == d + 2) { cout << "Infinity\n"; return; }
    if (l == 0)     { cout << "Impossible\n"; return; }
    cout << l - 1 << '\n';
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);
    int tt;
    cin >> tt;
    for (int i = 1; i <= tt; i++) {
        solve();
    }
}
1760
G
SlavicG's Favorite Problem
You are given a weighted tree with $n$ vertices. Recall that a tree is a connected graph without any cycles. A weighted tree is a tree in which each edge has a certain weight. The tree is undirected, it doesn't have a root. Since trees bore you, you decided to challenge yourself and play a game on the given tree. In a move, you can travel from a node to one of its neighbors (another node it has a direct edge with). You start with a variable $x$ which is initially equal to $0$. When you pass through edge $i$, $x$ changes its value to $x ~\mathsf{XOR}~ w_i$ (where $w_i$ is the weight of the $i$-th edge). Your task is to go from vertex $a$ to vertex $b$, but you are allowed to enter node $b$ if and only if after traveling to it, the value of $x$ will become $0$. In other words, you can travel to node $b$ only by using an edge $i$ such that $x ~\mathsf{XOR}~ w_i = 0$. Once you enter node $b$ the game ends and you win. Additionally, you can teleport \textbf{at most once} at any point in time to any vertex except vertex $b$. You can teleport from any vertex, even from $a$. Answer with "YES" if you can reach vertex $b$ from $a$, and "NO" otherwise. Note that $\mathsf{XOR}$ represents the bitwise XOR operation.
Let's ignore the teleporting, and decide how to find the answer. Note that we don't need to ever go over an edge more than once, since going over an edge twice cancels out (since $a~\mathsf{XOR}~a = 0$ for all $a$). In other words, the only possible value of $x$ equals the $\mathsf{XOR}$ of the edges on the unique path from $a$ to $b$. We can find it through a BFS from $a$, continuing to keep track of $\mathsf{XOR}$s as we move to each adjacent node, and $\mathsf{XOR}$ing it by the weight of the corresponding edge as we travel across it. Now let's include the teleport. It means that we travel from $a \to c$, then teleport to $d$, and go from $d \to b$, for some nodes $c$ and $d$. Also, we cannot pass $b$ on the path from $a \to c$. Again, note that the value of $x$ is fixed on each of the paths from $a \to c$ and $d \to b$, since there is a unique path between them. Let $x_1$ be the $\mathsf{XOR}$ of the first path and $x_2$ be the $\mathsf{XOR}$ of the second. Then we need $x_1~\mathsf{XOR}~x_2=0 \implies x_1=x_2$. So we need to find if there are two nodes $c$, $d$ such that the $\mathsf{XOR}$s from $a$ and $b$ to those nodes are the same. To do this, we can do our BFS from before, but instead run one BFS from $a$ and another from $b$, and check if any two values are the same. Make sure not to include nodes past $b$ while we look for $c$ on our BFS from $a$. The time complexity is $\mathcal{O}(n \log n)$.
[ "bitmasks", "dfs and similar", "graphs" ]
1,700
#include "bits/stdc++.h" using namespace std; using ll = long long; #define forn(i,n) for(int i=0;i<n;i++) #define all(v) v.begin(), v.end() #define rall(v) v.rbegin(),v.rend() #define pb push_back #define sz(a) (int)a.size() const int N = 1e5 + 10; vector<pair<int, int>> adj[N]; set<int> s; bool ok = true; int n, a, b; void dfs1(int u, int par, int x) { if(u == b) return; s.insert(x); for(auto e: adj[u]) { int v = e.first, w = e.second; if(v == par) continue; dfs1(v, u, x ^ w); } } bool dfs2(int u, int par, int x) { if(u != b && s.count(x)) return true; for(auto e: adj[u]) { int v = e.first, w = e.second; if(v == par) continue; if(dfs2(v, u, w ^ x)) return true; } return false; } void solve() { s.clear(); cin >> n >> a >> b; --a, --b; forn(i, n) adj[i].clear(); for(int i = 0; i < n - 1; ++i) { int u, v, w; cin >> u >> v >> w; --u, --v; adj[u].pb({v, w}); adj[v].pb({u, w}); } dfs1(a, -1, 0); if(dfs2(b, -1, 0)) cout << "YES\n"; else cout << "NO\n"; } int32_t main() { ios_base::sync_with_stdio(0);cin.tie(0);cout.tie(0); int t = 1; cin >> t; while(t--) { solve(); } }
1761
A
Two Permutations
You are given three integers $n$, $a$, and $b$. Determine if there exist two permutations $p$ and $q$ of length $n$, for which the following conditions hold: - The length of the longest common prefix of $p$ and $q$ is $a$. - The length of the longest common suffix of $p$ and $q$ is $b$. A permutation of length $n$ is an array containing each integer from $1$ to $n$ exactly once. For example, $[2,3,1,5,4]$ is a permutation, but $[1,2,2]$ is not a permutation ($2$ appears twice in the array), and $[1,3,4]$ is also not a permutation ($n=3$ but there is $4$ in the array).
If $a+b+2\leq n$, we can always find such pair, here is a possible construction: $A=\{\color{red}{1,2,\cdots,a},\color{orange}{n-b},\color{green}{a+1,a+2,\cdots,n-b-1},\color{blue}{n-b+1,n-b+2,\cdots,n}\}\\B=\{\color{red}{1,2,\cdots,a},\color{green}{a+1,a+2,\cdots,n-b-1},\color{orange}{n-b},\color{blue}{n-b+1,n-b+2,\cdots,n}\}$ The red part is their longest common prefix, and the blue part is their longest common suffix. Otherwise, the two permutations must be equal, so such pair exists iff $a=b=n$.
[ "brute force", "constructive algorithms" ]
800
null
1761
B
Elimination of a Ring
Define a cyclic sequence of size $n$ as an array $s$ of length $n$, in which $s_n$ is adjacent to $s_1$. Muxii has a ring represented by a cyclic sequence $a$ of size $n$. However, the ring itself hates equal adjacent elements. So if two adjacent elements in the sequence are equal at any time, \textbf{one of them} will be erased \textbf{immediately}. The sequence doesn't contain equal adjacent elements initially. Muxii can perform the following operation until the sequence becomes empty: - Choose an element in $a$ and erase it. For example, if ring is $[1, 2, 4, 2, 3, 2]$, and Muxii erases element $4$, then ring would erase one of the elements equal to $2$, and the ring will become $[1, 2, 3, 2]$. Muxii wants to find the \textbf{maximum} number of operations he could perform. \textbf{Note that in a ring of size $1$, its only element isn't considered adjacent to itself (so it's not immediately erased).}
Hint Do we need more than $3$ types of elements? Try to solve the problem with $a_i\leq 3$. Solution First of all, when there're only $2$ types of elements appearing in the sequence, the answer would be $\frac{n}2+1$. Otherwise, the conclusion is that we can always reach $n$ operations when there are more than $2$ types of elements appearing in the sequence. The proof is given below: When the length of the sequence is greater than $3$, there will always be a pair of positions $(i,j)$, such that $a_i=a_j$ and $a_i$ has two different neighboring elements. Then we can erase $a_i$ and then the problem is decomposed into a smaller one. If there do not exist such pairs, then we can infer that there exists at least $1$ element which appeared only once in the sequence. If there exists such element $b$, then we can continuously erase all the elements next to $b$, then erase $b$ at last. When the length $n$ of the sequence is less than $3$, it is clear that there will be exactly $n$ operations as well. So we only need to check the number of elements that appeared in the sequence of length $n$. If the number is $2$, the answer will be $\frac n2 + 1$. Otherwise, the answer equals $n$.
[ "constructive algorithms", "greedy", "implementation" ]
1,000
null
1761
C
Set Construction
You are given a binary matrix $b$ (all elements of the matrix are $0$ or $1$) of $n$ rows and $n$ columns. You need to construct a $n$ sets $A_1, A_2, \ldots, A_n$, for which the following conditions are satisfied: - Each set is nonempty and consists of distinct integers between $1$ and $n$ inclusive. - All sets are distinct. - For all pairs $(i,j)$ satisfying $1\leq i, j\leq n$, $b_{i,j}=1$ if and only if $A_i\subsetneq A_j$. In other words, $b_{i, j}$ is $1$ if $A_i$ is a proper subset of $A_j$ and $0$ otherwise. Set $X$ is a proper subset of set $Y$, if $X$ is a nonempty subset of $Y$, and $X \neq Y$. It's guaranteed that for all test cases in this problem, such $n$ sets exist. \textbf{Note that it doesn't mean that such $n$ sets exist for all possible inputs.} If there are multiple solutions, you can output any of them.
Hint 1: When you are trying to add an element into a set $S$, you will have to add the element to every set that is meant to include $S$. Hint 2: If $A$ does not include $B$, then $A$ and $B$ are already distinct. If $A$ does include $B$, What is the easiest way of making $A$ and $B$ distinct? Solution: Denote an ancestor to $S$ as a set that is meant to include $S$. Denote a descendant to $S$ as a set that is meant to be included by $S$. Let all sets be empty from the beginning. Iterate through the sets. To make set $S$ distinct from its descendants, we can add a new number $x_S$ that hasn't been added to any previous sets to $S$ and all of its ancestors. After the execution above, we will find out that the conditions are all satisfied, since: - For all descendants of a set $S$, all the elements they have will be included in $S$; - Vice versa for all ancestors of a set $S$; - For each set $T$ that is not an ancestor nor a descendant to $S$, they will not include each other. This is because $S$ does not include $T$, since $S$ does not have the element $x_T$; and $T$ does not include $S$ for the same reason. Therefore, the construction above satisfies all given conditions. Moreover, we can set $x_S$ to the index of $S$ for a simpler implementation.
[ "constructive algorithms", "dfs and similar", "graphs", "greedy" ]
1,400
null
1761
D
Carry Bit
Let $f(x,y)$ be the number of carries of $x+y$ in binary (i. e. $f(x,y)=g(x)+g(y)-g(x+y)$, where $g(x)$ is the number of ones in the binary representation of $x$). Given two integers $n$ and $k$, find the number of ordered pairs $(a,b)$ such that $0 \leq a,b < 2^n$, and $f(a,b)$ equals $k$. Note that for $a\ne b$, $(a,b)$ and $(b,a)$ are considered as two different pairs. As this number may be large, output it modulo $10^9+7$.
Hint 1: Try to solve the problem in $O(nk)$ using DP. Hint 2: There is no need for DP. Hint 3: You can consider enumerating the bits to carry, and then counting. Let $a_i$ represents the $i$-th bit of $a$ in binary representation (that is, $2^i \times a_i=a \wedge 2^i$) and define $b_i$ similarly. If you decide which bits to carry ahead, you will find that every bit of $a,b$ is independent (because whether the previous bit carries or not is decided), so you can use the multiplication principle to count. Therefore, in the remaining tutorial, we should determine the carries first and then count the number of options of $a_i,b_i$ meeting the constraints of carries. Define array $c$ as our decided carry plan, $c_i=1$ indicates that the $i$-th bit is carried, and define $c_{-1}$ as $0$. Notice that $c_i=a_i \vee b_i \vee c_{i-1}$. Ponder each bit, we will notice that if $c_i=c_{i-1}=0$, $(a_i,b_i)$ can be $(0,0),(0,1),(1,0)$. $c_i=c_{i-1}=1$, $(a_i,b_i)$ can be $(1,1),(0,1),(1,0)$. $c_i=1$ and $c_{i-1}=0$, $(a_i,b_i)$ must be $(1,1)$. $c_i=0$ and $c_{i-1}=1$, $(a_i,b_i)$ must be $(0,0)$. That means that pair $(a_i,b_i)$ has $3$ options if $c_i=c_{i-1}$, and pair $(a_i,b_i)$ has $1$ options if $c_i\neq c_{i-1}$. So if array $c$ has $q$ positions that $c_i\neq c_{i-1}$ ( $0 \leq i < n$, remember we define $c_{-1}$ as $0$ ), the count of pair $(a,b)$ is $3^{n-q}$. Now we can enumerate $q$, and count the number of $c$ has $q$ positions that $c_i\neq c_{i-1}$. The new problem equals a typical binomial problem. Notice that for every $q$, a valid $c$ should have $\lfloor \frac{q}{2} \rfloor$ segment of consecutive $1$s and $\lceil \frac{q}{2} \rceil$ segment of consecutive $0$s if we seen $c_{-1}$ as a normal bit (so that we have $n-k+1$ zeros). The number of solutions that divide $a$ elements into $b$ segments is $\binom{a-1}{b-1}$. 
Therefore the answer of each $q$ is $3^{n-q} \times \binom{k-1}{\lfloor \frac{q}{2} \rfloor-1} \times \binom{(n-k+1)-1}{\lceil \frac{q}{2} \rceil-1}$, and we can calculate it in $\Theta(1)$. Add them all and you can find the answer in $\Theta(n)$.
[ "combinatorics", "math" ]
2,100
null
1761
E
Make It Connected
You are given a simple undirected graph consisting of $n$ vertices. The graph doesn't contain self-loops, there is at most one edge between each pair of vertices. Your task is simple: make the graph connected. You can do the following operation any number of times (possibly zero): - Choose a vertex $u$ arbitrarily. - For each vertex $v$ satisfying $v\ne u$ in the graph individually, if $v$ is adjacent to $u$, remove the edge between $u$ and $v$, otherwise add an edge between $u$ and $v$. Find the minimum number of operations required to make the graph connected. Also, find any sequence of operations with the minimum length that makes the graph connected.
Hint 1 Try to figure out the conditions where a task can be solved with $1$ operation. Then $2$ operations, and then even more operations. Hint 2 The answer could be larger than $2$ only when the graph is made up of $2$ cliques, where you could only perform the operations on every vertex in the smaller clique to get the minimum number of operations. Solution First of all, we need to check if the graph is already connected at the beginning. If so, the answer would be $0$. Otherwise, there will be more than $1$ connected component. If there exists a vertex that is the only vertex required to be operated to make the graph connected, we call such a vertex "feasible vertex". We may find out that a feasible vertex can only appear in a connected component that is not a clique. But actually, there will always be such a vertex in a non-clique component. To prove this, we may figure out the sufficient condition for being a feasible vertex first. The sufficient condition is that, if a vertex is not a cut vertex, and it is not adjacent to all other vertices in the connected component, then it must be a feasible vertex. We can prove that such a vertex always exists in a non-clique component. Here is the proof: Firstly, if there exist non-cut vertices that are adjacent to all other vertices in the component, we erase them one by one until there don't exist any non-cut vertices which are adjacent to all other vertices (note that a non-cut vertex which is adjacent to all other vertices may become a cut vertex after erasing some of the other vertices). Apparently, the remaining component would still be a non-clique component. Otherwise, the component could only be a clique from the beginning, which contradicts the premise. Apparently, the remaining component would still be a non-clique component. Otherwise, the component could only be a clique from the beginning, which contradicts the premise. 
Then, we will find a non-cut vertex in the remaining component, since that vertices in a graph couldn't be all cut vertices. The non-cut vertex we found is the vertex we are searching for. But implementing it directly (meaning using Tarjan's algorithm to find a non-cut vertex) might not be the easiest way to solve the problem. Actually, the vertex with the least degree in a connected component always satisfy the condition. We would like to leave the proof work of the alternative method to you. Now we have proven that, if there exists a connected component that is not a clique, then the answer would be at most $1$. What if all connected components are cliques? If there are exactly $2$ connected components, then apparently we will have to operate on all vertices in a connected component. So we'll choose the smaller connected component to operate, and the answer is exactly the size of it. Otherwise, we can arbitrarily choose two vertices from two different connected components and operate on them. The answer is $2$. Note that we also need to deal with isolated vertices (meaning vertices that are not adjacent to any other vertices) separately.
[ "binary search", "brute force", "constructive algorithms", "dsu", "graphs", "greedy", "matrices", "trees", "two pointers" ]
2,400
null
1761
F1
Anti-median (Easy Version)
\textbf{This is the easy version of the problem. The only difference between the two versions is the constraint on $n$. You can make hacks only if all versions of the problem are solved.} Let's call an array $a$ of odd length $2m+1$ (with $m \ge 1$) \textbf{bad}, if element $a_{m+1}$ is equal to the median of this array. In other words, the array is bad if, after sorting it, the element at $m+1$-st position remains the same. Let's call a permutation $p$ of integers from $1$ to $n$ \textbf{anti-median}, if every its subarray of odd length $\ge 3$ is not bad. You are already given values of some elements of the permutation. Find the number of ways to set unknown values to obtain an \textbf{anti-median} permutation. As this number can be very large, find it modulo $10^9+7$.
Let's analyze the structure of anti-median permutations. First, if for any $2 \le i \le n-1$ holds $a_{i-1}>a_i>a_{i+1}$, or $a_{i-1}<a_i<a_{i+1}$, then segment $p[i-1:i+1]$ is bad. So, the signs between adjacent elements are alternating. So, consider two cases: all elements on even positions are local maximums, and on odd local minimums, and vice versa. Let's find the answer for the first case (for the second, you can find the answer similarly). Consider a segment of length $5$, $[p_{i-2}, p_{i-1}, p_i, p_{i+1}, p_{i+2}]$. Consider the case when $i$ is even first. Then $p_i>p_{i-1}, p_{i+1}$. For $p_i$ to not be median, it has to be larger than one of $p_{i-2}, p_{i+2}$. So, when we consider only even elements, each element (except the first and last one) has at least one adjacent element smaller than it. It's easy to see that this implies that elements at even positions are first increasing and then decreasing. Similarly, we can see that elements at odd positions are first decreasing, then increasing. It's not hard to see that these conditions are sufficient. Indeed, suppose that: All elements on even positions are local maximums, and all elements on odd positions are local minimums Elements at even positions are first increasing and then decreasing. Elements at odd positions are first decreasing, then increasing. Then, consider any segment of odd length. Denote it by $b_1, b_2, \ldots, b_{2m+1}$, and wlog $b_{m+1}$ is local maximum. If we look at local maximums, at least one of the following two conditions has to hold: all local maximums to the right of $b_{m+1}$ are smaller than it, or all local maximums to the left of $b_{m+1}$ are smaller than it. Wlog first case. Then all elements to the right of $b_{m+1}$ are smaller than it, and $b_m$ is also smaller than it, so $b_{m+1}$ can't be a median. Now, let's put all elements on the circle in the following order: first, all even elements from left to right, then all odd elements from right to left. 
In this circle, the elements on both paths between $n$ and $1$ are decreasing. It follows that for any $k$, numbers from $k$ to $n$ form a segment (in this cyclic arrangement). Then, we can write a $dp$ of the form: $dp[l][r]$: how many ways are there to arrange the largest $(r-l+1+n)\bmod n$ elements so that they end up in the positions from $l$-th to $r$-th in this cyclic arrangement. All the transitions and checks are done in $O(1)$, and there are $O(n^2)$ states, so we are done.
[ "dp", "math" ]
3,100
null
1761
F2
Anti-median (Hard Version)
\textbf{This is the hard version of the problem. The only difference between the two versions is the constraint on $n$. You can make hacks only if all versions of the problem are solved.} Let's call an array $a$ of odd length $2m+1$ (with $m \ge 1$) \textbf{bad}, if element $a_{m+1}$ is equal to the median of this array. In other words, the array is bad if, after sorting it, the element at $m+1$-st position remains the same. Let's call a permutation $p$ of integers from $1$ to $n$ \textbf{anti-median}, if every its subarray of odd length $\ge 3$ is not bad. You are already given values of some elements of the permutation. Find the number of ways to set unknown values to obtain an \textbf{anti-median} permutation. As this number can be very large, find it modulo $10^9+7$.
For this version, we have to analyze our dp a bit more. Once again, how do anti-median permutations look? We consider the order of positions on the cycle, as in F1. We choose the position of $n$, and then, for $i$ from $n-1$ to $1$, we choose a position of number $i$ among two options: right to the right of the current segment or right to the left. If we fill this way, do we always get an anti-median permutation? Not really. This way makes sure that elements at even positions are first increasing, then decreasing, and at odd positions, first decreasing, then increasing, but it doesn't make sure that the element at the even position is larger than its neighbors. How do we guarantee that? Well, some segments are just not allowed: those, which contain a prefix of odd positions, prefix of even positions, and the prefix of odd positions is larger (off by $\pm 1$, depending on the parity of $n$), and same for suffixes. Another observation is that if we know where $x$ is, we only have to options for where can the segment of numbers from $x$ to $n$ be (to the right or to the left of $x$). This reduces our problem to the following subproblem: we start from some segment and have to end in another segment by expanding the current segment by $1$ to the right or to the left without ever entering "bad segments." Turns out that we can solve this in $O(1)$! Indeed, represent expanding segment to the right by a move up in a coordinate plane and to the left by a move to the right. Then, we have to get from point $(0, 0)$ to some point $(a, b)$ by moving up or to the right without ever crossing some line of form $x = y + c$. This is a well-known counting problem.
[ "combinatorics", "dp", "math" ]
3,500
null
1761
G
Centroid Guess
\textbf{This in an interactive problem}. There is an unknown tree consisting of $n$ nodes, which has \textbf{exactly one} centroid. You only know $n$ at first, and your task is to find the centroid of the tree. You can ask the distance between any two vertices for at most $2\cdot10^5$ times. Note that the interactor is \textbf{not} adaptive. That is, the tree is fixed in each test beforehand and does not depend on your queries. A vertex is called a centroid if its removal splits the tree into subtrees with at most $\lfloor\frac{n}{2}\rfloor$ vertices each.
Assuming we have already determined that the centroid of the tree is located on the path between $u$ and $v$, we may now consider how to locate it. Let $c_1,c_2,\dots,c_k$ be the vertices on the path from $u$ to $v$. Let $A_i$ be the set of vertices reachable from $c_i$ (including $c_i$) if we erase all other vertices on the path from $u$ to $v$. Then we may find that $A_1,A_2,\dots,A_k$ are a division of all vertices. Let $s_i$ be the size of $A_i$. Then there must exist a vertex $c_x$ on the path satisfying that $\max\{\sum_{i=1}^{x-1}s_i,\sum_{i=x+1}^ks_i\}\leq \lfloor\frac n2\rfloor$. Notice that the vertices that do not satisfy the condition could not be a centroid, and it is already determined that the centroid is on the path, so $c_x$ is exactly the centroid of the tree. Then we may consider finding out which set each vertex belongs to with $2n$ queries so that we can calculate the value of $s_i$. For each vertex $x$, we may query $dis_{u,x}$ and $dis_{v,x}$. For any two vertices $x$ and $y$ that belong to the same set, $dis_{u,x}-dist_{v,x}$ should be equal to $dis_{u,y}-dis_{v,y}$. Let $t_1=dis_{u,x}-dis_{v,x}$ and $t_2=dis_{u,v}$, then $x\in A_{(t_1+t_2)/2+1}$. Thus we have found out the sets each vertex belongs to, as well as the value of $s_i$, as well as the centroid. This process requires at most $7.5\times10^4\times2=1.5\times 10^5$ queries. Now the problem remains to find a pair of vertices $u$ and $v$ such that the centroid locates on the path from $u$ to $v$. We can pick a constant $M$ satisfying that $\frac{M(M-1)}2\leq 5\times 10^4$, then select $M$ vertices on the tree randomly, and query the distances between every pair of selected vertices. This requires $\frac{M(M-1)}2\leq 5\times 10^4$ queries. Let these $M$ vertices be $p_1,p_2,\dots,p_M$. We can build a virtual tree with $p_1$ being the root that contains all the LCAs of each pair of vertices. 
Observe that $dis_{x,y}+dis_{y,z}=dis_{x,z}$ if and only if $y$ is located on the path between $x$ and $z$. For a vertex $p_x$, we can find out all vertices on the path from $p_1$ to $p_x$, and then find out the closest vertex to $p_x$ and connect them. It is exactly the deepest ancestor of $p_x$. Now that we have constructed the virtual tree without the LCAs with $p_1$ being the root, we will then add the LCAs into the virtual tree. Start DFS from $p_1$. Assume the current vertex is $u$. Enumerate through the nodes adjacent to $u$. Assume the current vertex is $v$. If there exists another vertex $x$ which is adjacent to $u$ satisfying that $u$ is not on the path between $x$ and $v$, then $x$ and $u$ should be both in the subtree of one of $u$'s child nodes. After finding out all vertices that are in the same subtree as $v$, it would be easy to calculate the depth of their LCAs as well as the distance between an LCA vertex and all other vertices in the virtual tree. Then, remove the old edge, and then add edges between the LCA and all vertices found in the same subtree as $v$. Lastly, add an edge between the LCA and $u$. Then repeat the process above, until any two vertices adjacent to $u$ are not in the same subtree of a child node of $u$. Then DFS the child nodes of $u$. We will get the whole virtual tree after all DFS are done. For the $M$ vertices chosen from the beginning, we assume that their weights are all $1$, while other vertices have $0$ weight. Then we may find the weighted centroid of the virtual tree (when there are multiple such centroids, arbitrarily pick one), and then make it the root. Then for the two vertices with the largest and second-largest subtree of the root, DFS them, recursively find their child vertex with the largest subtree. We will be resulted with $2$ leaf nodes. Then the centroid of the hidden tree is highly possible to be located on the path between these $2$ nodes. 
The number of queries in both parts would not exceed $2\times 10^5$. Proof of correctness: If the centroid is not on the path between $u$ and $v$, assume the centroid of the virtual tree is in the subtree $E$ of the centroid of the hidden tree. If the subtrees other than $E$ contain at least $\frac 13$ of the $M$ vertices, then the centroid of the hidden tree must be on the path between $u$ and $v$. So there will be at most $\frac 13$ of the $M$ vertices not being in $E$. In other words, for each of $M$ vertices, it has a possibility greater than $\frac 12$ of not being in $E$, and there will be at most $\frac 13$ of the vertices which are not in $E$. The possibility of the algorithm being wrong is not greater than $\sum_{i=0}^{M/3}C_M^i/2^M$, let $M=316$, then the value would be approximately $6\times 10^{-10}$.
[ "interactive", "probabilities", "trees" ]
3,500
null
1762
A
Divide and Conquer
An array $b$ is good if the sum of elements of $b$ is even. You are given an array $a$ consisting of $n$ positive integers. In one operation, you can select an index $i$ and change $a_i := \lfloor \frac{a_i}{2} \rfloor$. $^\dagger$ Find the minimum number of operations (possibly $0$) needed to make $a$ good. It can be proven that it is \textbf{always} possible to make $a$ good. $^\dagger$ $\lfloor x \rfloor$ denotes the floor function — the largest integer less than or equal to $x$. For example, $\lfloor 2.7 \rfloor = 2$, $\lfloor \pi \rfloor = 3$ and $\lfloor 5 \rfloor =5$.
If sum is even, answer is $0$. Otherwise we need to change parity of at least one element of $a$. It is optimal to change parity of at most one element. Answer can be at most $20$, as we need to divide any integer $x$ ($1 \leq x \leq 10^6$) at most $20$ times to change its parity. We are assuming initial sum is odd. Suppose $f(x)(1 \leq x \leq 10^6)$ gives the minimum number of operations needed to change parity of $x$. Iterate from $i=1$ to $n$ and calculate $f(a_i)$ for each $i$. Answer is minimum among all the calculated values. Time complexity is $O(n \cdot log(A_{max}))$.
[ "greedy", "math", "number theory" ]
800
#include <bits/stdc++.h>
using namespace std;
#define ll long long

// One test case of 1762A: if the array sum is already even, zero operations
// suffice; otherwise find the single element whose parity can be flipped in
// the fewest halvings (flipping exactly one element's parity fixes the sum).
void solve() {
    ll n;
    cin >> n;
    vector<ll> a(n);
    ll sum = 0;
    for (ll i = 0; i < n; i++) {
        cin >> a[i];
        sum += a[i];
    }
    ll ans;
    if (sum % 2 == 0) {
        ans = 0;
    } else {
        ans = 21;  // upper bound: any a_i <= 1e6 changes parity within 20 halvings
        for (ll i = 0; i < n; i++) {
            ll cur = a[i], steps = 0;
            // Halve until cur's parity differs from a[i]'s, i.e. cur + a[i] is odd.
            while (((cur + a[i]) & 1) == 0) {
                cur /= 2;
                steps++;
            }
            ans = min(ans, steps);
        }
    }
    cout << ans << "\n";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    ll t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1762
B
Make Array Good
An array $b$ of $m$ positive integers is good if for all pairs $i$ and $j$ ($1 \leq i,j \leq m$), $\max(b_i,b_j)$ is divisible by $\min(b_i,b_j)$. You are given an array $a$ of $n$ positive integers. You can perform the following operation: - Select an index $i$ ($1 \leq i \leq n$) and an integer $x$ ($0 \leq x \leq a_i$) and add $x$ to $a_i$, in other words, $a_i := a_i+x$. - After this operation, $a_i \leq 10^{18}$ should be satisfied. You have to construct a sequence of \textbf{at most} $n$ operations that will make $a$ good. It can be proven that under the constraints of the problem, such a sequence of operations \textbf{always} exists.
Suppose we have a prime number $p$. Suppose there are two perfect powers of $p$ - $l$ and $r$. Now it is easy to see $\max(l,r)$ is divisible by $\min(l,r)$. So now we need to choose some prime number $p$. Let us start with the smallest prime number $p=2$. Here is one interesting fact. There always exists a power of $2$ in the range $[x,2x]$ for any positive integer $x$. Suppose $f(x)$ gives the smallest power of $2$ which is greater than $x$. Iterate from $i=1$ to $n$ and change $a_i$ to $f(a_i)$ by adding $f(a_i)-a_i$ to $i$-th element. Time complexity is $O(n \cdot log(A_{max}))$.
[ "constructive algorithms", "implementation", "number theory", "sortings" ]
1,100
#include <bits/stdc++.h>
using namespace std;
#define ll long long

// Smallest power of two that is strictly greater than x.
ll f(ll x) {
    ll p = 1;
    while (p <= x) {
        p *= 2;
    }
    return p;
}

// One test case of 1762B: raise every element to the next power of two.
// Powers of two always divide one another, so the resulting array is good,
// and f(x) - x <= x guarantees the operation is legal.
void solve() {
    ll n;
    cin >> n;
    cout << n << "\n";  // exactly n operations, one per element
    for (ll i = 1; i <= n; i++) {
        ll x;
        cin >> x;
        cout << i << " " << f(x) - x << "\n";
    }
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    ll t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1762
C
Binary Strings are Fun
A binary string$^\dagger$ $b$ of odd length $m$ is good if $b_i$ is the median$^\ddagger$ of $b[1,i]^\S$ for all \textbf{odd} indices $i$ ($1 \leq i \leq m$). For a binary string $a$ of length $k$, a binary string $b$ of length $2k-1$ is an extension of $a$ if $b_{2i-1}=a_i$ for all $i$ such that $1 \leq i \leq k$. For example, {\underline{1}0\underline{0}1\underline{0}1\underline{1}} and {\underline{1}1\underline{0}1\underline{0}0\underline{1}} are extensions of the string 1001. String $x=$1011011 is not an extension of string $y=$1001 because $x_3 \neq y_2$. Note that there are $2^{k-1}$ different extensions of $a$. You are given a binary string $s$ of length $n$. Find the sum of the number of good extensions over all prefixes of $s$. In other words, find $\sum_{i=1}^{n} f(s[1,i])$, where $f(x)$ gives number of good extensions of string $x$. Since the answer can be quite large, you only need to find it modulo $998\,244\,353$. $^\dagger$ A binary string is a string whose elements are either $\mathtt{0}$ or $\mathtt{1}$. $^\ddagger$ For a binary string $a$ of length $2m-1$, the median of $a$ is the (unique) element that occurs at least $m$ times in $a$. $^\S$ $a[l,r]$ denotes the string of length $r-l+1$ which is formed by the concatenation of $a_l,a_{l+1},\ldots,a_r$ in that order.
Let us first find $f(s[1,n])$. $f(s[1,n])=2^{len-1}$ where $len$ is the length of longest suffix of $s$ in which all characters are same. How to prove the result in hint $2$? First of all it is easy to see if all characters of $s$ are same, $f(s[1,n])=2^{n-1}$ as median is always $s_i$. Now we assume that $s$ contains distinct characters. Suppose $t$ is one good extension of $s$. Assume we are index $i$. If there exists an index $j(j>i)$ such that $s_i \neq s_j$, we should have $t_{2i} \neq s_i$. Why? Assume $k$ is the smallest index greater than $i$ such that $s_i \neq s_k$. Now if we have $t_{2i} = s_i$, $s_k$ can never be median of subarray $t[1,2k-1]$. So if longest suffix of $s$ having same characters of starts at index $i$, $t_{2j} \neq s_j$ for all $j(1 \leq j < i)$ and $t_{2j}$ can be anything(either $0$ or $1$) for all $j(i \leq j < n)$. Now we know how to solve for whole string $s$. We can similarly solve for all prefixes. To find $f(s[1,i])$, we need to find the longest suffix of $s[1,i]$ containing same character. We can easily calculate this all prefixes while moving from $i=1$ to $n$. Time complexity is $O(n)$.
[ "combinatorics", "math" ]
1,400
#include <bits/stdc++.h>
using namespace std;
#define ll long long

const ll MOD = 998244353;

// One test case of 1762C: f(s[1..i]) = 2^(run-1), where run is the length of
// the maximal suffix of equal characters of the i-th prefix; sum over all i.
void solve() {
    ll n;
    cin >> n;
    string s;
    cin >> s;
    ll ans = 0;
    ll run = 0;  // holds 2^(current suffix run length - 1) mod MOD
    for (ll i = 0; i < n; i++) {
        if (i > 0 && s[i] == s[i - 1]) {
            run = run * 2 % MOD;  // run extends: extensions double
        } else {
            run = 1;              // run restarts at this character
        }
        ans = (ans + run) % MOD;
    }
    cout << ans << "\n";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    ll t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1762
D
GCD Queries
This is an interactive problem. There is a secret permutation $p$ of $[0,1,2,\ldots,n-1]$. Your task is to find $2$ indices $x$ and $y$ ($1 \leq x, y \leq n$, possibly $x=y$) such that $p_x=0$ or $p_y=0$. In order to find it, you are allowed to ask \textbf{at most} $2n$ queries. In one query, you give two integers $i$ and $j$ ($1 \leq i, j \leq n$, $i \neq j$) and receive the value of $\gcd(p_i,p_j)^\dagger$. Note that the permutation $p$ is fixed \textbf{before} any queries are made and does not depend on the queries. $^\dagger$ $\gcd(x, y)$ denotes the greatest common divisor (GCD) of integers $x$ and $y$. Note that $\gcd(x,0)=\gcd(0,x)=x$ for all positive integers $x$.
Intended solution uses $2 \cdot (n-2)$. You are allowed to guess two indices. Doesn't this hint towards something? If we can eliminate $n-2$ elements that cannot be $0$ for sure, we are done. Suppose we have three distinct indices $i$, $j$ and $k$. Is it possible to remove one index (say $x$) out of these three indices such that $p_x \neq 0$ for sure? You are allowed to query two times. So suppose we have three distinct indices $i$, $j$ and $k$. Let us assume $l=query(i,k)$ and $r=query(j,k)$. Now we have only three possibilities. $l=r$ In this case, $p_k$ cannot be $0$. Why? $p_i$ and $p_j$ are distinct, and we have $\gcd(0,x) \neq \gcd(0,y)$ if $x \neq y$. $l > r$ In this case, $p_j$ cannot be $0$. Why? Note $\gcd(0,p_k)=p_k$ and $\gcd(m,p_k)$ can be at most $p_k$ for any non-negative integer $m$. If $l > r$, this means $r$ cannot be $p_k$. Thus $p_j \neq 0$ for sure. $l < r$ In this case, $p_i$ cannot be $0$. Why? Refer to the above argument. Thus we can eliminate one index using $2$ queries. We will perform this operation $n-2$ times. Refer to attached code for details. Time complexity is $O(n)$.
[ "constructive algorithms", "interactive", "number theory" ]
2,100
#include <bits/stdc++.h>
using namespace std;
#define ll long long

// Interactive 1762D: keep two candidate positions for the value 0. Each new
// index costs two queries and eliminates exactly one of the three involved
// positions, since gcd(0, p_i) = p_i is the largest answer p_i can produce.
void solve() {
    ll n;
    cin >> n;
    ll a = 1, b = 2;  // current candidate indices
    for (ll i = 3; i <= n; i++) {
        ll ga, gb;
        cout << "? " << a << " " << i << endl;  // endl flushes — required for interaction
        cin >> ga;
        cout << "? " << b << " " << i << endl;
        cin >> gb;
        if (ga > gb) {
            b = i;  // gb < max possible, so p_b != 0; replace b
        } else if (gb > ga) {
            a = i;  // symmetric: p_a != 0
        }
        // ga == gb: p_i itself cannot be 0 (distinct p_a, p_b give distinct gcds with 0)
    }
    cout << "! " << a << " " << b << endl;
    ll verdict;
    cin >> verdict;
    assert(verdict == 1);
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    ll t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1762
E
Tree Sum
Let us call an edge-weighted tree with $n$ vertices numbered from $1$ to $n$ good if the weight of each edge is either $1$ or $-1$ and for each vertex $i$, the product of the edge weights of all edges having $i$ as one endpoint is $-1$. You are given a positive integer $n$. There are $n^{n-2} \cdot 2^{n-1}$ distinct$^\dagger$ edge-weighted trees with $n$ vertices numbered from $1$ to $n$ such that each edge is either $1$ or $-1$. Your task is to find the sum of $d(1,n)^\ddagger$ of all such trees that are good. Since the answer can be quite large, you only need to find it modulo $998\,244\,353$. $^\dagger$ Two trees are considered to be distinct if either: - there exists two vertices such that there is an edge between them in one of the trees, and not in the other. - there exists two vertices such that there is an edge between them in both trees but the weight of the edge between them in one tree is different from the one in the other tree. Note that by Cayley's formula, the number of trees on $n$ labeled vertices is $n^{n-2}$. Since we have $n-1$ edges, there are $2^{n-1}$ possible assignment of weights(weight can either be $1$ or $-1$). That is why total number of distinct edge-weighted tree is $n^{n-2} \cdot 2^{n-1}$. $^\ddagger$ $d(u,v)$ denotes the sum of the weight of all edges on the unique simple path from $u$ to $v$.
There does not exist any good tree of size $n$ if $n$ is odd. How to prove it? Suppose $f(v)$ gives the product of weight of edges incident to node $v$ in a good tree. We know that $f(i)=-1$ as if tree is good. Now $\prod_{i=1}^{n} f(i) = -1$ if $n$ is odd. There is another way to find $\prod_{i=1}^{n} f(i)$. Look at contribution of each edge. Each edge contribitues $1$ to $\prod_{i=1}^{n} f(i)$, no matter what the weight of this edge is, as it gets multiplied twice. Thus we get $\prod_{i=1}^{n} f(i) = 1$. We got contradiction. Thus no good tree of size $n$ exists. Now assume $n$ is even. Here is an interesting claim. For any unweighted tree,there exists exactly one assignment of weight of edges which makes it good. Thus there are $n^{n-2}$ distinct edge-weighted trees. How to prove the claim in hint $2$? Arbitrarily root the tree at node $1$. Now start from leaves and move towards root and assign the weight of edges in the path. First of all the edge incident to any leaf node will have $-1$ as the weight. While moving towards root, it can be observed that weight of edge between $u$ and parent of $u$ depends on the product of weight of edges between $u$ and its children. As we are moving from leaves towards root, weight of edges between $u$ and its children are already fixed. Weight of edge between $u$ and parent $u$ is $-1 \cdot \prod_{x \in C(u)}{pw(x)}$, where $pw(x)$ gives the weight of edge between $x$ and its parent, and $C(u)$ denotes the set of children of $u$. Time for one more interesting claim. The weight of edge $e$ is $(-1)^{l}$ if there are $l$ nodes on one side and $n-l$ nodes on other side of $e$, irrespective of the structure of tree. We can prove this claim by induction, similar to what we did in hint $3$. To find answer we will look at contribution of each edge. Here's detailed explanation on how to dot it. In total, we have $n^{n-2} \cdot (n-1)$ edges. 
Suppose for some edge(say $e$), we have $l$ nodes(including node $1$) on left side and $r$ nodes(including node $n$) on right side. Among $n^{n-2} \cdot (n-1)$ edges, how many possibilities do we have for $e$? It is ${{n-2} \choose {l-1}} \cdot l \cdot r \cdot l^{l-2} \cdot r^{r-2}$. Why? First we select $l-1$ nodes(as node $1$ is fixed to be on left side) to be on left side, we get ${{n-2} \choose {l-1}}$ for this. Now we have $l$ nodes on left side and $r$ nodes on right side. Edge $e$ will connect one among $l$ nodes on left and one among $r$ nodes on right. So edge $e$ will exist between $l \cdot r$ pairs. We know that number of distinct trees having $x$ nodes is $x^{x-2}$. Now on selecting one node from left and one from right, we have fixed the root of subtree on left side, and have also fixed the root of subtree on right side. So, number of distinct subtrees on left side is $l^{l-2}$, and number of distinct subtrees on right side is $r^{r-2}$. Thus, on mutliplying all(since they are independent), we get ${n \choose l} \cdot l \cdot r \cdot l^{l-2} \cdot r^{r-2}$ possibilities for $e$. Now this edge lies on the path from $1$ to $n$ as both lie on opposite sides of this node. So this edge contributes $(-1)^l \cdot {{n-2} \choose {l-1}} \cdot l \cdot r \cdot l^{l-2} \cdot r^{r-2}$ to answer. Hence $d(1,n)=\sum_{l=1}^{n-1} (-1)^l \cdot {{n-2} \choose {l-1}} \cdot l \cdot r \cdot l^{l-2} \cdot r^{r-2}$ where $l+r=n$. Note that we assumed that we are always going from left subtree to right subtree while calculating contribution. As we have tried all possibilties for l, all cases get covered. We used left and right subtrees just for our own convention. Time complexity is $O(n \cdot \log(n))$.
[ "combinatorics", "math", "trees" ]
2,600
#include <bits/stdc++.h>
using namespace std;
#define ll long long

const ll MOD = 998244353;
const ll MAX = 500500;

// fact[i] = i! mod MOD, inv_fact[i] = (i!)^{-1} mod MOD
vector<ll> fact(MAX + 2, 1), inv_fact(MAX + 2, 1);

// Binary exponentiation: base^e modulo md.
ll binpow(ll base, ll e, ll md) {
    ll res = 1;
    base %= md;
    for (; e; e /= 2) {
        if (e & 1) res = res * base % md;
        base = base * base % md;
    }
    return res;
}

// Modular inverse via Fermat's little theorem (md must be prime).
ll inverse(ll a, ll md) {
    return binpow(a, md - 2, md);
}

// Fill the factorial tables for all indices below MAX.
void precompute(ll md) {
    for (ll i = 2; i < MAX; i++)
        fact[i] = fact[i - 1] * i % md;
    inv_fact[MAX - 1] = inverse(fact[MAX - 1], md);
    for (ll i = MAX - 2; i >= 0; i--)
        inv_fact[i] = inv_fact[i + 1] * (i + 1) % md;
}

// Binomial coefficient C(a, b) mod md; 0 for out-of-range arguments.
ll nCr(ll a, ll b, ll md) {
    if (a < 0 || b < 0 || a < b) return 0;
    ll denom = inv_fact[b] * inv_fact[a - b] % md;
    return denom * fact[a] % md;
}

// d(1, n) = sum over splits l + r = n of
//   (-1)^l * C(n-2, l-1) * l * r * l^(l-2) * r^(r-2),
// i.e. the signed contribution of every possible edge on the 1..n path.
void solve() {
    ll n;
    cin >> n;
    if (n & 1) {  // no good tree exists when n is odd
        cout << 0;
        return;
    }
    ll ans = 0, sign = 1;
    for (ll l = 1; l < n; l++) {
        sign = -sign;  // sign == (-1)^l
        ll r = n - l;
        ll ways = nCr(n - 2, l - 1, MOD);           // choose the nodes left of the edge
        ways = ways * (l * r % MOD) % MOD;          // choose the edge's two endpoints
        // NOTE: for l == 1 (or r == 1) the exponent below is -1, but the base
        // is 1 so binpow still yields 1, which is the intended value.
        ways = ways * binpow(l, l - 2, MOD) % MOD;  // rooted subtrees on the left
        ways = ways * binpow(r, r - 2, MOD) % MOD;  // rooted subtrees on the right
        ans = (ans + sign * ways) % MOD;
    }
    cout << (ans + MOD) % MOD;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    precompute(MOD);
    ll t = 1;
    //cin>>t;
    while (t--) {
        solve();
    }
}
1762
F
Good Pairs
You are given an array $a$ consisting of $n$ integers and an integer $k$. A pair $(l,r)$ is good if there exists a sequence of indices $i_1, i_2, \dots, i_m$ such that - $i_1=l$ and $i_m=r$; - $i_j < i_{j+1}$ for all $1 \leq j < m$; and - $|a_{i_j}-a_{i_{j+1}}| \leq k$ for all $1 \leq j < m$. Find the number of pairs $(l,r)$ ($1 \leq l \leq r \leq n$) that are good.
We should have $|a_{i_j}-a_{i_{j+1}}| \leq k$. This seems a bit hard, as we can have $a_{i_{j+1}}$ greater than, smaller than or equal to $a_{i_j}$. Why not solve the easier version first? A pair $(l,r)$ is good if there exists a sequence of indices $i_1, i_2, \dots, i_m$ such that $i_1=l$ and $i_m=r$; $i_j < i_{j+1}$ for all $1 \leq j < m$; and $0 < a_{i_j}-a_{i_{j+1}} \leq k$ for all $1 \leq j < m$. Suppose $F(a,k)$ number of pairs $(l,r)$ ($1 \leq l < r \leq n$) that are good. Find $F(a,k)$. To solve the problem in hint $1$, let us define $dp_i$ as the number of pairs $j(i<j)$ such that $(i,j)$ is good. Let us move from $i=n$ to $1$. To find $dp_i$, let us first find the smallest index $j$ such that $a_j$ lies in range $[a_i+1,a_i+k]$. We can observe that $dp_i=dp_j+f(i,a_i+1,a_j)$, where $f(i,l,r)$ gives us the number of indices $x$ among last $i$ elements of $a$ such that $a_x$ lies in the range $[l,r]$. We can use fenwik tree or ordered set to find $f(i,l,r)$. Now let us get back to original problem. First let us count number of pairs $(i,j)(1 \leq i \leq j)$ such that $a_i=a_j$. Assume $cnt$ is number of such pairs. Time for another cool claim! For our original problem, answer is $cnt+F(a,k)+F(rev(a),k)$, where $rev(a)$ denotes the array $a$ when it is reversed. How to prove the claim in hint $3$? Suppose we have a good pair $(l,r)$ such that $a_l \neq a_r$. Now using exchange arguments we can claim that there always exists a sequence(say $s$) starting at index $l$ and ending at index $r$ such that difference between adjacent elements of $a$ is atmost $k$ strictly increasing if $a_l < a_r$ strictly decreasing if $a_l > a_r$ Thus $(l,r)$ will be counted in $F(a,k)$ if $a_l < a_r$ and $(l,r)$ will be counted in $F(rev(a),k)$ if $a_l > a_r$. Time complexity is $O(n \cdot \log(n))$.
[ "binary search", "data structures", "dp" ]
2,600
#include <bits/stdc++.h>
using namespace std;
#define ll long long

const ll MAX = 1000100;  // all input values are assumed to be below MAX

// Min-segment tree indexed by value: for each value v it stores the smallest
// array position currently holding v (ID = MAX when no position holds v).
class ST {
public:
    vector<ll> segs;
    ll size = 0;
    ll ID = MAX;  // identity element for min
    ST(ll sz) {
        segs.assign(2 * sz, ID);
        size = sz;
    }
    ll comb(ll a, ll b) {
        return min(a, b);
    }
    // Point-assign at idx, then refresh all ancestors.
    void upd(ll idx, ll val) {
        segs[idx += size] = val;
        for (idx /= 2; idx; idx /= 2) {
            segs[idx] = comb(segs[2 * idx], segs[2 * idx + 1]);
        }
    }
    // Minimum over the inclusive index range [l, r]; returns ID when l > r.
    ll query(ll l, ll r) {
        ll lans = ID, rans = ID;
        for (l += size, r += size + 1; l < r; l /= 2, r /= 2) {
            if (l & 1) {
                lans = comb(lans, segs[l++]);
            }
            if (r & 1) {
                rans = comb(segs[--r], rans);
            }
        }
        return comb(lans, rans);
    }
};

// Fenwick tree over values, used to count how many already-processed
// positions carry a value inside a given range.
struct FenwickTree {
    vector<ll> bit;
    ll n;
    FenwickTree(ll n) {
        this->n = n;
        bit.assign(n, 0);
    }
    FenwickTree(vector<ll> a) : FenwickTree(a.size()) {
        // size_t loop index avoids the signed/unsigned comparison of the original
        for (size_t i = 0; i < a.size(); i++) add(i, a[i]);
    }
    // Prefix sum over [0, r].
    ll sum(ll r) {
        ll ret = 0;
        for (; r >= 0; r = (r & (r + 1)) - 1) ret += bit[r];
        return ret;
    }
    // Range sum over [l, r]; 0 for an empty range.
    ll sum(ll l, ll r) {
        if (l > r) return 0;
        return sum(r) - sum(l - 1);
    }
    void add(ll idx, ll delta) {
        for (; idx < n; idx = idx | (idx + 1)) bit[idx] += delta;
    }
};

FenwickTree freq(MAX);
ST segtree(MAX);
vector<ll> dp(MAX, 0);

// F(a, k): number of good pairs (l, r), l < r, joined by a strictly
// increasing chain whose adjacent values differ by at most k.
// dp[i] = number of such pairs starting at position i.
ll solve(vector<ll> a, ll n, ll k) {
    ll now = 0;
    for (ll i = n - 1; i >= 0; i--) {
        // Smallest position j > i with a[j] in [a[i]+1, a[i]+k].
        // Clamp the upper bound to MAX-1 so a large k cannot push the query
        // past the tree size (out-of-bounds read in the original code).
        ll j = segtree.query(a[i] + 1, min(a[i] + k, MAX - 1));
        if (j < n) {
            // pairs continuing through j, plus every later position whose
            // value lies in (a[i], a[j]]
            dp[i] = dp[j] + freq.sum(a[i] + 1, a[j]);
        } else {
            dp[i] = 0;
        }
        now += dp[i];
        segtree.upd(a[i], i);
        freq.add(a[i], 1);
    }
    // Roll back the global structures for the next call / test case.
    for (auto it : a) {
        segtree.upd(it, MAX);
        freq.add(it, -1);
    }
    return now;
}

void solve() {
    ll n, k;
    cin >> n >> k;
    vector<ll> a(n);
    ll ans = 0;
    map<ll, ll> cnt;  // running count of pairs (i, j), i <= j, with a_i == a_j
    for (auto &it : a) {
        cin >> it;
        cnt[it]++;
        ans += cnt[it];
    }
    // answer = (equal-value pairs) + F(a, k) + F(reverse(a), k)
    ans += solve(a, n, k);
    reverse(a.begin(), a.end());
    ans += solve(a, n, k);
    cout << ans << "\n";
    return;
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    ll t = 1;
    cin >> t;
    while (t--) {
        solve();
    }
}
1762
G
Unequal Adjacent Elements
You are given an array $a$ consisting of $n$ positive integers. Find any permutation $p$ of $[1,2,\dots,n]$ such that: - $p_{i-2} < p_i$ for all $i$, where $3 \leq i \leq n$, and - $a_{p_{i-1}} \neq a_{p_i}$ for all $i$, where $2 \leq i \leq n$. Or report that no such permutation exists.
Answer is NO only when there exists an element of $a$ which occurs more that $\lceil \frac{n}{2} \rceil$ times. Let us say an array $b$ is beautiful if length of $b$ is odd and mode(say $x$) of $b$ occurs exactly $\lceil \frac{n}{2} \rceil$ times. If $a$ is beautiful, there exists only one permutation. We have rearrange such that $x$ occupies all the odd indices and keep the elements at even indices such that condition $2$ in satisfied. To solve the original problem, we will divide the array $a$ into multiple beautiful subarrays and arrange the elements in those subarrays. Let us continue from where we left off. So our motivation is to break the original array into multiple beautiful subarrays and the elements in those subarrays, as mentioned before. Now for condition $1$ to be satisfied, we should not have two adjacent subarrays such that the elements at the end positions of both subarrays(after rearranging the elements) are the same. Here is one construction using which we can achieve our goal. Suppose $l$ denotes the leftmost point of our concerned subarray. If $a_l \neq a_{l+1}$, we move forward, as subarray $a[l,l]$ is good. Otherwise, we keep moving towards the right till index $r$(here, $r$ should be the smallest possible) such that the subarray $a[l,r]$ is beautiful and $a_l \neq a_{r+1}$. So it is easy to notice the following observations about the subarray $a[l,r]$ length of this subarray is odd length of this subarray is odd $a_l$ occurs exactly $\lceil \frac{r-l+1}{2} \rceil$ times in this subarray $a_l$ occurs exactly $\lceil \frac{r-l+1}{2} \rceil$ times in this subarray Now we can rearrange the elements of this subarray $a[l,r]$. Do note that the subarray $a[1,r]$ satisfies both the conditions stated in the statement. So our task is to make the subarray $a[r+1,n]$ good now. We can now update $l=r+1$ and continue searching for the corresponding $r$ and so on. Now it might be the case that we did not get a valid $r$ for the last search. 
From here, I assume we did not get valid $r$ for the last search. We could print the obtained permutation if we got it, as $a$ would satisfy both conditions. Assume that we had started at $pos=l$ and couldn't find $r$. Subarray $a[1,pos-1]$ is already good. To fix this issue, we will do a similar search that we did before. We start from the back(from index $n$) and move towards left till index $m$ such that $m < pos$ $a[m,n]$ is beautiful $a_{pos}$ occurs exactly $\lceil \frac{n-m+1}{2} \rceil$ times in this subarray $a_{pos} \neq a_{m-1}$ Now we arrange elements of this subarray in the same fashion that we did before. Are we done? No. First, we must prove that we will always get some $m$. Let us have function $f(a,l,r,x)$, which denotes the score of the subarray $a[l,r]$ for the element $x$. $f(a,l,r,x)=freq_x-(r-l+1-freq_x)$, where $freq_x$ denotes the frequency of element $x$ in the subarray $a[l,r]$ It is easy to note that $f(a,pos,n,a_{pos}) > 1$ (Hint $-$ Prove that $f(a,pos,r,a_{pos}) \neq 0$ for $pos \leq r \leq n$. Why?(If it does then $a[pos,r-1]$ would be beautiful )) Now we start from the back and move towards the right to find $m$ with $n$ as our right endpoint of the concerned subarray. Note that $f(a,1,n,a_{pos}) \leq 1$ (Why? $a_{pos}$ would have occurred at most $\lceil \frac{n}{2} \rceil$ times in $a$) So while moving from $pos$ to $1$ we will indeed find a $m$ such that $f(a,m,n,a_{pos})=1$, and $a_{m-1} \neq a_{pos}$ (assuming $a_0=-1$) Are we done? Not still :p. We can observe that condition $1$ is satisfied, but sometimes condition $2$ would not be. For example, simulate the above approach on the array $a=[1,1,2,3,3]$. How to fix this issue? It's pretty easy to fix this issue. Instead of rearranging the subarray $a[m,n]$, we will rearrange the subarray $a[m-1,n]$. How to rearrange? Okay, time for one more hint. What will be the answer for $a=[1,1,2,3,3]$? $p=[1,4,2,5,3]$ You can refer to the attached code for implementation details.
[ "constructive algorithms", "sortings" ]
3,100
#include <bits/stdc++.h>
using namespace std;
#define ll long long
#define all(x) x.begin(),x.end()

// Builds a permutation p with p[i-2] < p[i] and a[p[i-1]] != a[p[i]] by
// cutting the array into blocks (odd length, with the block's leading value
// on every other slot) and interleaving indices inside each block.
void solve() {
    ll n;
    cin >> n;
    vector<ll> a(n + 5), freq(n + 5, 0);
    for (ll i = 1; i <= n; i++) {
        cin >> a[i];
        freq[a[i]]++;
    }
    // Impossible iff some value occurs more than ceil(n/2) times.
    for (ll i = 1; i <= n; i++) {
        ll limit = (n + 1) / 2;
        if (freq[i] > limit) {
            cout << "NO\n";
            return;
        }
    }
    cout << "YES\n";
    vector<ll> perm;
    ll pos = 1;
    while (pos <= n) {
        ll lead = a[pos];        // value that must occupy the odd slots
        vector<ll> same, other;  // indices with value == lead / != lead
        while (pos <= n) {
            if (a[pos] == lead) same.push_back(pos);
            else other.push_back(pos);
            if (same.size() == other.size()) {
                // Balanced block: interleave, then drop the last index so the
                // block ends on lead; that index restarts the next block.
                for (ll i = 0; i < (ll)same.size(); i++) {
                    perm.push_back(same[i]);
                    perm.push_back(other[i]);
                }
                perm.pop_back();
                break;
            }
            if (pos == n) {
                // Reached the end without balancing: hand indices back from
                // the tail of perm until the final block can be closed, then
                // emit it with the roles reversed.
                while (1) {
                    if (perm.empty() || same.size() == other.size()) {
                        sort(all(same));
                        sort(all(other));
                        if (same.size() != other.size()) {
                            perm.push_back(same[0]);
                            same.erase(same.begin());
                        }
                        if (!other.empty() && !perm.empty()) {
                            // avoid equal values across the block boundary
                            if (a[perm.back()] == a[other[0]]) swap(same, other);
                        }
                        for (ll i = 0; i < (ll)same.size(); i++) {
                            perm.push_back(other[i]);
                            perm.push_back(same[i]);
                        }
                        break;
                    }
                    if (a[perm.back()] == lead) same.push_back(perm.back());
                    else other.push_back(perm.back());
                    perm.pop_back();
                }
                pos = n + 1;
            }
            pos++;
        }
    }
    for (auto idx : perm) cout << idx << " ";
    cout << "\n";
    return;
}

int main() {
    ll test_cases = 1;
    cin >> test_cases;
    while (test_cases--) solve();
}
1763
A
Absolute Maximization
You are given an array $a$ of length $n$. You can perform the following operation several (possibly, zero) times: - Choose $i$, $j$, $b$: Swap the $b$-th digit in the binary representation of $a_i$ and $a_j$. Find the maximum possible value of $\max(a) - \min(a)$. In a binary representation, bits are numbered from right (least significant) to left (most significant). Consider that there are an infinite number of leading zero bits at the beginning of any binary representation. For example, swap the $0$-th bit for $4=100_2$ and $3=11_2$ will result $101_2=5$ and $10_2=2$. Swap the $2$-nd bit for $4=100_2$ and $3=11_2$ will result $000_2=0_2=0$ and $111_2=7$. Here, $\max(a)$ denotes the maximum element of array $a$ and $\min(a)$ denotes the minimum element of array $a$. The binary representation of $x$ is $x$ written in base $2$. For example, $9$ and $6$ written in base $2$ are $1001$ and $110$, respectively.
Which $1$s in the binary representation cannot be changed to $0$. Similarly, Which $0$s in the binary representation cannot be changed to $1$. Considering the last two hints, try to maximize the maximum element and minimize the minimum element. In the minimum element, we want to make every bit $0$ when possible, it won't be possible to set a particular bit to $0$ when that bit is set in all the elements of $a$. Therefore, the minimum value we can achieve after performing the operations is the bitwise AND of all the elements of $a$. In the maximum element, we want to make every bit $1$ when possible, it won't be possible to set a particular bit to $1$ when that bit is not set in any of the elements of $a$. Therefore, the maximum value we can achieve after performing the operations is the bitwise OR of all the elements of $a$. Therefore the answer is (OR of the array - AND of the array). Time Complexity: $O(n)$
[ "bitmasks", "constructive algorithms", "greedy", "math" ]
800
null
1763
B
Incinerate
To destroy humanity, The Monster Association sent $n$ monsters to Earth's surface. The $i$-th monster has health $h_i$ and power $p_i$. With his last resort attack, True Spiral Incineration Cannon, Genos can deal $k$ damage to all monsters alive. In other words, Genos can reduce the health of all monsters by $k$ (if $k > 0$) with a single attack. However, after every attack Genos makes, the monsters advance. With their combined efforts, they reduce Genos' attack damage by the power of the $^\dagger$weakest monster $^\ddagger$alive. In other words, the minimum $p_i$ among all currently living monsters is subtracted from the value of $k$ after each attack. $^\dagger$The Weakest monster is the one with the least power. $^\ddagger$A monster is alive if its health is strictly greater than $0$. Will Genos be successful in killing all the monsters?
What if the array $p$ was sorted? Is it necessary to decrease the health of each monster manually after every attack? Sort the monsters in ascending order of their powers. Now we iterate through the monsters while maintaining the current attack power and the total damage dealt. Only the monsters with health greater than the total damage dealt are considered alive, and every time we encounter such a monster it will be the weakest one at the current time, thus we need to attack until the total damage dealt exceeds the current monster's health while lowering our attack power by its power each time. If we can kill all the monsters in this way, the answer is YES, otherwise it is NO. Time Complexity: $O(nlogn)$ Sort the monsters in ascending order of their health. Now we maintain a count of monsters alive after each attack. This could be achieved by applying $upper bound()$ on $h$ array for each attack. The total damage dealt could be stored and updated in a separate variable. To find the power of the weakest monster alive, we could just precompute the minimum power of monsters in a suffix array. In other words, $p_i = \min(p_i, p_{i+1}).$ Time Complexity: $O(nlogn)$
[ "binary search", "brute force", "data structures", "implementation", "math", "sortings" ]
1,200
#include <bits/stdc++.h>
using namespace std;
typedef long long ll;

// Monsters sorted by health; Genos attacks with damage k, which afterwards
// drops by the power of the weakest monster still alive (suffix minimum).
void solve() {
    ll n, k;
    cin >> n >> k;
    vector<pair<ll, ll>> mons(n);  // {health, power}
    for (auto &m : mons) cin >> m.first;
    for (auto &m : mons) cin >> m.second;
    sort(mons.begin(), mons.end());
    vector<ll> health(n);
    for (ll i = 0; i < n; i++) health[i] = mons[i].first;
    // suffix minimum of power == power of the weakest living monster
    for (ll i = n - 2; i >= 0; i--) mons[i].second = min(mons[i + 1].second, mons[i].second);
    ll dealt = 0;  // total damage applied to every monster so far
    while (k > 0) {
        // index of the first monster whose health exceeds the damage dealt
        ll alive = upper_bound(health.begin(), health.end(), k + dealt) - health.begin();
        if (alive == n) {
            cout << "YES" << '\n';
            return;
        }
        dealt += k;
        k -= mons[alive].second;
    }
    cout << "NO" << '\n';
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    ll t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1763
C
Another Array Problem
You are given an array $a$ of $n$ integers. You are allowed to perform the following operation on it as many times as you want (0 or more times): - Choose $2$ indices $i$,$j$ where $1 \le i < j \le n$ and replace $a_k$ for all $i \leq k \leq j$ with $|a_i - a_j|$ Print the maximum sum of all the elements of the final array that you can obtain in such a way.
What happens when we apply the same operation twice? What about n = 3 ? Let's first consider the case for $n \geq 4$. The key observation to make here is that we can make all the elements of a subarray $a_l,...a_r$ zero by applying the operation on range $[l,r]$ twice. Then let's assume the maximum element $mx$ of the array is at an index $m > r$. We can apply the operation on the range $[l,m]$ and turn all its elements into $mx$. Using the above information we can see that to achieve the final array with maximum sum we need to make all the elements in it equal to the maximum element in the array. Regardless of the given array this can be achieved by making the last two elements (n-1,n) zero. Then applying the operation on subarray $[m,n]$ to make all its elements equal to $mx$. Then making the first two elements (1,2) zero and applying the operation on the whole array making all the elements equal to $mx$. Thus the maximum sum for the final array will always be $n*mx$. (In case $m = n-1$ or $n$, we can operate on the left side first to reach the same solution). For $n = 2$ the maximum final sum would be $\max(a_1+a_2, 2*(|a_1-a_2|))$. For $n=3$, when the maximum element is present at index $1$ or $3$ we can make all the elements of the array into $mx$. When the maximum element is at index $2$, we have the following options. Case 1: We can apply the operation on (1,2), then we can convert all the elements of the array into $\max(a_3,|a_2-a_1|)$. Case 2: We can apply the operation on (2,3), then we can convert all the elements of the array into $\max(a_1,|a_2-a_3|)$. Case 3: We can apply the operation on (1,3) making all the elements in the array $|a_1-a_3|$. This is redundant since $a_2 > a_1,a_3$ either case 1 or case 2 will give a larger sum as $a_2 - \min(a_1,a_3) > \max(a_1,a_3) - \min(a_1,a_3)$. Now considering case 1, if $3* \max(a_3,|a_2-a_1|) \leq a_1+a_2+a_3$ the maximum sum possible would be the current sum of the array (see sample 1 and 3). 
Therefore no operations are required. Similar case for case 2. So the maximum possible sum for $n=3$ will be $\max(3*a_1, 3*a_3, 3*|a_1-a_2|, 3*|a_3-a_2|,a_1+a_2+a_3)$. To avoid doing this casework for $n = 3$, we can see that there are only 3 possible operations -> (1,2) , (2,3), (1,3). We will be required to perform operations (1,2) and (2,3) at most two times. So we can brute force all possible combinations of operations [(1,2),(1,2),(2,3),(2,3),(1,3)] to find the maximum sum.
[ "brute force", "constructive algorithms", "greedy" ]
2,000
#include <bits/stdc++.h>
using namespace std;
#define ll long long

// Maximum final sum: n * max(a) for n >= 4; explicit case work for n = 2, 3.
void solve() {
    ll n;
    cin >> n;
    vector<ll> a(n);
    for (auto &x : a) cin >> x;
    if (n == 2) {
        // either keep the array, or replace both entries with |a0 - a1|
        cout << max(2 * abs(a[0] - a[1]), a[0] + a[1]);
    } else if (n == 3) {
        // best of: operating on a pair and spreading the survivor,
        // or leaving the array untouched
        cout << max({3 * abs(a[0] - a[1]), 3 * abs(a[2] - a[1]),
                     3 * a[0], 3 * a[2], a[0] + a[1] + a[2]});
    } else {
        // with n >= 4 every element can be raised to the global maximum
        ll best = 0;
        for (ll x : a) best = max(best, x);
        cout << n * best;
    }
    cout << '\n';
}

int main() {
    ios::sync_with_stdio(false), cin.tie(NULL);
    ll t = 0;
    cin >> t;
    while (t--) solve();
}
1763
D
Valid Bitonic Permutations
You are given five integers $n$, $i$, $j$, $x$, and $y$. Find the number of bitonic permutations $B$, of the numbers $1$ to $n$, such that $B_i=x$, and $B_j=y$. Since the answer can be large, compute it modulo $10^9+7$. A bitonic permutation is a permutation of numbers, such that the elements of the permutation first increase till a certain index $k$, $2 \le k \le n-1$, and then decrease till the end. Refer to notes for further clarification.
Can you solve the problem when $x < y$? When $x > y$, perform $i'=n-j+1$, $j'=n-i +1$, $x' = y$, and $y' = x$. Can you solve the problem for a fixed value of $k$? Iterate over possible values of $k$. The total count is the sum of the individual counts. Club the remaining numbers into ranges as follows: $[1,x-1]$, $[x+1,y-1]$, and $[y+1,n-1]$. For simplicity, if $x > y$, perform $i' = n-j+1$, $j' = n-i+1$, $x' = y$, and $y' = x$. Hereafter, the variables $i$, $j$, $x$, and $y$, will refer to these values. Now, $i < j$ and $x < y$. For now, assume that $y < n$. We shall consider the case where $y = n$ at a later stage. Let us consider solving the problem for fixed $k$. Valid values for $k$ are $[2,i-1]$, $[i+1,j-1]$, $[j+1,n-1]$. If we think about it, when $x < y$, $k$ cannot lie in the range $[2, i-1]$. So, we can discard them as possible values for $k$. Let us consider the case where $k$ belongs to $[i+1,j-1]$. The permutation adheres to the following pattern: $B_1 < .. < B_i = x < .. < B_k = n > .. > B_j = y > .. > B_n$. Numbers to the left of $i$ must lie in the range $[1,x-1]$. We choose $i-1$ elements from $[1,x-1]$ and place them to the left of $i$. There are ${x-1 \choose i-1}$ ways to do this. The remaining $x-i$ elements from $[1,x-1]$ lie to the right of $j$ by default. Numbers to the right of $j$ must lie in the range $[1,x-1]$ or $[x+1,y-1]$. Since numbers in the range $[1,x-1]$ have already been placed, therefore, we choose numbers in the range $[x+1,y-1]$, and place them in the $n-j-(x-i)$ remaining positions. There are ${y-x-1 \choose n-j-(x-i)}$ ways to do this. The remaining elements in the range $[x+1,y-1]$ lie between $i$ and $k$ by default. Numbers between $k$ and $j$ must lie in the range $[y+1,n-1]$. We choose $j-k-1$ elements from $[y+1,n-1]$ and place them between $k$ and $j$. There are ${n-y-1 \choose j-k-1}$ ways to do this. Afterwards, the remaining elements in the range lie between $i$ and $k$ by default, and the permutation is full. 
${x-1 \choose i-1} * {y-x-1 \choose n-j-(x-i)} * {n-y-1 \choose j-k-1}$ Let us consider the case where $k$ belongs to the range $[j+1,n-1]$. The permutation adheres to the following pattern: $B_1 < .. < B_i = x < .. < B_j= y < .. < B_k = n > .. > B_n$. Similar to above, the numbers to the left of $i$ must lie in the range $[1,x-1]$. We choose $i-1$ elements from $[1,x-1]$, and place them to the left of $i$. The remaining $x-i$ elements from $[1,x-1]$ lie to the right of $k$ by default. Numbers between $i$ and $j$ must lie in the range $[x+1,y-1]$. We choose $j-i-1$ elements from $[x+1,y-1]$ and place them between $i$ and $j$. There are ${y-x-1 \choose j-i-1}$ ways to do this, and the remaining elements from $[x+1,y-1]$ lie to the right of $k$ by default. Numbers between $j$ and $k$ must lie in the range $[y+1,n-1]$. We choose $k-j-1$ elements from $[y+1,n-1]$ and place them in these positions. Afterwards, the remaining elements in the range get placed to the right of $k$ by default, and the permutation is full. ${x-1 \choose i-1} * {y-x-1 \choose j-i-1} * {n-y-1 \choose k-j-1}$ The answer to the problem is the sum of individual answers for all iterated values of $k$. $ans = {x-1 \choose i-1} * {y-x-1 \choose n-j-(x-i)} * \sum_{k=i+1}^{j-1} {n-y-1 \choose j-k-1}$ + ${x-1 \choose i-1} * {y-x-1 \choose j-i-1} * \sum_{k=j+1}^{n-1} {n-y-1 \choose k-j-1}$ Let us now consider the case where $y = n$. The permutation adheres to the following pattern: $B_1 < .. < B_i = x < .. < B_j = B_k = n > .. > B_n$. Again, the numbers to the left of $i$ must lie in the range $[1,x-1]$. We choose $i-1$ elements from $[1,x-1]$ and place them to the left of $i$. The remaining $x-i$ elements from $[1,x-1]$ lie to the right of $j$ (here, $k$) by default. Numbers between $i$ and $j$ must lie in the range $[x+1,y-1]$. We choose $j-i-1$ elements form $[x+1,y-1]$ and place them between $i$ and $j$. 
The remaining elements from $[x+1,y-1]$ lie to the right of $j$ (here, $k$) by default, and the permutation is full. ${x-1 \choose i-1} * {y-x-1 \choose j-i-1}$ With $O(n_{max}*log(10^9+7-2))$ precomputation for factorials and their modular inverses, each individual test can be solved as above in $O(n)$. Therefore, the overall complexity of this approach is $O(n_{max}*log(10^9+7-2) + t*n)$, but the constraints allowed for slower solutions as well. $Bonus:$ Can you solve the problem when $1 \le t, n \le 10^5$.
[ "combinatorics", "dp", "implementation", "math", "number theory" ]
2,200
#include <iostream>
#include <vector>
using namespace std;

const int MOD = 1000000007;

vector<int> fac;   // fac[i] = i! mod MOD
vector<int> ifac;  // ifac[i] = (i!)^{-1} mod MOD

// base^exp mod MOD by binary exponentiation.
int binExp(int base, int exp) {
    long long b = base % MOD;
    long long res = 1;
    while (exp > 0) {
        if (exp & 1) res = res * b % MOD;
        b = b * b % MOD;
        exp >>= 1;
    }
    return (int) res;
}

// Build factorial tables up to n; inverse factorials are filled top-down
// from a single modular inverse (same values as inverting each one).
void precompute(int n) {
    fac.assign(n + 1, 1);
    for (int i = 2; i <= n; i++) {
        fac[i] = (int) ((long long) fac[i - 1] * i % MOD);
    }
    ifac.assign(n + 1, 1);
    ifac[n] = binExp(fac[n], MOD - 2);
    for (int i = n; i >= 1; i--) {
        ifac[i - 1] = (int) ((long long) ifac[i] * i % MOD);
    }
}

// C(n, r) mod MOD; 0 when the arguments are out of range.
int nCr(int n, int r) {
    if ((n < 0) || (r < 0) || (r > n)) {
        return 0;
    }
    return (int) ((long long) fac[n] * ifac[r] % MOD * ifac[n - r] % MOD);
}

// Count bitonic permutations of 1..n with B_i = x and B_j = y.
// Mirrors the permutation first whenever x > y, so x < y always holds below.
int countValidBitonicPerm(int n, int i, int j, int x, int y) {
    if (x > y) {
        // reflect positions and swap the two constraints
        i = n - i + 1;
        j = n - j + 1;
        swap(i, j);
        swap(x, y);
    }
    // case 1: the peak lies strictly between positions i and j
    int acc = 0;
    for (int k = i + 1; k < j; k++) {
        acc = (acc + nCr(n - y - 1, j - k - 1)) % MOD;
    }
    int total = (int) ((long long) nCr(x - 1, i - 1) * nCr(y - x - 1, n - j - (x - i)) % MOD * acc % MOD);
    // case 2: the peak lies strictly to the right of position j
    acc = 0;
    for (int k = j + 1; k < n; k++) {
        acc = (acc + nCr(n - y - 1, k - j - 1)) % MOD;
    }
    total = (int) ((total + (long long) nCr(x - 1, i - 1) * nCr(y - x - 1, j - i - 1) % MOD * acc % MOD) % MOD);
    if (y == n) {
        // y itself is the peak; it cannot sit at the very last position
        if (j == n) {
            return 0;
        }
        return (int) ((long long) nCr(x - 1, i - 1) * nCr(y - x - 1, j - i - 1) % MOD);
    }
    return total;
}

int main() {
    const int MAXN = 100;
    precompute(MAXN);
    int testCases;
    cin >> testCases;
    while (testCases-- > 0) {
        int n, i, j, x, y;
        cin >> n >> i >> j >> x >> y;
        cout << countValidBitonicPerm(n, i, j, x, y) << '\n';
    }
    return 0;
}
1763
E
Node Pairs
Let's call an ordered pair of nodes $(u, v)$ in a directed graph unidirectional if $u \neq v$, there exists a path from $u$ to $v$, and there are no paths from $v$ to $u$. A directed graph is called $p$-reachable if it contains exactly $p$ ordered pairs of nodes $(u, v)$ such that $u < v$ and $u$ and $v$ are reachable from each other. Find the minimum number of nodes required to create a $p$-reachable directed graph. Also, among all such $p$-reachable directed graphs with the minimum number of nodes, let $G$ denote a graph which maximizes the number of unidirectional pairs of nodes. Find this number.
In a directed graph, which nodes are reachable from each other? How many such pairs of nodes exist? Think about a sequence of SCCs. For two nodes $u$ and $v$ to be reachable from each other, they must lie in the same strongly connected component (SCC). Let's define $f(i)$ as the minimum number of nodes required to construct an $i$-reachable graph. We can use dynamic programming and calculate $f(i)$ as $f(i) = \min(f(i - \frac{s (s - 1)}{2}) + s)$ over all the valid SCC sizes $s$ for which $\frac{s (s - 1)}{2} \leq i$, i.e., over those $s$ which have less pairs of the required type than $i$. Thus, $f(p)$ gives us the minimum number of nodes required to create a $p$-reachable graph. In all $p$-reachable graphs with $f(p)$ nodes, the upper bound on the number of unidirectional pairs of nodes is $\binom{f(p)}{2} - p$, because we have exactly $p$ pairs of nodes which are reachable from each other. It is possible to achieve this upper bound using the following construction: let $s_1, s_2, \ldots, s_k$ be any sequence of SCC sizes which agrees with the dp values we calculated earlier. Let the first SCC contain the nodes $[1, s_1]$, the second one contain $[s_1 + 1, s_1 + s_2]$, and so on. We add a directed edge from $u$ to $v$ if $u < v$. Time Complexity: $\mathcal{O}(p\sqrt{p})$
[ "dp", "graphs", "math", "number theory" ]
2,200
#include <bits/stdc++.h>
using namespace std;

const int INF = 1e9;

// 1763E Node Pairs: minNodes[i] = minimum vertex count of an i-reachable
// graph; an SCC of size s contributes s*(s-1)/2 mutually reachable pairs.
// With the minimum node count fixed, the unidirectional-pair maximum is
// C(nodes, 2) - p (see tutorial).
int main() {
    int p;
    cin >> p;
    vector<int> minNodes(p + 1, INF);
    minNodes[0] = 0;
    for (int target = 1; target <= p; ++target) {
        // Try every SCC size s whose pair count still fits into target.
        for (int s = 1; ; ++s) {
            int pairs = s * (s - 1) / 2;
            if (pairs > target) break;
            minNodes[target] = min(minNodes[target], minNodes[target - pairs] + s);
        }
    }
    long long nodes = minNodes[p];
    cout << minNodes[p] << ' ' << nodes * (nodes - 1) / 2 - p << '\n';
    return 0;
}
1763
F
Edge Queries
You are given an undirected, connected graph of $n$ nodes and $m$ edges. All nodes $u$ of the graph satisfy the following: - Let $S_u$ be the set of vertices in the longest simple cycle starting and ending at $u$. - Let $C_u$ be the union of the sets of vertices in any simple cycle starting and ending at $u$. - $S_u = C_u$. You need to answer $q$ queries. For each query, you will be given node $a$ and node $b$. Out of all the edges that belong to any simple path from $a$ to $b$, count the number of edges such that if you remove that edge, $a$ and $b$ are reachable from each other.
What kind of graph meets the conditions given in the statement? A graph with bridges connecting components with a hamiltonian cycle. Which edges will never be counted in answer to any query? Of course, the bridges. Restructure the graph to be able to answer queries. $query(u, v)$ on a tree can be solved efficiently via Lowest Common Ancestor (LCA). First, let us see examples of graphs that are valid or invalid according to the statement. In this graph, for node $4$, the longest simple cycle is $4 \rightarrow 1 \rightarrow 2 \rightarrow 3 \rightarrow 4$. $S_4 = {1, 2, 3, 4}$ All simple cycles from node $4$ are $4 \rightarrow 1 \rightarrow 2 \rightarrow 3 \rightarrow 4$ and $4 \rightarrow 1 \rightarrow 3 \rightarrow 4$. $C_4 = {1, 2, 3, 4}$ So, $S_4 = C_4$. Similarly, $S_u = C_u$ for all $u$. A tree of such components is also a valid graph! Here, $S_4 = {1, 2, 3, 4}$ and $C_4 = {1, 2, 3, 4, 5, 6}$. So, $S_4 \neq C_4$ The queries ask us to count all non-bridge edges in any simple path from $u$ to $v$. There are many ways to proceed with the solution. We will first go into a simple one that gives more insight into the problem. We can see our graph as a tree of BiConnected Components (BCCs). The edges of the tree are all bridges. Let's define a few things before continuing further. The first node of a BCC that is visited in the DFS tree will represent that BCC. Let $rep[u]$ be the representative of the BCC of node $u$. $cnt[u]$ be the number of edges in the BCC of $u$. Root node of our tree of BCCs is $root$. $lca(u, v)$ is the lowest common ancestor of $u$ and $v$. With all that set, let us now look at the DFS tree. We can build an array $dp$ to store the answer to $query(root, u)$, for all $u$, and to answer queries, we can use LCA. In a typical LCA use case, query(u, v) would be $dp[u] + dp[v] - 2 * dp[lca(u, v)]$, But is that the case here? Let us bring attention to a few things. Is $u = rep[u]$? If $u = rep[u]$, $u$ is the first vertex of its BCC in the DFS tree. 
Therefore all the edges in the BCC of $u$ will not lie in any simple path from $u$ to $root$. Example: In this graph, Let's say $root$ is $1$. See that, $dp[3]$ should be $0$, and $dp[4]$ should be $3$. They are in the same BCC, but $3$ is the topmost node, that is, the representative. Let $p$ be the parent of $u$. So, to calculate $dp[u]$, $\begin{align} dp[u] = \begin{cases} dp[p] & \text{if $rep[u] = u$,}\\dp[rep[u]] + cnt[u] & \text{otherwise} \end{cases} \end{align}$ Passing through the representative of a BCC. Let's say we have a graph of this type, Let's choose our $root$ to be $0$ and look at node $6$. There will be no simple path from $root$ to $6$ that uses the edges of the BCC of node $2$. Therefore, $dp[6]$ should not include edges from the BCC of node $2$. This is already dealt with by our earlier definition of $dp[u]$! The cases of $query(u, v)$. Now, $query(u, v)$ depends upon how $u$ and $v$ are connected in the graph. These are some significant cases. Case 1: $rep[u] = rep[v]$ That is, $u$ and $v$ are part of the same BCC. Therefore, the answer to $query(u, v)$ is just $cnt[u]$. Then, we have two cases concerning $lca(u, v)$. Case 2.1: We must visit only one node in the BCC of $lca(u, v)$. Case 2.2: We must visit at least two nodes in the BCC of $lca(u, v)$. Example: $u = 6, v = 5$ In 2.1, in any simple path from $u$ to $v$ we won't have any edge from the BCC of $lca(u, v)$. Therefore, we don't need to include $cnt[lca(u, v)]$ in the answer. While in 2.2, those edges will be included. In conclusion, in this setup, we need to determine how the simple paths from $u$ to $v$ cross through the BCC of $lca(u,v)$, then the queries will be answered. We can use binary lifting to determine which node is the lowest ancestor of $u$ in the DFS tree that is a part of the BCC of $lca(u, v)$. Similarly, we can find that node for $v$. We can judge which of the above cases any $query(u, v)$ is based on these two nodes. 
There are other ways to distinguish, including using a link-cut tree. We can create a smart graph to make it so that $query(u, v)$ is $dp[u] + dp[v] - 2 * dp[lca(u, v)] + val[lca(u, v)]$, with no casework involved. We will create virtual nodes representing each BCC. Remove all non-bridges from the graph, and connect all nodes of a BCC to its virtual node. For example: --> Here $v$ is the virtual node, and all the nodes present in BCC of $2$ are directly connected to the BCC's virtual node. Let us define the value of each actual node to be $0$ and every virtual node to be the count of edges of its BCC. Build an array $dp$ that stores the sum of the values of all vertices from $root$ to the current node. You can go back and see how each of the cases would be dealt with by this new graph.
[ "data structures", "dfs and similar", "dp", "dsu", "graphs", "trees" ]
3,000
null
1764
A
Doremy's Paint
Doremy has $n$ buckets of paint which is represented by an array $a$ of length $n$. Bucket $i$ contains paint with color $a_i$. Let $c(l,r)$ be the number of distinct elements in the subarray $[a_l,a_{l+1},\ldots,a_r]$. Choose $2$ integers $l$ and $r$ such that $l \leq r$ and $r-l-c(l,r)$ is maximized.
Assume that you have picked an interval $[L,R]$ as your answer. Now try to move the left endpoint to make the answer larger. Compare $[L,R]$ with $[L-1,R]$. $\Delta(r-l)=1$ because the length of the interval increases by $1$. $c(l,r)$ increases by at most $1$: if $a_{L-1}$ appears in $[L,R]$, then $c(L-1,R)=c(L,R)$; if $a_{L-1}$ does not appear in $[L,R]$, then $c(L-1,R)=c(L,R)+1$. So $[L-1,R]$ is never worse than $[L,R]$. Furthermore, we can see that if $a\le b \le c \le d$, then $[a,d]$ is at least as good as $[b,c]$. Since $[1,n]$ includes all intervals, it is at least as good as any other interval. So just output $1$ and $n$.
[ "greedy" ]
800
#include <iostream>
using namespace std;

// 1764A Doremy's Paint: the whole array [1, n] is always an optimal
// interval (see tutorial), so the values themselves only need to be
// consumed from the input stream.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        for (int i = 0, discard; i < n; ++i) {
            cin >> discard;  // array contents do not affect the answer
        }
        cout << "1 " << n << std::endl;
    }
    return 0;
}
1764
B
Doremy's Perfect Math Class
"Everybody! Doremy's Perfect Math Class is about to start! Come and do your best if you want to have as much IQ as me!" In today's math class, Doremy is teaching everyone subtraction. Now she gives you a quiz to prove that you are paying attention in class. You are given a set $S$ containing \textbf{positive} integers. You may perform the following operation some (possibly zero) number of times: - choose two integers $x$ and $y$ from the set $S$ such that $x > y$ and $x - y$ is not in the set $S$. - add $x-y$ into the set $S$. You need to tell Doremy the maximum possible number of integers in $S$ if the operations are performed optimally. It can be proven that this number is finite.
For any two natural numbers $x,y$ , assign $|x-y|$ to the bigger number. Repeat this process until the smaller number becomes $0$, and then the bigger number will become $\gcd(x,y)$. So we can know that if $x,y\in S$, then it is guaranteed that $\gcd(x,y)\in S$. So $\gcd(a_1,a_2,\dots,a_n)\in S$. Let $t=\gcd(a_1,a_2,\dots,a_n),A=\max(a_1,a_2,\dots,a_n)=Kt$, then $t,A\in S$. So $t,2t,\dots,Kt\in S$. Because $|x-y|\le \max(x,y)$, any number bigger than $A$ will not be in $S$. Because $|xt-yt|=|x-y|t$, any number which is not divisible by $t$ will not be in $S$. And $0\not\in S$. So $t,2t,\dots,Kt$ are all the numbers that can be in $S$.
[ "math", "number theory" ]
900
#include<bits/stdc++.h>
using namespace std;

// 1764B Doremy's Perfect Math Class: every producible value is a multiple
// of g = gcd(S) not exceeding max(S), and all such multiples are reachable,
// so the answer is max(S) / g.
int main(){
    int t;
    scanf("%d", &t);
    while (t--) {
        int n;
        scanf("%d", &n);
        long long g = 0, mx = 0, mn = LLONG_MAX;
        for (int i = 0; i < n; ++i) {
            long long v;
            scanf("%lld", &v);
            g = __gcd(g, v);   // __gcd(0, v) == v, so g starts as the first element
            mx = max(mx, v);
            mn = min(mn, v);
        }
        // Fix: the old code used a[n] as the maximum and a[1] as the minimum,
        // silently relying on the input being sorted; track them explicitly.
        // The (mn == 0) term preserves the old defensive behavior of counting
        // 0 separately if it ever appears (the statement guarantees positive
        // values, so normally it contributes nothing).
        printf("%lld\n", mx / g + (mn == 0));
    }
    return 0;
}
1764
C
Doremy's City Construction
Doremy's new city is under construction! The city can be regarded as a simple undirected graph with $n$ vertices. The $i$-th vertex has altitude $a_i$. Now Doremy is deciding which pairs of vertices should be connected with edges. Due to economic reasons, there should be no self-loops or multiple edges in the graph. Due to safety reasons, there should not be \textbf{pairwise distinct} vertices $u$, $v$, and $w$ such that $a_u \leq a_v \leq a_w$ and the edges $(u,v)$ and $(v,w)$ exist. Under these constraints, Doremy would like to know the maximum possible number of edges in the graph. Can you help her? Note that the constructed graph is allowed to be disconnected.
We can first assume that no edge links two vertices with the same value. Then for each vertex $u$ and its set of neighbors $S_u$, either $a_u>\max\limits_{v\in S_u}a_v$ or $a_u<\min\limits_{v\in S_u}a_v$. If $a_u>\max\limits_{v\in S_u}a_v$, we paint $u$ black, otherwise we paint it white. Then it is clear that any edge connects two vertices of different colors. So we can first determine the color of each vertex and then add as many edges as possible according to the colors. $(u,v)$ is addable only when $u$ is black, $v$ is white and $a_u>a_v$. If we paint $u$ black and $v$ white and $a_u<a_v$, we can swap the colors and the edges connecting to these $2$ vertices and the answer will be no worse. So the best painting plan is determined by a threshold value $A$: paint $u$ black if and only if $a_u\ge A$, and paint $u$ white if and only if $a_u<A$. If we add an edge linking two vertices with the same value, then these two vertices cannot connect to any other vertices. This plan will only be considered when $a_1=a_2=\cdots=a_n$. When $a_1=a_2=\cdots=a_n$, the answer is $\lfloor\frac{n}{2}\rfloor$.
[ "graphs", "greedy" ]
1,400
#include<cstdio>
#include<cstring>
#include<iostream>
#include<algorithm>
using namespace std;

const int MAXN = 200005;
int a[MAXN];

// 1764C Doremy's City Construction: sort the altitudes, split them at a
// threshold into "low" (white) and "high" (black) groups; only low-high
// edges are allowed, so the answer is the best cut product. If all values
// are equal the only option is a perfect matching: floor(n / 2) edges.
int main(){
    int t;
    scanf("%d", &t);
    while (t--) {
        int n;
        scanf("%d", &n);
        for (int i = 1; i <= n; ++i) scanf("%d", &a[i]);
        sort(a + 1, a + n + 1);
        if (a[1] == a[n]) {
            // All altitudes equal: pair vertices up.
            printf("%d\n", n / 2);
            continue;
        }
        long long best = 0;
        // Walk over runs of equal values; each run end is a candidate cut.
        for (int i = 1; i <= n; ) {
            int j = i;
            while (j < n && a[j + 1] == a[i]) ++j;
            best = max(best, 1ll * j * (n - j));
            i = j + 1;
        }
        printf("%lld\n", best);
    }
    return 0;
}
1764
D
Doremy's Pegging Game
Doremy has $n+1$ pegs. There are $n$ red pegs arranged as vertices of a regular $n$-sided polygon, numbered from $1$ to $n$ in anti-clockwise order. There is also a blue peg of \textbf{slightly smaller diameter} in the middle of the polygon. A rubber band is stretched around the red pegs. Doremy is very bored today and has decided to play a game. Initially, she has an empty array $a$. While the rubber band does not touch the blue peg, she will: - choose $i$ ($1 \leq i \leq n$) such that the red peg $i$ has not been removed; - remove the red peg $i$; - append $i$ to the back of $a$. Doremy wonders how many possible different arrays $a$ can be produced by the following process. Since the answer can be big, you are only required to output it modulo $p$. $p$ is guaranteed to be a prime number. \begin{center} {\small game with $n=9$ and $a=[7,5,2,8,3,9,4]$ and another game with $n=8$ and $a=[3,4,7,1,8,5,2]$} \end{center}
The game will end immediately when the blue peg is not inside the convex formed by all remaining red pegs. Namely, there are $\lfloor \frac{n}{2} \rfloor$ consecutive red pegs removed. It can be proven by geometry. Assume $t=\lfloor \frac{n}{2} \rfloor$, and $n$ is odd. Let's enumerate the ending status: there are $i$ ($t \le i\le n -2$) consecutive red pegs removed and another $j$ ($0 \le j \le n - 2 - i$) red pegs removed. The last move makes the rubber band to stretch and touch the blue peg. So there is $2t-i$ ways to choose the last move. There are $\binom{n-2-i}{j}$ ways to choose another $j$ pegs. And there are $\binom{i+j-1}{j}j!(i-1)!$ ways to combine them. When $n$ is even, the analysis is similar. The only difference is that there is a special case when $i = n-1$. So the answer is: $\begin{aligned} & n\sum_{i=t}^{n-2} \sum_{j=0}^{n-i-2}\binom{n-i-2}{j}(i+j-1)!\cdot(2t-i) \\ +&[n \text{ is even}]n(n-2)! \end{aligned}$
[ "combinatorics", "dp", "math" ]
2,000
#include <cstdio>
#include <iostream>
#define LL long long

const int MX = 5000 + 233;

// comb = Pascal's triangle mod p (entries above the diagonal stay 0 via
// zero-initialized globals); fact = factorials mod p.
LL comb[MX][MX], n, p, fact[MX];

// Build the binomial and factorial tables modulo the (prime) input p.
void prepare() {
    for (int i = 0; i < MX; ++i) comb[i][0] = 1;
    for (int i = 1; i < MX; ++i)
        for (int j = 1; j < MX; ++j)
            comb[i][j] = (comb[i - 1][j] + comb[i - 1][j - 1]) % p;
    fact[0] = fact[1] = 1 % p;  // 1 % p keeps the table valid even for tiny p
    for (int i = 2; i < MX; ++i) fact[i] = fact[i - 1] * i % p;
}

// 1764D Doremy's Pegging Game: sum over ending states with i consecutive
// removed pegs and j extra removed pegs (see tutorial formula).
int main() {
    std::cin >> n >> p;
    prepare();
    int half = n / 2;  // t = floor(n / 2)
    LL total = 0;
    for (int i = half; i <= n - 1; ++i) {
        // For odd n the i == n - 1 term does not occur.
        if ((n & 1) && i == n - 1) break;
        int lim = (i == n - 1) ? n - i - 1 : n - i - 2;
        for (int j = 0; j <= lim; ++j)
            total = (total + n * (2 * half - i) % p * comb[lim][j] % p * fact[j + i - 1]) % p;
    }
    std::cout << total << std::endl;
    return 0;
}
1764
E
Doremy's Number Line
Doremy has two arrays $a$ and $b$ of $n$ integers each, and an integer $k$. Initially, she has a number line where no integers are colored. She chooses a permutation $p$ of $[1,2,\ldots,n]$ then performs $n$ moves. On the $i$-th move she does the following: - Pick an \textbf{uncolored} integer $x$ on the number line such that either: - $x \leq a_{p_i}$; or - there exists a \textbf{colored} integer $y$ such that $y \leq a_{p_i}$ and $x \leq y+b_{p_i}$. - Color integer $x$ with color $p_i$. Determine if the integer $k$ can be colored with color $1$.
First, if $k$ can be colored with a certain color, then any $K<k$ can also be colored with this color. So we only need to calculate the biggest number that can be colored with color $1$. Apparently this value will not exceed $x_1+y_1$. If $x_1$ is not maximum among all $x$, you can pick any $j$ such that $x_j\ge x_1$. Color point $x_1$ with color $j$, then color point $x_1+y_1$ with color $1$. So $x_1+y_1$ is the answer in this case. If $x_1$ is maximum, just enumerate which color precedes color $1$: No color precedes. Directly color $x_1$ with color $1$. Color $j$ precedes, but $x_j$ is not maximum among $x_2,x_3,\cdots,x_n$. In this case, point $x_j+y_j$ can always be colored with color $j$. Point $\min\{x_j+y_j,x_1\}+y_1$ can be colored with color $1$. Color $j$ precedes, and $x_j$ is maximum among $x_2,x_3,\cdots,x_n$. In this case, we can just enumerate which color precedes color $j$, which leads to a recursion, until there is only one color remaining. Time complexity is $O(n \log n)$ due to sorting.
[ "dp", "greedy", "sortings" ]
2,400
#include <bits/stdc++.h>
#define LL long long
const int MX = 2e5 + 5;

int n, s;

// One color: x = a_i (direct reach), y = b_i (relay bonus), id = input index.
struct Goat{
    int x ,y ,id;
}A[MX];

bool cmp(Goat a ,Goat b){
    return a.x < b.x;
}

// mx[i] = max over the first i colors (sorted by x) of x + y.
// 64-bit: x + y can reach 2e9, which would overflow int (fix).
LL mx[MX];

// Farthest coordinate reachable by chaining colors 1..id (sorted order),
// finishing with color id.
// Fix: the original recursion had depth O(n) (n up to 2e5) and risked
// stack overflow; this is the same recurrence unrolled into a loop,
// carried out in 64-bit arithmetic.
LL calc(int id){
    LL far = A[1].x;               // base case: only color 1 available
    for(int i = 2 ; i <= id ; ++i){
        LL reach = std::max(far ,mx[i - 2]);
        far = std::max(std::min(reach ,(LL)A[i].x) + A[i].y ,(LL)A[i].x);
    }
    return far;
}

int ans[MX];  // per original index: can color i paint the target k?

void solve(){
    scanf("%d%d" ,&n ,&s);
    for(int i = 1 ,x ,y ; i <= n ; ++i){
        scanf("%d%d" ,&x ,&y);
        A[i] = (Goat){x ,y ,i};
        ans[i] = false;
    }
    std::sort(A + 1 ,A + 1 + n ,cmp);
    for(int i = 1 ; i <= n ; ++i){
        // 64-bit sum (fix: A[i].x + A[i].y overflowed int for values near 1e9).
        mx[i] = std::max((LL)A[i].x + A[i].y ,mx[i - 1]);
    }
    // Every color except the largest-x one can rely on some x_j >= x_i relay.
    for(int i = 1 ; i < n ; ++i){
        if((LL)A[i].x + A[i].y >= s){
            ans[A[i].id] = true;
        }
    }
    // The largest-x color must build a chain through the others.
    if(calc(n) >= s) ans[A[n].id] = true;
    puts(ans[1] ? "YES" : "NO");
}

int main(){
    int t;
    scanf("%d" ,&t);
    while(t--) solve();
    return 0;
}
1764
F
Doremy's Experimental Tree
Doremy has an edge-weighted tree with $n$ vertices whose weights are \textbf{integers} between $1$ and $10^9$. She does $\frac{n(n+1)}{2}$ experiments on it. In each experiment, Doremy chooses vertices $i$ and $j$ such that $j \leq i$ and connects them directly with an edge with weight $1$. Then, there is exactly one cycle (or self-loop when $i=j$) in the graph. Doremy defines $f(i,j)$ as the sum of lengths of shortest paths from every vertex to the cycle. Formally, let $\mathrm{dis}_{i,j}(x,y)$ be the length of the shortest path between vertex $x$ and $y$ when the edge $(i,j)$ of weight $1$ is added, and $S_{i,j}$ be the set of vertices that are on the cycle when edge $(i,j)$ is added. Then, $$ f(i,j)=\sum_{x=1}^{n}\left(\min_{y\in S_{i,j}}\mathrm{dis}_{i,j}(x,y)\right). $$ Doremy writes down all values of $f(i,j)$ such that $1 \leq j \leq i \leq n$, then goes to sleep. However, after waking up, she finds that the tree has gone missing. Fortunately, the values of $f(i,j)$ are still in her notebook, and she knows which $i$ and $j$ they belong to. Given the values of $f(i,j)$, can you help her restore the tree? It is guaranteed that at least one suitable tree exists.
If path $(x,y) \subset (X,Y)$, then $f(x,y) > f(X,Y)$. Then we build a graph based on $f(i,j)$: the weight of the edge between $i$ and $j$ is $f(i,j)$. It can be shown that the maximum spanning tree of this graph has the same structure as the original tree. Then we need to compute the weight of each edge. The weight of the edge between $x$ and $y$ is $\frac{f(x,x)-f(x,y)}{size_y}$, where $x$ is $y$'s parent on the tree and $size_y$ is the size of the subtree of $y$.
[ "brute force", "constructive algorithms", "dfs and similar", "dsu", "sortings", "trees" ]
2,500
#include <bits/stdc++.h> #define debug(...) fprintf(stderr ,__VA_ARGS__) #define LL long long const int MX = 3e3 + 5; bool vis[MX]; LL w[MX][MX] ,dis[MX]; std::vector<int> e[MX]; int size[MX]; void dfs(int x ,int f){ size[x] = 1; for(auto i : e[x]){ if(i == f) continue; dfs(i ,x); size[x] += size[i]; } for(auto i : e[x]){ if(i == f) continue; printf("%d %d %lld\n" ,x ,i ,(w[1][x] - w[1][i]) / size[i]); } } int main(){ int n; scanf("%d" ,&n); memset(w ,-0x3f ,sizeof w); for(int i = 1 ; i <= n ; ++i){ for(int j = 1 ; j <= i ; ++j){ scanf("%lld" ,&w[i][j]); w[j][i] = w[i][j]; } } memset(dis ,-0x3f ,sizeof dis); dis[1] = 0; for(int i = 1 ; i <= n ; ++i){ int x = 0; for(int j = 1 ; j <= n ; ++j){ if(!vis[j] && (!x || dis[j] > dis[x])){ x = j; } } //debug("x = %d " ,x); //ans += dis[x]; if(i != 1) for(int j = 1 ; j <= n ; ++j){ if(w[j][x] == dis[x] && vis[j]){ e[x].push_back(j); e[j].push_back(x); //debug("%d %d\n" ,x ,j); } } vis[x] = true; for(int j = 1 ; j <= n ; ++j){ dis[j] = std::max(dis[j] ,w[x][j]); } } //return 0; dfs(1 ,1); return 0; }
1764
G3
Doremy's Perfect DS Class (Hard Version)
\textbf{The only difference between this problem and the other two versions is the maximum number of queries. In this version, you are allowed to ask at most $\mathbf{20}$ queries. You can make hacks only if all versions of the problem are solved.} This is an interactive problem. "Everybody! Doremy's Perfect Data Structure Class is about to start! Come and do your best if you want to have as much IQ as me!" In today's Data Structure class, Doremy is teaching everyone a powerful data structure — Doremy tree! Now she gives you a quiz to prove that you are paying attention in class. Given an array $a$ of length $m$, Doremy tree supports the query $Q(l,r,k)$, where $1 \leq l \leq r \leq m$ and $1 \leq k \leq m$, which returns the number of distinct integers in the array $\left[\lfloor\frac{a_l}{k} \rfloor, \lfloor\frac{a_{l+1}}{k} \rfloor, \ldots, \lfloor\frac{a_r}{k} \rfloor\right]$. Doremy has a secret permutation $p$ of integers from $1$ to $n$. You can make queries, in one query, you give $3$ integers $l,r,k$ ($1 \leq l \leq r \leq n$, $1 \leq k \leq n$) and receive the value of $Q(l,r,k)$ for the array $p$. Can you find the index $y$ ($1 \leq y \leq n$) such that $p_y=1$ in \textbf{at most} $\mathbf{20}$ queries? Note that the permutation $p$ is fixed before any queries are made.
If we add an edge between numbers $a,b$ when $\lfloor\frac{a}{2}\rfloor=\lfloor\frac{b}{2}\rfloor$, then when $n$ is an odd number, $1$ is the only one which has no neighbor, so let's consider the odd case at first. Let the number of edges $(u,v)$ satisfying $u,v\in [l,r]$ be $C(l,r)$, then $C(l,r)+Q(l,r,2)=r-l+1$. That is to say, we can get any $C(l,r)$ with a query. Try to use binary search to solve this problem. The key problem is for a certain $m$, determining whether $1$ is in $[1,m]$ or $[m+1,n]$. Let $2C(1,m)+x=m,2C(m+1,n)+y=n-m$. The edges between $[1,m]$ and $[m+1,n]$ must connect the remaining $x$ numbers in $[1,m]$ and the remaining $y$ numbers in $[m+1,n]$. There is exactly one number with no neighbor, so if $x=y+1$, $1$ is in $[1,m]$, otherwise $y=x+1$ and $1$ is in $[m+1,n]$. So we can do this case in $20$ queries. If $n$ is an even number, then $1,n$ will be the only two number which has no neighbor. With the similar analysis as above, we can know if $x>y$, $1,n$ are in $[1,m]$, if $y>x$, $1,n$ are in $[m+1,n]$, otherwise $1,n$ are in the different sides. When $l<r$, we can use $Q(l,r,n)$ to check whether $n$ is in $[l,r]$. Because $n\ge 3$, we can use this method to determine $1$ is in which side. So we can do this case in $30$ queries. Finding that we only need one extra query $Q(l,r,n)$, we can do the even case in $21$ queries. When we decrease the range of $1$ to $[l,r]$, we have already got $C(1,l-1),C(1,r),C(l,n),C(r+1,n)$. When $l+1=r$, if $C(1,l-1)+1=C(1,r)$, we only need to know $C(1,l)$ to get the answer, if $C(l,n)=C(r+1,n)+1$, we only need to know $C(r,n)$ to get the answer, otherwise it means that $p_l=1,p_r=n$ or $p_l=n,p_r=1$, this case we also only need one more query. So we decrease the number of queries by $1$, solving this case in $20$ queries.
[ "binary search", "interactive" ]
3,300
#include<cstdio>
#include<cstring>
#include<iostream>
#include<algorithm>
using namespace std;

// 1764G3 (hard, interactive): find the position of value 1 in a hidden
// permutation with at most 20 queries Q(l, r, k).

int n;

// Issue one query "? l r k" and read the judge's reply.
int query(int l,int r,int k){
    printf("? %d %d %d\n",l,r,k);
    fflush(stdout);
    int re;
    scanf("%d",&re);
    return re;
}

// Report the answer "! x".
void answer(int x){
    printf("! %d\n",x);
    fflush(stdout);
}

// rt: -1 while unknown; once a tied split has been resolved with a k = n
// query, records on which side of future tied splits value 1 lies
// (1 = left half, 0 = right half), so that query is paid for at most once.
int rt=-1;

// Binary search for the position of 1 inside [l, r].
// l1,l2 / r1,r2 carry previously obtained Q(...,2) values for the current
// borders (see tutorial) so the two-element base case can reuse them.
void divide(int l,int r,int l1,int l2,int r1,int r2){
    if(l==r){
        answer(l);
        return;
    }
    if(l+1==r){
        // Two candidates left; decide with at most one extra query.
        if(r1==r2+1){
            if(query(r,n,2)==r2+1)answer(r);
            else answer(l);
        }
        else if(l1==l2+1){
            if(query(1,l,2)==l2+1)answer(l);
            else answer(r);
        }
        else{
            // p_l and p_r are 1 and n in some order: one k = n query decides.
            if(l>1){
                if(query(1,l,n)==2)answer(r);
                else answer(l);
            }
            else{
                if(query(r,n,n)==2)answer(l);
                else answer(r);
            }
        }
        return;
    }
    int mid=(l+r)>>1;
    // Compare unmatched counts of the global prefix/suffix around mid.
    int L=query(1,mid,2),R=query(mid+1,n,2);
    if(L*2-mid>R*2-(n-mid))divide(l,mid,L,l2,r1,R);
    else if(L*2-mid<R*2-(n-mid))divide(mid+1,r,l1,L,R,r2);
    else{
        // Tie: 1 and n lie on opposite sides of mid (only possible for even n).
        if(~rt){
            // Side already known from an earlier tie — no extra query needed.
            if(rt)divide(l,mid,L,l2,r1,R);
            else divide(mid+1,r,l1,L,R,r2);
            // FIX: the original fell through here, issuing another k = n
            // query and recursing a second time after already answering.
            return;
        }
        if(query(1,mid,n)==2)rt=0,divide(mid+1,r,l1,L,R,r2);
        else rt=1,divide(l,mid,L,l2,r1,R);
    }
}

int main(){
    scanf("%d",&n);
    divide(1,n,n/2+1,0,n/2+1,0);
    return 0;
}
1764
H
Doremy's Paint 2
Doremy has $n$ buckets of paint which is represented by an array $a$ of length $n$. Bucket $i$ contains paint with color $a_i$. Initially, $a_i=i$. Doremy has $m$ segments $[l_i,r_i]$ ($1 \le l_i \le r_i \le n$). Each segment describes an operation. Operation $i$ is performed as follows: - For each $j$ such that $l_i < j \leq r_i$, set $a_j := a_{l_i}$. Doremy also selects an integer $k$. She wants to know for each integer $x$ from $0$ to $m-1$, the number of distinct colors in the array after performing operations $x \bmod m +1, (x+1) \bmod m + 1, \ldots, (x+k-1) \bmod m +1$. Can you help her calculate these values? Note that for each $x$ individually we start from the initial array and perform only the given $k$ operations in the given order.
The main idea of the solution is that we can try to the answers for position $x,x+1,\ldots,x+k$ all at once. We do this by representing the interval $[x+i,x+k+i)$ as $[x+i,x+k) \cup [x+k,x+k+i)$ and perform sweep line on both halves of interval. Firstly, after applying some operations on the array $a$, we will have $a_i \leq a_{i+1}$. That is, we can instead count the number of indices such that $a_i \neq a_{i+1}$ for one less than the number of distinct elements. From here on, we will be counting the number of elements that $a_i \neq a_{i+1}$ instead. Let us consider that the state of the array is $a$ after applying operations $[s+1,e]$. Consider the effect of applying operations $[s,e]$ instead. The change this operation will make is that we replace all $i$ such that $a_i \in [l_{s},r_{s}]$ with $a_i=l_{s}$. We can think about it as only counting $a_i \not\cong a_{i+1}$ where we can view the above operation of replacing $a_i \in [l_{s},r_{s}]$ with $l_{s}$ as instead merging $[l_{s},r_{s}]$ values as a single equivalence class which we label $l_s$. So for interval $[x+i,x+k+i) = [x+i,x+k) \cup [x+k,x+k+i)$ we will get the equivalence class for values when considering ranges $[l_j,r_j$] with $j \in [x+i,x+k-1]$. This is a natural line sweep when considering going from $[s,x+k-1]$ to $[s-1,x+k-1]$ that uses $O(n \log n)$ by amortized analysis. We can find the state of the array when applying operations $[x+k,x+k],[x+k,x+k+1],\ldots,[x+k,x+2k-1]$ in $O(n \log n)$ time by amoritzed analysis by only storing contiguous values as a set. By storing the changes we made when we go from $[x+k,e]$ to $[x+k,e+1]$, we are able to "undo" the changes to go from $[x+k,e+1]$ to $[x+k,e]$. 
Now, we want to go from applying operations in $[x+i+1,x+k+i+1) = [x+i+1,x+k) \cup [x+k,x+k+i+1)$ to $[x+i,x+k+i) = [x+i,x+k) \cup [x+k,x+k+i)$, first we go from $[x+k,x+k+i+1)$ to $[x+k,x+k+i)$ by undoing the operation (and checking how many adjacent values are $\not\cong$), then we update our equivalence classes to go from $[x+i+1,x+k)$ to $[x+i,x+k)$. However, The problem is that this algorithm runs in $O(\frac{m}{k} n \log n)$ time. However, we note that we can discretize the range $[1,n]$ using the ranges $[l_j,r_j]$ with $j \in [x,x+2k)$, so that our amortization works in $O(k \log k)$, to obtain a complexity of $O(\frac{m}{k} k \log k) = O(m \log k)$ (with a super huge constant). Note that since the element of the array $a$ are integers but are instead ranges now, there are some implementation details that are left as an exercise to the reader.
[ "data structures" ]
3,400
#include <bits/stdc++.h>
using namespace std;
#define int long long
#define ii pair<int,int>
#define iii tuple<int,int,int>
#define fi first
#define se second
#define endl '\n'
#define pub push_back
#define pob pop_back
#define puf push_front
#define pof pop_front
#define lb lower_bound
#define ub upper_bound
// rep iterates x over [start, end) ascending, or (end, start] descending
// when start > end.
#define rep(x,start,end) for(int x=(start)-((start)>(end));x!=(end)-((start)>(end));((start)<(end)?x++:x--))
#define all(x) (x).begin(),(x).end()
#define sz(x) (int)(x).size()
// 1764H Doremy's Paint 2: answer windows of k consecutive operations by
// processing them in batches of k, splitting each window [x+i, x+k+i) into
// a backward sweep over [x+i, x+k) and an undoable forward sweep over
// [x+k, x+k+i) (see tutorial). Coordinates are discretized per batch.
mt19937 rng(chrono::system_clock::now().time_since_epoch().count());
int n,m,k;
ii arr[600005];      // operations, duplicated once so windows can wrap mod m
int ans[400005];
vector<int> uni;     // discretized coordinates for the current batch
int nxt[200005];     // nxt[c] = next discretized coordinate after c
int state[200005];   // coordinate-run still "alive" in the forward half
int state2[200005];  // coordinate-run still "alive" in the backward half
// True iff the set s holds some position in [l, r).
bool has(int l,int r,set<int> &s){
    auto it=s.lb(l);
    return *it<r;
}
signed main(){
    ios::sync_with_stdio(0); cin.tie(0); cout.tie(0);
    cin.exceptions(ios::badbit | ios::failbit);
    cin>>n>>m>>k;
    rep(x,0,m) cin>>arr[x].fi>>arr[x].se;
    rep(x,0,m) arr[x].fi--,arr[x].se--;          // to 0-based coordinates
    rep(x,0,2*m) arr[x+m]=arr[x];                // duplicate for wraparound
    int l=0;
    while (l<m){   // one batch answers windows starting at l .. l+k-1
        // Discretize: only endpoints used by operations [l, l+2k) matter.
        uni={0,n};
        rep(x,l,l+2*k) uni.pub(arr[x].fi),uni.pub(arr[x].se+1);
        sort(all(uni));
        uni.erase(unique(all(uni)),uni.end());
        rep(x,0,sz(uni)-1) nxt[uni[x]]=uni[x+1];
        set<ii> s;
        for (auto it:uni) s.insert({it,it}); //position, color
        rep(x,0,sz(uni)) state[uni[x]]=state2[uni[x]]=1;
        vector<iii> proc; //time, position, state  (undo log for the forward half)
        // Forward half: apply operations l+k .. l+2k-1, logging every
        // destroyed segment boundary so it can be restored later.
        rep(x,l+k,l+2*k){
            if (s.count({arr[x].fi,arr[x].fi}) && state[arr[x].fi]){
                proc.pub({x,arr[x].fi,state[arr[x].fi]});
                state[arr[x].fi]=0;
            }
            while (true){
                auto it=s.ub(ii(arr[x].fi,1e9));
                if ((*it).fi>arr[x].se) break;
                if (arr[x].se+1<(*next(it)).fi) s.insert({arr[x].se+1,(*it).se});
                else{
                    proc.pub({x,(*it).se,state[(*it).se]});
                    state[(*it).se]=0;
                }
                s.erase(it);
            }
        }
        // curr counts adjacent unequal pairs after the full forward half.
        int curr=0;
        set<int> pos={n};   // surviving segment boundaries (n is a sentinel)
        for (auto [a,b]:s) if (b!=n){
            curr++;
            if (state[b]) curr+=nxt[b]-b-1;
            pos.insert(b);
        }
        s.clear();
        for (auto it:uni) s.insert({it,it}); //color, position
        set<ii> ranges;     // current equivalence classes of merged values
        rep(x,0,sz(uni)-1) ranges.insert({uni[x],uni[x+1]});
        // Backward half: extend the left end of the window from l+k-1 down
        // to l, merging value ranges, while undoing one forward operation
        // (time x+k) per step so the window keeps length k.
        rep(x,l+k,l){
            //merge things
            auto it=s.lb({arr[x].fi,-1});
            vector<int> v={(*it).se};
            while ((*it).fi<=arr[x].se){
                it=next(it);
                s.erase(prev(it));
                v.pub((*it).se);
            }
            if (sz(v)>1){
                // Merge classes v[0..] into one; fix up the pair count.
                rep(x,0,sz(v)-1){
                    if (state[v[x]] && state2[v[x]]) curr-=v[x+1]-v[x]-1;
                    state2[v[x]]=0;
                    curr-=has(v[x],v[x+1],pos);
                    ranges.erase({v[x],v[x+1]});
                }
                curr+=has(v[0],v[sz(v)-1],pos);
                s.insert({arr[x].fi,v[0]});
                ranges.insert({v[0],v[sz(v)-1]});
            }
            // Undo forward operations stamped with time x + k.
            while (!proc.empty() && get<0>(proc.back())==x+k){
                int a,b,c;
                tie(a,b,c)=proc.back();
                proc.pob();
                if (c){
                    state[b]=c;
                    if (state[b] && state2[b]) curr+=nxt[b]-b-1;
                }
                if (!pos.count(b)){
                    auto it=prev(ranges.ub({b,1e9}));
                    int l,r;
                    tie(l,r)=*it;
                    if (!has(l,r,pos)) curr++;
                    pos.insert(b);
                }
            }
            ans[x]=curr;   // distinct colors - 1 for window starting at x
        }
        l+=k;
    }
    rep(x,0,m) cout<<ans[x]<<" ";
    cout<<endl;   // endl is '\n' here (macro above), no flush implied
}
1765
A
Access Levels
BerSoft is the biggest IT corporation in Berland, and Monocarp is the head of its security department. This time, he faced the most difficult task ever. Basically, there are $n$ developers working at BerSoft, numbered from $1$ to $n$. There are $m$ documents shared on the internal network, numbered from $1$ to $m$. There is a table of access requirements $a$ such that $a_{i,j}$ (the $j$-th element of the $i$-th row) is $1$ if the $i$-th developer should have access to the $j$-th document, and $0$ if they should have no access to it. In order to restrict the access, Monocarp is going to perform the following actions: - choose the number of access groups $k \ge 1$; - assign each document an access group (an integer from $1$ to $k$) and the required access level (an integer from $1$ to $10^9$); - assign each developer $k$ integer values (from $1$ to $10^9$) — their access levels for each of the access groups. The developer $i$ has access to the document $j$ if their access level for the access group of the document is greater than or equal to the required access level of the document. What's the smallest number of access groups Monocarp can choose so that it's possible to assign access groups and access levels in order to satisfy the table of access requirements?
Suppose two documents $i$ and $j$ belong to the same access group, and the access level for the document $i$ is greater than the access level for document $j$. Then, every developer which has the access to the document $i$, has the access to the document $j$ as well; so, for every $d \in [1, n]$, the condition $a_{d,i} \le a_{d,j}$ must hold. We can build a directed graph where the arc $i \rightarrow j$ represents that this condition holds for the (ordered) pair of documents $(i, j)$. Every access group should be a path in this graph - so, our problem now requires us to cover a directed graph with the minimum number of vertex-disjoint paths. Let's assume that the graph is acyclic. Then every path is acyclic as well, so the number of vertices in a path is equal to the number of arcs in a path, plus one. Let $k$ be the number of paths, $v$ be the number of vertices, and $e$ be the total number of arcs used in the paths. It's easy to see that $k + e = v$; so, by maximizing the total number of arcs in the paths, we minimize the number of paths. So, we need to choose the maximum number of arcs so that each vertex belongs to only one path among those formed by these arcs. It is equivalent to the combination of the following two conditions: each vertex should have at most one incoming chosen arc; each vertex should have at most one outgoing chosen arc. Now our problem can be solved with network flows or bipartite matching. For example, one of the solutions is to create a bipartite graph where each vertex of the original graph is represented by two vertices, one for each part; and an arc $i \rightarrow j$ from the original graph is converted to the edge connecting the vertex $i$ in the left part and the vertex $j$ in the right part. 
It's easy to see that every matching in this graph fulfills the aforementioned two conditions, thus giving us a correct decomposition of an acyclic directed graph into vertex-disjoint paths - so, in order to minimize the number of such paths, we need to find the maximum matching. All that's left is to actually find these paths and convert them into the access groups/levels for the documents, and set access levels for the developers, which is pretty easy and straightforward to implement. But wait, what if the graph is not acyclic? Fortunately, a cycle can exist only between two documents with identical access requirements. We can deal with these in one of two ways: compress all identical documents into one vertex; or use the document index as the tiebreak if two documents are completely identical otherwise. Even the most basic implementation of bipartite matching with Kuhn's algorithm will yield a solution in $O(m^3 + m^2n)$.
[ "bitmasks", "dsu", "flows", "graph matchings" ]
2,400
#include <bits/stdc++.h>
// Shorthand loop macro used throughout.
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;

int n, m, m2;            // developers, documents, distinct documents
vector<vector<char>> g;  // g[i][j] == 1: arc i -> j (doc j's readers are a subset of doc i's)
int T;                   // timestamp for Kuhn's visited marks
vector<int> mt;          // mt[u]: left vertex matched to right vertex u (-1 if free)
vector<int> used;        // last timestamp at which a left vertex was visited

// Kuhn's augmenting-path search from left vertex v.
// Returns true if the matching was enlarged.
bool try_kuhn(int v){
    if (used[v] == T) return false;
    used[v] = T;
    forn(u, m2)
        if (g[v][u] && (mt[u] == -1 || try_kuhn(mt[u]))){
            mt[u] = v;
            return true;
        }
    return false;
}

int main() {
    cin >> n >> m;
    // b[j] is document j's access column: b[j][i] == '1' iff developer i may read it.
    vector<string> b(m, string(n, '0'));
    forn(i, n){
        string t;
        cin >> t;
        forn(j, m)
            b[j][i] = t[j];
    }
    // Deduplicate identical documents (identical docs could otherwise form cycles).
    vector<string> nw = b;
    sort(nw.begin(), nw.end());
    nw.resize(unique(nw.begin(), nw.end()) - nw.begin());
    m2 = nw.size();
    // Build arc i -> j when doc j is readable only by a subset of doc i's readers,
    // i.e. j may follow i in an access group with a higher required level.
    g.assign(m2, vector<char>(m2, 0));
    forn(i, m2)
        forn(j, m2)
            if (i != j){
                bool in = true;
                forn(k, n)
                    in &= nw[i][k] >= nw[j][k];
                if (in) g[i][j] = 1;
            }
    // Maximum matching: each matched arc merges two path pieces, so k paths remain.
    mt.assign(m2, -1);
    used.assign(m2, -1);
    T = 0;
    int k = m2;
    forn(i, m2)
        if (try_kuhn(i)){
            ++T;  // bump timestamp only after success: failed searches stay marked
            --k;
        }
    // nxt[v]: successor of v on its path; st[v]: v starts a path (no incoming matched arc).
    vector<int> nxt(m2, -1);
    vector<char> st(m2, true);
    forn(i, m2)
        if (mt[i] != -1){
            nxt[mt[i]] = i;
            st[i] = false;
        }
    // Walk each path: gr[v] = access group index, req[v] = required level in that group.
    // Levels start at 2 so that a developer level of 1 can mean "no access".
    vector<int> gr(m2), req(m2);
    int t = 0;
    forn(i, m2)
        if (st[i]){
            int v = i;
            int pos = 2;
            while (v != -1){
                gr[v] = t;
                req[v] = pos;
                ++pos;
                v = nxt[v];
            }
            ++t;
        }
    assert(t == k);
    // Map every original document back to its deduplicated representative.
    vector<int> num(m);
    forn(i, m)
        num[i] = lower_bound(nw.begin(), nw.end(), b[i]) - nw.begin();
    printf("%d\n", k);
    forn(i, m)
        printf("%d ", gr[num[i]] + 1);
    puts("");
    forn(i, m)
        printf("%d ", req[num[i]]);
    puts("");
    // Developer level in a group = max required level among its docs they may read.
    forn(i, n){
        vector<int> l(k, 1);
        forn(j, m2)
            if (nw[j][i] == '1')
                l[gr[j]] = max(l[gr[j]], req[j]);
        forn(j, k)
            printf("%d ", l[j]);
        puts("");
    }
    return 0;
}
1765
B
Broken Keyboard
Recently, Mishka started noticing that his keyboard malfunctions — maybe it's because he was playing rhythm games too much. Empirically, Mishka has found out that every other time he presses a key, it is registered as if the key was pressed twice. For example, if Mishka types text, the first time he presses a key, exactly one letter is printed; the second time he presses a key, two same letters are printed; the third time he presses a key, one letter is printed; the fourth time he presses a key, two same letters are printed, and so on. Note that the number of times a key was pressed is counted for the whole keyboard, not for each key separately. For example, if Mishka tries to type the word osu, it will be printed on the screen as ossu. You are given a word consisting of $n$ lowercase Latin letters. You have to determine if it can be printed on Mishka's keyboard or not. You may assume that Mishka cannot delete letters from the word, and every time he presses a key, the new letter (or letters) is appended to the end of the word.
There are many ways to solve this problem. Basically, we need to check two conditions. The first one is the condition on the number of characters: $n \bmod 3 \ne 2$, since after the first key press, we get the remainder $1$ modulo $3$, after the second key press, we get the remainder $0$ modulo $3$, then $1$ again, then $0$ - and so on, and we cannot get the remainder $2$. Then we need to check that, in each pair of characters which appeared from the same key press, these characters are the same - that is, $s_2 = s_3$, $s_5 = s_6$, $s_8 = s_9$, and so on.
[ "greedy" ]
800
t = int(input())
for _ in range(t):
    length = int(input())
    word = input()
    # A length with remainder 2 modulo 3 can never be produced by the keyboard.
    possible = length % 3 != 2
    if possible:
        # Characters at positions 3i+1 and 3i+2 come from a single double press,
        # so each such pair must consist of equal characters.
        for start in range(0, length - 2, 3):
            if word[start + 1] != word[start + 2]:
                possible = False
                break
    print('YES' if possible else 'NO')
1765
C
Card Guessing
Consider a deck of cards. Each card has one of $4$ suits, and there are exactly $n$ cards for each suit — so, the total number of cards in the deck is $4n$. The deck is shuffled randomly so that each of $(4n)!$ possible orders of cards in the deck has the same probability of being the result of shuffling. Let $c_i$ be the $i$-th card of the deck (from top to bottom). Monocarp starts drawing the cards from the deck one by one. Before drawing a card, he tries to guess its suit. Monocarp remembers the suits of the $k$ last cards, and his guess is the suit that appeared the least often among the last $k$ cards he has drawn. So, while drawing the $i$-th card, Monocarp guesses that its suit is the suit that appears the minimum number of times among the cards $c_{i-k}, c_{i-k+1}, \dots, c_{i-1}$ (if $i \le k$, Monocarp considers all previously drawn cards, that is, the cards $c_1, c_2, \dots, c_{i-1}$). If there are multiple suits that appeared the minimum number of times among the previous cards Monocarp remembers, he chooses a random suit out of those for his guess (all suits that appeared the minimum number of times have the same probability of being chosen). After making a guess, Monocarp draws a card and compares its suit to his guess. If they match, then his guess was correct; otherwise it was incorrect. Your task is to calculate the expected number of correct guesses Monocarp makes after drawing all $4n$ cards from the deck.
Obviously, linearity of expectation, we are calculating the sum of probability to guess correctly for each position from $1$ to $4n$. Let's start with a minor observation which can help us in calculations. The probability for all positions from $k+1$ onwards is the same. The probability for each position $i$ depends on the number of ways (and some properties of them) to pick $\min(i - 1, k)$ cards prior to it. Thus, when this $\min$ evaluates to $k$, it's all the same. We'll calculate the first $k+1$ positions and then multiply the last one by $4n - k$. How about we start with a naive solution? Let's pretend all cards are distinguishable from each other (which, in my opinion, is a rare case for combinatorics solutions). Then we can pick $a$ cards of the first suit, $b$ cards of the second suit, $c$ cards of the third suit and $d$ cards of the fourth suit to go prior to the position $a + b + c + d$. Check if the sum doesn't exceed $k$. The number of ways to choose these cards is $C(n, a) \cdot C(n, b) \cdot C(n, c) \cdot C(n, d) \cdot (a + b + c + d)!$. We should also permute the remaining cards - multiply by $(4n - (a + b + c + d))!$. And divide everything by $(4n)!$ to get the probability of getting exactly that permutation. The probability to guess the suit correctly is $\frac{\min(a, b, c, d)}{4n - (a + b + c + d)}$. Thus, we should add the product of these two probabilities to the answer. The annoying aspect of this solution is actually the $\min$ function. We'd prefer to always know that $a$ is the smallest one. Let's try to rewrite this naive in such a way that $a \le b \le c \le d$. Frankly, almost nothing changes. To account for the fact that we removed the order of $a, b, c$ and $d$, we want to multiply the number of ways by the number of ways to permute these amounts. That number is a multinomial that depends on the sizes of equivalence classes of the amounts. As in, if the amounts are $1, 1, 2, 3$, then it's $\frac{4!}{2! \cdot 1! \cdot 1!}$. 
If it's $1, 1, 1, 3$, then it's $\frac{4!}{3! \cdot 1!}$. All the rest stays the same. Then the probability to guess correctly is just $\frac{a}{4n - (a + b + c + d)}$. Great, now we can make a dp out of this naive solution. Basically, the answer depends on three things. The smallest amount, the total amount, and the counts of each amount. The first one can be accounted for during the initialization phase. The second one can be accounted for at the very end. And the third one can be accounted for during the transition :) We want to construct non-decreasing sequences of length $4$ with a known sum and with a known first element. Let that $dp$ be $dp[i][j][k]$ - some sum of probabilities over all sequences such that the current amount is $i$, the sum of the sequence is $j$, and we placed $k$ elements in the sequence. For the transition, we want to either increase the current value $i$ by $1$, or to place from $1$ to $4 - k$ copies of the current value. If we place it, we immediately apply the cnk and divide by the multinomial coefficient - the factorial of the number of copies. We increase $i$ as well, so that we can't place more elements equal to $i$. For the initialization, we iterate over the smallest element and the count of it. This way, we can put the numerator of the probability to guess the suit along with the other things. When we collect the answers, we iterate over the sum and account for the rest of the terms in the product: factorial of the sum, permuting the remaining cards, the denominator of the guess probability and so on. The formulas might get tricky, but they all make enough sense, and hopefully nothing incorrect works on the examples, which should help you to debug. Overall complexity: $O(n^2)$. There are cubic solutions, that are also smart enough in our opinion, which is why the constraints are set like that.
[ "combinatorics", "dp", "probabilities" ]
2,600
#include <bits/stdc++.h>
// Loop helpers used across the editorial code.
#define forn(i, n) for (int i = 0; i < int(n); i++)
#define fore(i, l, r) for (int i = int(l); i < int(r); i++)
using namespace std;

const int MOD = 998244353;

// Modular addition (operands assumed already reduced mod MOD).
int add(int a, int b){
    a += b;
    if (a >= MOD) a -= MOD;
    return a;
}

// Modular multiplication via 64-bit intermediate.
int mul(int a, int b){
    return a * 1ll * b % MOD;
}

// Fast exponentiation a^b mod MOD; a^(MOD-2) yields the modular inverse.
int binpow(int a, int b){
    int res = 1;
    while (b){
        if (b & 1) res = mul(res, a);
        a = mul(a, a);
        b >>= 1;
    }
    return res;
}

int main() {
    int n, k;
    scanf("%d%d", &n, &k);
    // Factorials and inverse factorials up to 4n (the deck size).
    vector<int> fact(4 * n + 1);
    fact[0] = 1;
    fore(i, 1, fact.size())
        fact[i] = mul(fact[i - 1], i);
    vector<int> rfact(4 * n + 1);
    rfact.back() = binpow(fact.back(), MOD - 2);
    for (int i = int(fact.size()) - 2; i >= 0; --i)
        rfact[i] = mul(rfact[i + 1], i + 1);
    auto cnk = [&](int n, int k){
        return mul(fact[n], mul(rfact[k], rfact[n - k]));
    };
    // sv[i][t] = C(n, i)^t / t!: weight of t suits each contributing amount i;
    // the 1/t! is the multinomial correction for equal amounts (see editorial).
    vector<vector<int>> sv(n + 1, vector<int>(5));
    forn(i, n + 1)
        forn(t, 5)
            sv[i][t] = mul(binpow(cnk(n, i), t), rfact[t]);
    // dp over non-decreasing amount sequences, rolled on the parity of the current
    // value ii: dp[parity][j][p] — total drawn cards j, p suits placed so far.
    vector<vector<vector<int>>> dp(2, vector<vector<int>>(4 * n + 1, vector<int>(5)));
    forn(ii, n + 1){
        int i = ii & 1;
        int ni = i ^ 1;
        dp[ni] = vector<vector<int>>(4 * n + 1, vector<int>(5));
        // Initialization: the minimum amount is ii, shared by t suits; the factor
        // (n - ii) is the guess-probability numerator (cards of that suit left).
        for (int t = 1; t <= 4; ++t)
            dp[ni][ii * t][t] = mul(n - ii, sv[ii][t]);
        forn(j, k + 1)
            for (int p = 1; p <= 4; ++p)
                if (dp[i][j][p]){
                    // Either carry the state over without using amount ii again...
                    dp[ni][j][p] = add(dp[ni][j][p], dp[i][j][p]);
                    // ...or place t more suits with amount ii each.
                    for (int t = 1; p + t <= 4; ++t)
                        dp[ni][j + ii * t][p + t] = add(dp[ni][j + ii * t][p + t], mul(dp[i][j][p], sv[ii][t]));
                }
    }
    int ans = 0;
    // Collect per-position answers over the count `sum` of previously drawn cards.
    // Positions beyond k all behave identically, hence the factor (4n - k) at sum == k.
    forn(sum, k + 1){
        ans = add(ans, mul(mul(mul(
            sum < k ? 1 : 4 * n - k,
            dp[(n & 1) ^ 1][sum][4]),
            // remaining permutations and the guess-probability denominator
            mul(binpow(4 * n - sum, MOD - 2), mul(rfact[4 * n], fact[4 * n - sum]))),
            // 4! orderings of the suit amounts, sum! orderings of the drawn prefix
            mul(fact[4], fact[sum]))
        );
    }
    printf("%d\n", ans);
    return 0;
}
1765
D
Watch the Videos
Monocarp wants to watch $n$ videos. Each video is only one minute long, but its size may be arbitrary. The $i$-th video has the size $a_i$ megabytes. All videos are published on the Internet. A video should be downloaded before it can be watched. Monocarp has poor Internet connection — it takes exactly $1$ minute to download $1$ megabyte of data, so it will require $a_i$ minutes to download the $i$-th video. Monocarp's computer has a hard disk of $m$ megabytes. The disk is used to store the downloaded videos. Once Monocarp starts the download of a video of size $s$, the $s$ megabytes are immediately reserved on a hard disk. If there are less than $s$ megabytes left, the download cannot be started until the required space is freed. Each single video can be stored on the hard disk, since $a_i \le m$ for all $i$. Once the download is started, it cannot be interrupted. It is not allowed to run two or more downloads in parallel. Once a video is fully downloaded to the hard disk, Monocarp can watch it. Watching each video takes exactly $1$ minute and does not occupy the Internet connection, so Monocarp can start downloading another video while watching the current one. When Monocarp finishes watching a video, he doesn't need it on the hard disk anymore, so he can delete the video, instantly freeing the space it occupied on a hard disk. Deleting a video takes negligible time. Monocarp wants to watch all $n$ videos as quickly as possible. The order of watching does not matter, since Monocarp needs to watch all of them anyway. Please calculate the minimum possible time required for that.
In this solution, we assume that the sequence $a_1, a_2, \dots, a_n$ is sorted (if it is not - just sort it before running the solution). Suppose we download and watch the videos in some order. The answer to the problem is $n + \sum a_i$, reduced by $1$ for every pair of adjacent videos that can fit onto the hard disk together (i. e. their total size is not greater than $m$), because for every such pair, we can start downloading the second one while watching the first one. So, we need to order the videos in such a way that the number of such pairs is the maximum possible. Suppose we want to order them so that every pair of adjacent videos is "good". We need to pick the ordering that minimizes the maximum sum of adjacent elements. There are multiple ways to construct this ordering; one of them is $[a_n, a_1, a_{n-1}, a_2, a_{n - 2}, a_3, \dots]$, and the maximum sum of adjacent elements will be $\max_{i=1, 2i \ne n}^{n} a_i + a_{n + 1 - i}$. Proof that this ordering is optimal starts here Suppose $j$ is such value of $i$ that $a_i + a_{n + 1 - i}$ is the maximum, and $j < n + 1 - j$. Let's prove that we cannot make the maximum sum of adjacent elements less than $a_j + a_{n + 1 - j}$. There are at most $j - 1$ values in $a$ which are less than $a_j$ (let's call them Group A), and at least $j$ values that are not less than $a_j$ (let's call them Group B). If we want each sum of adjacent elements to be less than $a_j + a_{n + 1 - j}$, we want the elements of Group B to be adjacent only to the elements of the Group A. The elements of Group B have at least $2j - 2$ links to the neighbors (since at most two of them will have only one neighbor), the elements of Group A have at most $2(j - 1)$ links to the neighbors, but we cannot link Group A with Group B without any outside links since it will disconnect them from all other elements of the array. So, we cannot get the maximum sum less than $a_j + a_{n + 1 - j}$. 
Proof that this ordering is optimal ends here Okay, now what if we cannot make all pairs of adjacent elements "good"? We can run binary search on the number of pairs that should be "bad". Let this number be $k$, then if we need to make at most $k$ pairs "bad", we can check that it's possible by removing $k$ maximum elements from $a$ and checking that now we can make the array "good". So, in total, our solution will work in $O(n \log n)$.
[ "binary search", "constructive algorithms", "two pointers" ]
1,700
#include <iostream>
#include <algorithm>
#include <cstdio> // scanf is used below; <iostream> is not guaranteed to provide it
using namespace std;

#define N 200000

int a[N];  // video sizes, sorted in descending order
int n, s;  // number of videos, hard disk capacity

// Returns true if, after setting aside the x-1 largest videos, the remaining
// ones can be ordered so that every adjacent pair fits on the disk together.
// The candidate ordering interleaves the largest and smallest remaining sizes,
// which minimizes the maximum adjacent sum (see editorial proof).
bool can(int x) {
    int l = x - 1, r = n - 1;
    while (l < r) {
        if (a[r] > s - a[l]) return false; // pair (a[l], a[r]) does not fit
        ++l;
        if (l < r) {
            if (a[l] > s - a[r]) return false;
            --r;
        }
    }
    return true;
}

int main() {
    long long sum = 0;
    scanf("%d %d\n", &n, &s);
    for (int i = 0; i < n; ++i) {
        scanf("%d", &a[i]);
        sum += a[i];
    }
    sort(a, a + n, greater<int>());
    // Binary search the minimum x: setting aside more large videos only helps,
    // so can() is monotone in x.
    int l = 1, r = n;
    while (l < r) {
        int m = (l + r) >> 1;
        if (can(m)) r = m;
        else l = m + 1;
    }
    // Total time = download everything (sum) + watch n videos - one saved minute
    // per good adjacent pair, which simplifies to sum + x.
    long long ans = sum + r;
    cout << ans << endl;
    return 0;
}
1765
E
Exchange
Monocarp is playing a MMORPG. There are two commonly used types of currency in this MMORPG — gold coins and silver coins. Monocarp wants to buy a new weapon for his character, and that weapon costs $n$ silver coins. Unfortunately, right now, Monocarp has no coins at all. Monocarp can earn gold coins by completing quests in the game. Each quest yields exactly one gold coin. Monocarp can also exchange coins via the in-game trading system. Monocarp has spent days analyzing the in-game economy; he came to the following conclusion: it is possible to sell one gold coin for $a$ silver coins (i. e. Monocarp can lose one gold coin to gain $a$ silver coins), or buy one gold coin for $b$ silver coins (i. e. Monocarp can lose $b$ silver coins to gain one gold coin). Now Monocarp wants to calculate the minimum number of quests that he has to complete in order to have at least $n$ silver coins after some abuse of the in-game economy. Note that Monocarp can perform exchanges of both types (selling and buying gold coins for silver coins) any number of times.
If $a > b$, then Monocarp can go infinite by obtaining just one gold coin: exchanging it for silver coins and then buying it back will earn him $a-b$ silver coins out of nowhere. So, the answer is $1$ no matter what $n$ is. If $a \le b$, then it's suboptimal to exchange gold coins for silver coins and then buy the gold coins back. Monocarp should earn the minimum possible number of gold coins so that they all can be exchanged into at least $n$ silver coins; so, the number of gold coins he needs is $\lceil \frac{n}{a} \rceil$. One small note: you shouldn't use the functions like ceil to compute a fraction rounded up, since you may get some computation errors related to using floating-point numbers (and possibly getting precision loss). Instead, you should calculate $\lceil \frac{n}{a} \rceil$ as $\lfloor \frac{n + a - 1}{a} \rfloor$ (this works for non-negative $n$ and positive $a$), and every programming language provides an integer division operator which discards the fractional part and thus doesn't use floating-point computations at all.
[ "brute force", "math" ]
1,000
#include <bits/stdc++.h>
using namespace std;

// Answer one test case: the minimum number of quests Monocarp must complete.
void solve() {
    int n, a, b;
    cin >> n >> a >> b;
    // If selling a gold coin earns more than buying one back costs, one coin
    // can be cycled for unbounded profit.
    if (a > b) {
        cout << 1 << endl;
        return;
    }
    // Otherwise earn ceil(n / a) gold coins and sell them all for silver.
    cout << (n + a - 1) / a << endl;
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
}
1765
F
Chemistry Lab
Monocarp is planning on opening a chemistry lab. During the first month, he's going to distribute solutions of a certain acid. First, he will sign some contracts with a local chemistry factory. Each contract provides Monocarp with an unlimited supply of some solution of the same acid. The factory provides $n$ contract options, numbered from $1$ to $n$. The $i$-th solution has a concentration of $x_i\%$, the contract costs $w_i$ burles, and Monocarp will be able to sell it for $c_i$ burles per liter. Monocarp is expecting $k$ customers during the first month. Each customer will buy a liter of a $y\%$-solution, where $y$ is a \textbf{real} number chosen uniformly at random from $0$ to $100$ independently for each customer. More formally, the probability of number $y$ being less than or equal to some $t$ is $P(y \le t) = \frac{t}{100}$. Monocarp can mix the solution that he signed the contracts with the factory for, at any ratio. More formally, if he has contracts for $m$ solutions with concentrations $x_1, x_2, \dots, x_m$, then, for these solutions, he picks their volumes $a_1, a_2, \dots, a_m$ so that $\sum \limits_{i=1}^{m} a_i = 1$ (exactly $1$ since each customer wants exactly one liter of a certain solution). The concentration of the resulting solution is $\sum \limits_{i=1}^{m} x_i \cdot a_i$. The price of the resulting solution is $\sum \limits_{i=1}^{m} c_i \cdot a_i$. If Monocarp can obtain a solution of concentration $y\%$, then he will do it while maximizing its price (the cost for the customer). Otherwise, the customer leaves without buying anything, and the price is considered equal to $0$. Monocarp wants to sign some contracts with a factory (possibly, none or all of them) so that the expected profit is maximized — the expected total price of the sold solutions for all $k$ customers minus the total cost of signing the contracts from the factory. Print the maximum expected profit Monocarp can achieve.
Let's start without picking the contracts. Buy them all and at least learn to calculate the answer. Obviously, $k$ doesn't really matter for the answer. Linearity of expectation, it will be just something multiplied by $k$. Imagine there's just one contract. Since we can't really mix it with anything, and the probability that the customer picks exactly that concentration is $0$, the answer is $0$. What about two contracts? If you draw them as points $(x_i, c_i)$ on the grid, then all solutions, that can be obtained from a mix of them, are a segment. Basically, the formula is exactly the parametric equation for the segment, and that's how you could come to this conclusion. The expected cost is some integral, which is basically an area below the segment. Three contracts? As a reasonable conclusion, now it's a triangle on these points. To show that, let's fix one of these points $A$, opposite to some base of the triangle $BC$. Pick a percentage of the solution $A$, then draw a segment from $A$ to that percentage of $BC$. If you fix the percentages of $B$ and $C$ (the point on the segment $BC$), then you can basically continuously gravitate this point towards $A$ along the drawn segment by increasing the concentration of $A$ and decreasing the concentrations of $B$ and $C$, while keeping their proportion to each other the same. The union of all options covers the entire triangle and can never go outside it. Ok, let's make an educated guess about more contracts and try to show that. All solutions, that can be obtained with a mix of several solutions, are insides of a convex hull of the set of points. Let the set of points be convex already. If it isn't, we can say that the answer is the union of all convex subsets of it, which is equal to the convex hull. To prove it for the convex set, we can choose any three consecutive points on it and contract them into a segment, with the proof for the triangle. Continue until we reach two points. 
All solutions are a convex hull, but we only care about the ones with the maximum cost. That would be the set of the highest intersection points of the convex hull and the vertical lines for all concentrations. Which is known as an upper envelope of the convex hull. The expected cost is the area below that upper envelope. So we learned to calculate the answer for a fixed set of solutions. Let's learn how to pick them. We want to directly construct that upper envelope. Let me first tell you a direct but a slow solution. Do dynamic programming. Sort the solutions first in the increasing order of their concentration, second in the increasing order of their cost per litre. Now do $dp[i][j]$ - the maximum answer if the last point in the upper envelope is $j$ and the second last is $i$. To transition, iterate over the next point $k$ in that upper envelope and check if the triple $(i, j, k)$ is clockwise. To update the answer, add the newly added area below the segment $(j, k)$, which is a trapezoid, multiplied by $k$, and subtract the cost of the contract $k$. That would be $O(n^3)$. Now for the funny transition. Basically, we can just not care that the points we pick give us an upper envelope. If we picked a set of points, which is not convex, then we can safely remove some points (pay less money for the contracts) and our calculated area can never decrease from that (since we still are picking points in the increasing order of $x$ and $y$). Thus, we can remove the second to last point in the dp and only store the last one. That is enough to recalculate the area. Overall complexity: $O(n^2)$.
[ "dp", "geometry", "probabilities" ]
2,200
#include <bits/stdc++.h>
using namespace std;

// A contract option: concentration x, signing cost w, price per liter c.
struct Contract {
    int x, w, c;
};

// Twice the area of the trapezoid under the segment joining the points
// (p.x, p.c) and (q.x, q.c) — kept doubled to stay in integers.
long long trapezoid2(const Contract &p, const Contract &q) {
    return (p.c + q.c) * (long long)(abs(p.x - q.x));
}

int main() {
    int n, k;
    scanf("%d%d", &n, &k);
    vector<Contract> opt(n);
    for (auto &o : opt)
        scanf("%d%d%d", &o.x, &o.w, &o.c);
    // Process points from the largest concentration to the smallest, so every
    // later point extends the upper envelope to the left.
    sort(opt.begin(), opt.end(), [](const Contract &p, const Contract &q) {
        return p.x > q.x;
    });
    // best[i]: maximum profit (scaled by 200 to keep everything integral) of an
    // envelope whose last chosen point is i.
    vector<long long> best(n, -(long long)1e18);
    long long answer = 0;
    for (int i = 0; i < n; ++i) {
        // Start a fresh envelope at point i: pay only its contract cost.
        best[i] = max(best[i], -opt[i].w * 200ll);
        // Extend by any later (smaller-x) point j; add the area contribution
        // times the expected number of customers, minus j's contract cost.
        for (int j = i + 1; j < n; ++j)
            best[j] = max(best[j], best[i] + trapezoid2(opt[i], opt[j]) * k - opt[j].w * 200ll);
        answer = max(answer, best[i]);
    }
    // Undo the x200 scaling (x2 for the trapezoid, x100 for the percentage).
    printf("%.15Lf\n", answer / (long double)(200));
    return 0;
}
1765
G
Guess the String
\textbf{This is an interactive problem. You have to use flush operation right after printing each line. For example, in C++ you should use the function fflush(stdout), in Java or Kotlin — System.out.flush(), and in Python — sys.stdout.flush().} The jury has a string $s$ consisting of characters 0 and/or 1. The first character of this string is 0. The length of this string is $n$. You have to guess this string. Let's denote $s[l..r]$ as the substring of $s$ from $l$ to $r$ (i. e. $s[l..r]$ is the string $s_ls_{l+1} \dots s_r$). Let the prefix function of the string $s$ be an array $[p_1, p_2, \dots, p_n]$, where $p_i$ is the greatest integer $j \in [0, i-1]$ such that $s[1..j] = s[i-j+1..i]$. Also, let the antiprefix function of the string $s$ be an array $[q_1, q_2, \dots, q_n]$, where $q_i$ is the greatest integer $j \in [0, i-1]$ such that $s[1..j]$ differs from $s[i-j+1..i]$ in \textbf{every} position. For example, for the string 011001, its prefix function is $[0, 0, 0, 1, 1, 2]$, and its antiprefix function is $[0, 1, 1, 2, 3, 4]$. You can ask queries of two types to guess the string $s$: - $1$ $i$ — "what is the value of $p_i$?"; - $2$ $i$ — "what is the value of $q_i$?". You have to guess the string by asking no more than $789$ queries. Note that giving the answer does not count as a query. \textbf{In every test and in every test case, the string $s$ is fixed beforehand}.
We will design a randomized solution which spends $1.5$ queries on average to guess $2$ characters. First of all, let's ask $p_2$ to learn which character is $s_2$. Depending on whether it is 0 or 1, the details of the solution might change, but the general idea stays the same. We will assume that it is 0. We will divide the string into blocks of $2$ characters and guess it block by block. Suppose a block contains the characters $s_{i - 1}$ and $s_i$. If we want to guess it using less than $2$ queries, we should start with querying the $i$-th position (if we start by querying the position $i-1$, we won't know anything about the $i$-th position). Suppose our query has returned a number greater than $1$. Then we successfully obtained the full information about the block in one query. So, let's analyze the bad case: what if we got $0$ or $1$ as the result of the query? If we used prefix function, the result equal to $1$ shows us that the block is 10 - 00 will give the result of at least $2$, and neither 01 nor 11 can give $1$ as the result. But if the result is $0$, then it could mean either 01 or 11, and we need a second query to find out the exact combination. If we used antiprefix function, every character gets inverted; so, the result equal to $1$ tells us that the block is 01, and the result equal to $0$ tells us that the block is either 10 or 00. So, one type of query cannot distinguish 01 from 11, and the other type cannot distinguish 10 from 00. If we pick the type of query randomly, with probability of $\frac{1}{2}$ we will obtain the information about the whole block in just one query; so, this method will spend $1.5$ queries per block of $2$ characters on average.
[ "constructive algorithms", "interactive", "probabilities" ]
2,600
#include <bits/stdc++.h>
using namespace std;

// Fixed-seed RNG: picks which query type (prefix/antiprefix) to try first.
mt19937 rnd(42);
uniform_int_distribution<int> d(1, 2);

// Query the jury: type t (1 = prefix function, 2 = antiprefix function) at
// position i (0-based here, 1-based on the wire). Returns the jury's value.
int ask(int t, int i) {
    cout << t << " " << i + 1 << endl;
    int x;
    cin >> x;
    return x;
}

// Submit the guessed string; the jury replies 1 on success.
void giveAnswer(const string& s) {
    cout << 0 << " " << s << endl;
    int x;
    cin >> x;
    assert(x == 1);
}

// Learn the single character s[i] with one prefix-function query.
// Since s[0] == '0', p_i == 0 is only possible when s[i] == '1';
// otherwise s[i] equals the last character of the matched prefix.
void guessOne(string& s, int i) {
    int res = ask(1, i);
    if(res == 0) s[i] = '1';
    else s[i] = s[res - 1];
}

// Flip a binary character.
char inv(char c) {
    if(c == '0') return '1';
    return '0';
}

// Learn the two-character block s[i-1..i], starting with a randomly chosen
// query type at position i. With probability 1/2 one query suffices; otherwise
// fall back to guessOne for the remaining character — 1.5 queries per block
// on average. The case analysis depends on s[1] (see editorial).
void guessTwo(string& s, int i) {
    if(s[1] == '0') {
        if(d(rnd) == 1) {
            int res = ask(1, i);
            if(res >= 2) {
                // A match of length >= 2 pins down both characters of the block.
                s[i] = s[res - 1];
                s[i - 1] = s[res - 2];
            } else if(res == 1) {
                // With s starting "00": p_i == 1 forces the block "10"
                // ("00" would have matched length >= 2).
                s[i] = '0';
                s[i - 1] = '1';
            } else {
                // p_i == 0: s[i] = '1', but s[i-1] is still ambiguous.
                s[i] = '1';
                guessOne(s, i - 1);
            }
        } else {
            int res = ask(2, i);
            if(res >= 2) {
                // Antiprefix match: every character is inverted.
                s[i] = inv(s[res - 1]);
                s[i - 1] = inv(s[res - 2]);
            } else if(res == 1) {
                // q_i == 1 forces the block "01".
                s[i] = '1';
                s[i - 1] = '0';
            } else {
                // q_i == 0: s[i] = '0'; s[i-1] needs one more query.
                s[i] = '0';
                guessOne(s, i - 1);
            }
        }
    } else {
        // Symmetric case for s starting "01"; the ambiguous/forced branches swap.
        if(d(rnd) == 1) {
            int res = ask(1, i);
            if(res >= 2) {
                s[i] = s[res - 1];
                s[i - 1] = s[res - 2];
            } else if(res == 1) {
                s[i] = '0';
                guessOne(s, i - 1);
            } else {
                s[i] = '1';
                s[i - 1] = '1';
            }
        } else {
            int res = ask(2, i);
            if(res >= 2) {
                s[i] = inv(s[res - 1]);
                s[i - 1] = inv(s[res - 2]);
            } else if(res == 1) {
                s[i] = '1';
                guessOne(s, i - 1);
            } else {
                s[i] = '0';
                s[i - 1] = '0';
            }
        }
    }
}

int main() {
    int t;
    cin >> t;
    for(int i = 0; i < t; i++) {
        int n;
        cin >> n;
        // s[0] is guaranteed to be '0'; guess the rest in blocks of two.
        string s(n, '0');
        for(int j = 1; j < n; j += 2) {
            if(j == 1) guessOne(s, j);
            else guessTwo(s, j);
        }
        if(n % 2 == 1) guessOne(s, n - 1);
        giveAnswer(s);
    }
}
1765
H
Hospital Queue
There are $n$ people (numbered from $1$ to $n$) signed up for a doctor's appointment. The doctor has to choose in which order he will appoint these people. The $i$-th patient should be appointed among the first $p_i$ people. There are also $m$ restrictions of the following format: the $i$-th restriction is denoted by two integers $(a_i, b_i)$ and means that the patient with the index $a_i$ should be appointed earlier than the patient with the index $b_i$. For example, if $n = 4$, $p = [2, 3, 2, 4]$, $m = 1$, $a = [3]$ and $b = [1]$, then the only order of appointment of patients that does not violate the restrictions is $[3, 1, 2, 4]$. For $n =3$, $p = [3, 3, 3]$, $m = 0$, $a = []$ and $b = []$, any order of appointment is valid. For each patient, calculate the minimum position in the order that they can have among all possible orderings that don't violate the restrictions.
Let's solve the problem separately for each patient. Let's assume that we are solving a problem for a patient with the number $s$. Let's iterate through the positions in the queue from the end, and the current position is $i$. Why do we try to construct the queue from the end, and not from its beginning? This allows us to handle the constraint on $p_i$ for each patient easier. If we try to form the queue starting from the beginning, we have to be very careful with these constraints on $p_i$, since placing a patient on some position might make it impossible to continue the order, but it's very difficult to understand when it concerns us. On the other hand, if we go from the end to the beginning, each patient that we can place on the current position can be placed on any of the positions we consider later as well, so our actions won't "break" the correct order; any order we build while maintaining all of the constraints for its suffix is correct. That's why this problem is easier to solve if we construct the queue backwards. Now, all we need to do is try not to place patient $s$ as long as possible. So, we can maintain all patients we can place (such a patient $x$ that $x$ is not placed yet, $p_x \ge i$ and there is no such patient $y$, that $x$ should be in the queue before $y$, but $y$ is not placed yet) now in any data structure, and when choosing who to place on each position, we delay placing patient $s$ as long as possible.
[ "binary search", "graphs", "greedy", "implementation" ]
2,200
#include <bits/stdc++.h>
using namespace std;

int main() {
    ios::sync_with_stdio(false);
    cin.tie(0);
    int n, m;
    cin >> n >> m;
    // limit[v]: patient v must be appointed among the first limit[v] people.
    vector<int> limit(n);
    for (auto &p : limit) cin >> p;
    // rev[b] lists every patient that must come before b (edges stored backwards,
    // since the queue is built from its end).
    vector<vector<int>> rev(n);
    while (m--) {
        int before, after;
        cin >> before >> after;
        rev[after - 1].push_back(before - 1);
    }
    vector<int> best(n, -1);
    // Solve independently for each patient `target`: fill positions n..1 and
    // postpone `target` as long as possible — the position where no one else
    // fits is the minimum position `target` can take.
    for (int target = 0; target < n; ++target) {
        // cnt[u]: number of not-yet-placed patients that must come after u.
        vector<int> cnt(n, 0);
        for (int v = 0; v < n; ++v)
            for (int u : rev[v]) ++cnt[u];
        // Placeable patients, the one with the largest limit first.
        priority_queue<pair<int, int>> ready;
        for (int v = 0; v < n; ++v)
            if (cnt[v] == 0 && v != target) ready.push({limit[v], v});
        for (int pos = n; pos > 0; --pos) {
            if (ready.empty() || ready.top().first < pos) {
                // Nobody else can occupy pos: this is the latest slot left for
                // `target` — valid only if target itself is placeable here.
                if (cnt[target] == 0 && pos <= limit[target]) best[target] = pos;
                break;
            }
            int v = ready.top().second;
            ready.pop();
            for (int u : rev[v]) {
                --cnt[u];
                if (cnt[u] == 0 && u != target) ready.push({limit[u], u});
            }
        }
    }
    for (int &x : best) cout << x << ' ';
}
1765
I
Infinite Chess
The black king lives on a chess board with an infinite number of columns (files) and $8$ rows (ranks). The columns are numbered with all integer numbers (including negative). The rows are numbered from $1$ to $8$. Initially, the black king is located on the starting square $(x_s, y_s)$, and he needs to reach some target square $(x_t, y_t)$. Unfortunately, there are also white pieces on the board, and they threaten the black king. After negotiations, the white pieces agreed to let the black king pass to the target square on the following conditions: - each turn, the black king makes a move according to the movement rules; - the black king cannot move to a square occupied by a white piece; - the black king cannot move to a square which is under attack by any white piece. A square is under attack if a white piece can reach it in one move according to the movement rules; - the white pieces never move. Help the black king find the minimum number of moves needed to reach the target square while not violating the conditions. The black king cannot leave the board at any time. The black king moves according to the movement rules below. Even though the white pieces never move, squares which they can reach in one move are considered to be under attack, so the black king cannot move into those squares. Below are the movement rules. Note that the pieces (except for the knight) cannot jump over other pieces. - a king moves exactly one square horizontally, vertically, or diagonally. - a rook moves any number of vacant squares horizontally or vertically. - a bishop moves any number of vacant squares diagonally. - a queen moves any number of vacant squares horizontally, vertically, or diagonally. - a knight moves to one of the nearest squares not on the same rank, file, or diagonal (this can be thought of as moving two squares horizontally then one square vertically, or moving one square horizontally then two squares vertically — i. e. in an "L" pattern). 
Knights are not blocked by other pieces, they can simply jump over them. There are no pawns on the board. \begin{center} {\small King and knight possible moves, respectively. Dotted line shows that knight can jump over other pieces.} \end{center} \begin{center} {\small Queen, bishop, and rook possible moves, respectively.} \end{center}
First, we can limit the field to a non-infinite amount of columns. Take the leftmost position, the rightmost one and leave like $10$ more cells from each side so that everything is nice on the borders. If the field wasn't as huge, we could just do a BFS over it. Mark the cells that are taken by the pieces and that are attacked by them. Then just go and avoid them. Unfortunately, the field is like $10^9$ cells large, and we also have up to $8$ transitions from each cell. Too much. Let's use the fact that there are not too many pieces on the board. Take a look at two pieces that have only empty columns between them. If you go far enough inwards, then all the columns between them will look the same in terms of cells to be avoided. That far enough is the columns at distance more than $8$ from both of the pieces. The only pieces that can possibly affect these columns are rooks or queens far away that fill the entire rows. Since these columns are the same, let's get rid of them and account for the distance by using a Dijkstra instead of a BFS. The solution becomes the following. Leave only the columns that have a piece at distance at most $8$ from them. There will be at most $16 \cdot n$ of them. Mark all the attacked cells on the taken columns. Do Dijkstra and carefully handle the moves over the missing columns. Let's think more about moving over the missing columns. Let the distance between adjacent taken columns be $d$. Then the king can go from some row $i$ of the first column into some row $j$ of the second column in $\max(|i - j|, d)$ moves (it can save moves by going diagonally). You can code it exactly like that, but you can also do it a little neater. Leave not $8$ but $16$ columns. Now, the moves that the king saves by going diagonally will be accounted for automatically. And you can only consider going to the previous, the current or the next row for any pair of adjacent columns.
[ "implementation", "shortest paths" ]
2,800
#include <bits/stdc++.h>
// Chess on a board with 8 rows and up to ~1e9 columns: find the shortest
// king path from start to finish avoiding cells that are occupied or
// attacked by the given pieces. Input coordinates are swapped so that x is
// the small (8-cell) dimension and y is the huge column index.
#define forn(i, n) for (int i = 0; i < int(n); i++)
using namespace std;

// Piece letters; the index in this string becomes the piece type id.
const string al = "KQRBN";
const int INF = 1e9;

struct mv{ int dx, dy; };       // one move direction
struct piece{ int x, y, t; };   // position + type id (index into al)
struct seg{ int l, r; };        // NOTE(review): declared but never used
struct pos{ int x, y; };

bool operator <(const pos &a, const pos &b){
    if (a.x != b.x) return a.x < b.x;
    return a.y < b.y;
}

// Move directions per type: king, queen, rook, bishop, knight.
// King (t==0) and knight (t==4) are single-step movers; queen/rook/bishop
// extend each listed direction as a ray until blocked.
vector<vector<mv>> mvs({
    {{-1, -1}, {-1, 0}, {-1, 1}, {0, 1}, {1, 1}, {1, 0}, {1, -1}, {0, -1}},
    {{-1, 0}, {0, 1}, {1, 0}, {0, -1}, {-1, -1}, {1, -1}, {1, 1}, {-1, 1}},
    {{-1, 0}, {0, 1}, {1, 0}, {0, -1}},
    {{-1, -1}, {1, -1}, {1, 1}, {-1, 1}},
    {{-2, -1}, {-2, 1}, {-1, 2}, {1, 2}, {2, 1}, {2, -1}, {1, -2}, {-1, -2}}
});

int main() {
    int sx, sy, fx, fy;
    cin >> sx >> sy >> fx >> fy;
    swap(sx, sy), swap(fx, fy);   // x = row within 8, y = column (huge)
    --sx, --fx;
    int k;
    cin >> k;
    vector<piece> a(k);
    forn(i, k){
        string t;
        cin >> t >> a[i].x >> a[i].y;
        swap(a[i].x, a[i].y);
        --a[i].x;
        a[i].t = al.find(t[0]);
    }
    sort(a.begin(), a.end(), [](const piece &a, const piece &b){
        if (a.x != b.x) return a.x < b.x;
        return a.y < b.y;
    });
    // Coordinate-compress columns: keep only columns within 16 of the start,
    // the finish, or any piece. Per the editorial, keeping 16 (not 8) lets
    // the king's diagonal shortcuts be accounted for automatically by the
    // weighted edges in the Dijkstra below.
    vector<int> base({sy, fy});
    forn(i, k) base.push_back(a[i].y);
    vector<int> ys;
    for (int y : base)
        for (int i = y - 16; i <= y + 16; ++i)
            ys.push_back(i);
    sort(ys.begin(), ys.end());
    ys.resize(unique(ys.begin(), ys.end()) - ys.begin());
    // tk[x][y] -- cell holds a piece (blocks sliding attacks).
    vector<vector<char>> tk(8, vector<char>(ys.size()));
    forn(i, k){
        a[i].y = lower_bound(ys.begin(), ys.end(), a[i].y) - ys.begin();
        tk[a[i].x][a[i].y] = true;
    }
    sy = lower_bound(ys.begin(), ys.end(), sy) - ys.begin();
    fy = lower_bound(ys.begin(), ys.end(), fy) - ys.begin();
    // bad[x][y] -- cell is occupied or attacked; the king may not enter.
    vector<vector<char>> bad = tk;
    forn(i, k){
        int x = a[i].x, y = a[i].y;
        if (a[i].t == 0 || a[i].t == 4){
            // King / knight: mark individual target squares.
            // (y +- 2 stays inside ys: 33 consecutive columns around every
            // piece were inserted above, so compressed offsets are exact.)
            for (auto it : mvs[a[i].t]){
                int nx = x + it.dx;
                int ny = y + it.dy;
                if (0 <= nx && nx < 8)
                    bad[nx][ny] = true;
            }
        }
        else{
            // Sliding piece: walk each ray until the board edge, the end of
            // the kept columns, or a blocking piece.
            for (auto it : mvs[a[i].t]){
                for (int nx = x + it.dx, ny = y + it.dy; ; nx += it.dx, ny += it.dy){
                    if (nx < 0 || nx >= 8) break;
                    if (ny < 0 || ny >= int(ys.size())) break;
                    if (tk[nx][ny]) break;
                    bad[nx][ny] = true;
                }
            }
        }
    }
    // Dijkstra over (row, kept column). Moving between adjacent kept columns
    // costs their real distance: the king crosses the skipped identical
    // columns with straight horizontal moves.
    vector<vector<int>> d(8, vector<int>(ys.size(), INF));
    vector<vector<pos>> p(8, vector<pos>(ys.size()));  // predecessors; assigned but never printed
    set<pair<int, pos>> q;                             // ordered set as a decrease-key priority queue
    d[sx][sy] = 0;
    q.insert({0, {sx, sy}});
    while (!q.empty()){
        int x = q.begin()->second.x;
        int y = q.begin()->second.y;
        q.erase(q.begin());
        if (x == fx && y == fy){
            cout << d[x][y] << endl;
            return 0;
        }
        for (int ny : {y - 1, y, y + 1}){
            if (ny < 0 || ny >= int(ys.size())) continue;
            // Horizontal cost; staying in the same column still costs 1
            // (a purely vertical step).
            int dy = max(1, abs(ys[y] - ys[ny]));
            for (int nx = max(0, x - 1); nx <= min(7, x + 1); ++nx)
                if (!bad[nx][ny]){
                    int nd = d[x][y] + dy;
                    if (d[nx][ny] > nd){
                        q.erase({d[nx][ny], {nx, ny}});  // decrease-key: drop stale entry
                        d[nx][ny] = nd;
                        p[nx][ny] = {x, y};
                        q.insert({d[nx][ny], {nx, ny}});
                    }
                }
        }
    }
    cout << -1 << endl;  // finish unreachable
    return 0;
}
1765
J
Hero to Zero
There are no heroes in this problem. I guess we should have named it "To Zero". You are given two arrays $a$ and $b$, each of these arrays contains $n$ non-negative integers. Let $c$ be a matrix of size $n \times n$ such that $c_{i,j} = |a_i - b_j|$ for every $i \in [1, n]$ and every $j \in [1, n]$. Your goal is to transform the matrix $c$ so that it becomes the zero matrix, i. e. a matrix where \textbf{every} element is \textbf{exactly} $0$. In order to do so, you may perform the following operations any number of times, in any order: - choose an integer $i$, then decrease $c_{i,j}$ by $1$ for every $j \in [1, n]$ (i. e. decrease all elements in the $i$-th row by $1$). In order to perform this operation, you \textbf{pay} $1$ coin; - choose an integer $j$, then decrease $c_{i,j}$ by $1$ for every $i \in [1, n]$ (i. e. decrease all elements in the $j$-th column by $1$). In order to perform this operation, you \textbf{pay} $1$ coin; - choose two integers $i$ and $j$, then decrease $c_{i,j}$ by $1$. In order to perform this operation, you \textbf{pay} $1$ coin; - choose an integer $i$, then increase $c_{i,j}$ by $1$ for every $j \in [1, n]$ (i. e. increase all elements in the $i$-th row by $1$). When you perform this operation, you \textbf{receive} $1$ coin; - choose an integer $j$, then increase $c_{i,j}$ by $1$ for every $i \in [1, n]$ (i. e. increase all elements in the $j$-th column by $1$). When you perform this operation, you \textbf{receive} $1$ coin. You have to calculate the minimum number of coins required to transform the matrix $c$ into the zero matrix. Note that all elements of $c$ should be equal to $0$ \textbf{simultaneously} after the operations.
The order of operations is interchangeable, so we assume that we perform mass operations first, and operations affecting single elements last. Suppose $p_i$ is the value we subtract from the $i$-th row using the operations affecting the whole row, and $q_j$ is the value we subtract from the $j$-th column using the operations affecting the whole column. Then the number of coins we have to pay is $S - (n-1) \cdot (\sum p_i + \sum q_j)$, since every such operation "saves" us exactly $n-1$ coins by decreasing the sum in the matrix by $n$ using only one coin. Obviously, we want to maximize $\sum p_i + \sum q_j$, but if we subtract too much, some elements of the matrix may become negative (and it will be impossible to set them to zero using single element operations). So, for every $i \in [1, n]$ and $j \in [1, n]$, the condition $p_i + q_j \le c_{i,j}$ must hold. Now let's take a look at how the Hungarian algorithm for assignment problem works. We can notice that the potentials in the Hungarian algorithm have the same constraints as the values $p_i$ and $q_j$ in our problem, and the sum of those potentials is maximized. So, if we apply the Hungarian algorithm to the matrix $c$, we can use the potentials as the values $p_i$ and $q_j$. But the matrix is too large, so what should we do? We can use the fact that the maximum possible sum of potentials is equal to the optimal solution of the assignment problem, so we should solve the assignment problem in some other way. And in this particular way of constructing the matrix $c$ (where $c_{i,j} = |a_i - b_j|$), the solution of the assignment problem is simple: we need to reorder the elements of both arrays $a$ and $b$ so that the value of $\sum |a_i - b_i|$ is minimized, and it's easy to see (and to prove using exchange argument method) that sorting both arrays will give us the optimal solution. 
All that's left is to calculate the sum of elements in the matrix, and that can be done if for every element in $a$, we calculate the number of elements in $b$ which are less than it, the number of elements in $b$ which are greater than it, and the sum of elements of $b$ greater than it (to quickly compute the sum of $|a_i - b_j|$ for every $j \in [1, n]$). All these values can be obtained with the help of binary search, two pointers method or some data structures. The complexity of the solution is $O(n \log n)$.
[ "graph matchings", "math" ]
2,900
#include<bits/stdc++.h> using namespace std; int main() { int n; scanf("%d", &n); vector<int> a(n), b(n); for(int i = 0; i < n; i++) scanf("%d", &a[i]); for(int i = 0; i < n; i++) scanf("%d", &b[i]); sort(a.begin(), a.end()); sort(b.begin(), b.end()); long long total = 0; for(int i = 0; i < n; i++) { total += n * 1ll * (a[i] + b[i]); total -= a[i] * 2ll * (b.end() - lower_bound(b.begin(), b.end(), a[i])); total -= b[i] * 2ll * (a.end() - upper_bound(a.begin(), a.end(), b[i])); } for(int i = 0; i < n; i++) total -= (n - 1) * 1ll * abs(a[i] - b[i]); cout << total << endl; }
1765
K
Torus Path
You are given a square grid with $n$ rows and $n$ columns, where each cell has a non-negative integer written in it. There is a chip initially placed at the top left cell (the cell with coordinates $(1, 1)$). You need to move the chip to the bottom right cell (the cell with coordinates $(n, n)$). In one step, you can move the chip to the neighboring cell, but: - you can move only right or down. In other words, if the current cell is $(x, y)$, you can move either to $(x, y + 1)$ or to $(x + 1, y)$. There are two special cases: - if the chip is in the last column (cell $(x, n)$) and you're moving right, you'll teleport to the first column (to the cell $(x, 1)$); - if the chip is in the last row (cell $(n, y)$) and you're moving down, you'll teleport to the first row (to the cell $(1, y)$). - you \textbf{cannot} visit the same cell twice. The starting cell is counted visited from the beginning (so you cannot enter it again), and you can't leave the finishing cell once you visit it. Your total score is counted as the sum of numbers in all cells you have visited. What is the maximum possible score you can achieve?
Note that you can't visit all vertices on the antidiagonal (vertices $(1, n), (2, n - 1), \dots (n, 1)$) at the same time. Let's prove it: since you are starting outside the antidiagonal then the only way to visit vertex $(x, n + 1 - x)$ is to move from $(x - 1, n + 1 - x)$ or from $(x, n - x)$. In total, there are $n$ vertices you can move from - it's vertices $(1, n - 1), (2, n - 2), \dots, (n - 1, 1)$ and $(n, n)$. But $(n, n)$ is the finishing vertices you can't leave, so there are only $n - 1$ positions left. As a result, you can visit at most $n - 1$ vertices on the antidiagonal. Now, let's prove that if you've chosen a vertex $(x, n + 1 - x)$ you decided to skip, you can always visit all other vertices. Let's use the following simple strategy: let's move right until we meet the restricted or already visited vertex, then move down once then continue moving right and so on. For example, let's visit all vertices of $4 \times 4$ matrix except vertex $(2, 3)$. The path would be the following: $(1, 1)$ $-$ $(1, 2)$ $-$ $(1, 3)$ $-$ $(1, 4)$ $-$ $(2, 4)$ $-$ $(2, 1)$ $-$ $(2, 2)$ $-$ $(3, 2)$ $-$ $(3, 3)$ $-$ $(3, 4)$ $-$ $(3, 1)$ $-$ $(4, 1)$ $-$ $(4, 2)$ $-$ $(4, 3)$ $-$ $(4, 4)$. As a result, the answer is the sum of all elements minus the minimum among $a[x][n + 1 - x]$, or $\sum_{i = 1}^{n}\sum_{j = 1}^{n}a[i][j] - \min_{1 \le x \le n}(a[x][n + 1 - x])$.
[ "greedy", "math" ]
1,500
#include<bits/stdc++.h> using namespace std; int n; vector< vector<int> > a; bool read() { if (!(cin >> n)) return false; a.resize(n, vector<int>(n)); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) cin >> a[i][j]; } return true; } void solve() { long long sum = 0; for (int i = 0; i < n; i++) sum += accumulate(a[i].begin(), a[i].end(), 0LL); int mn = a[0][n - 1]; for (int i = 0; i < n; i++) mn = min(mn, a[i][n - 1 - i]); cout << sum - mn << endl; } int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); int tt = clock(); #endif if(read()) { solve(); #ifdef _DEBUG cerr << "TIME = " << clock() - tt << endl; tt = clock(); #endif } return 0; }
1765
L
Project Manager
There are $n$ employees at Bersoft company, numbered from $1$ to $n$. Each employee works on some days of the week and rests on the other days. You are given the lists of working days of the week for each employee. There are regular days and holidays. On regular days, only those employees work that have the current day of the week on their list. On holidays, no one works. You are provided with a list of days that are holidays. The days are numbered from $1$ onwards, day $1$ is Monday. The company receives $k$ project offers they have to complete. The projects are numbered from $1$ to $k$ in the order of decreasing priority. Each project consists of multiple parts, where the $i$-th part must be completed by the $a_i$-th employee. The parts must be completed in order (i. e. the $(i+1)$-st part can only be started when the $i$-th part is completed). Each part takes the corresponding employee a day to complete. The projects can be worked on simultaneously. However, one employee can complete a part of only one project during a single day. If they have a choice of what project to complete a part on, they always go for the project with the highest priority (the lowest index). For each project, output the day that project will be completed on.
First, notice that the answer can't be that large. Even the worst case: one developer works one day of the week, responsible for a $2 \cdot 10^5$ part project, the first $2 \cdot 10^5$ of his workdays are holidays. It's just $7 \cdot (2 \cdot 10^5 + 2 \cdot 10^5)$. Thus, we'd like to iterate over time and complete project parts exactly as they are due. If we manage to deduce the parts that have to be completed during the current day, the complexity will be limited by the total number of parts plus the maximum time. To determine that, I propose to maintain the following data structures. First, maintain $7$ vectors of maps: for each day of the week, maintain pairs (the developer, the number of projects they are currently the blocking person for). Each developer is only counted in days they work at and only if they have a non-zero number of projects. Second, maintain $n$ vectors of sets: for each developer, maintain the projects they are currently the blocking person for. To process a day, extract all pairs from the map for the current day of the week. All these developers will complete a part of a project. The index of that project is the first element in the corresponding set. Update all maps and sets and proceed. If the number of projects for any developers becomes zero, remove them from the maps. If the current day is a holiday, just skip it.
[ "brute force", "data structures", "implementation" ]
2,400
#include <bits/stdc++.h> #define forn(i, n) for (int i = 0; i < int(n); i++) using namespace std; const string days[] = {"Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"}; int main() { cin.tie(0); iostream::sync_with_stdio(false); int n, m, k; cin >> n >> m >> k; vector<vector<char>> ds(n, vector<char>(7)); forn(i, n){ int t; cin >> t; forn(_, t){ string s; cin >> s; ds[i][find(days, days + 7, s) - days] = true; } } vector<int> h(m); forn(i, m) cin >> h[i]; vector<vector<int>> a(k); forn(i, k){ int p; cin >> p; a[i].resize(p); forn(j, p){ cin >> a[i][j]; --a[i][j]; } } int j = 0; vector<int> ans(k, -1), lst(k); int done = 0; vector<map<int, int>> cur(7); vector<set<int>> wk(n); forn(i, k) forn(j, 7) if (ds[a[i][0]][j]) ++cur[j][a[i][0]]; forn(i, k) wk[a[i][0]].insert(i); for (int d = 1;; ++d){ if (j < m && h[j] == d){ ++j; continue; } int wd = (d - 1) % 7; vector<int> now, sv; for (auto it : cur[wd]) now.push_back(it.first); for (int x : now){ forn(i, 7){ auto it = cur[i].find(x); if (it != cur[i].end()){ if (it->second == 1) cur[i].erase(it); else --it->second; } } int y = *wk[x].begin(); sv.push_back(y); wk[x].erase(wk[x].begin()); } forn(i, now.size()){ int y = sv[i]; ++lst[y]; if (lst[y] == int(a[y].size())){ ans[y] = d; ++done; continue; } wk[a[y][lst[y]]].insert(y); forn(j, 7) if (ds[a[y][lst[y]]][j]) ++cur[j][a[y][lst[y]]]; } if (done == k) break; } forn(i, k) cout << ans[i] << " "; cout << endl; }
1765
M
Minimum LCM
You are given an integer $n$. Your task is to find two positive (greater than $0$) integers $a$ and $b$ such that $a+b=n$ and the least common multiple (LCM) of $a$ and $b$ is the minimum among all possible values of $a$ and $b$. If there are multiple answers, you can print any of them.
Suppose $a \le b$. Let's show that if $b \bmod a \ne 0$, the answer is suboptimal. If $b \bmod a = 0$, then $LCM(a, b) = b$, so the answer is less than $n$. But if $b \bmod a \ne 0$, then $LCM(a, b)$ is at least $2b$, and $b$ is at least $\frac{n}{2}$, so in this case, the answer is at least $n$. Okay, now we know that in the optimal answer, $b \bmod a = 0$. This also means that $n \bmod a = 0$, since $n = a + b$. So we need to search for $a$ only among the divisors of $n$, and it is possible to iterate through all of them in $O(n^{0.5})$.
[ "math", "number theory" ]
1,000
#include <bits/stdc++.h> using namespace std; int main() { int t; cin >> t; while (t--) { int n; cin >> n; int a = 1; for (int g = 2; g * g <= n; ++g) { if (n % g == 0) { a = n / g; break; } } cout << a << ' ' << n - a << '\n'; } }
1765
N
Number Reduction
You are given a positive integer $x$. You can apply the following operation to the number: remove one occurrence of any digit in such a way that the resulting number \textbf{does not contain any leading zeroes} and \textbf{is still a positive integer}. For example, $10142$ can be converted to $1142$, $1042$, $1012$ or $1014$ (note that $0142$ is not a valid outcome); $10$ can be converted to $1$ (but not to $0$ since it is not positive). Your task is to find the minimum positive integer that you can obtain from $x$ if you can apply the aforementioned operation exactly $k$ times.
To begin with, in order to minimize the answer, we minimize the first (highest) digit of the answer. Let's check if the highest digit can be equal to $1$. To do this, there must be $1$ in the number among the first $k+1$ digits (because we can delete no more than $k$ of them). If $1$ is not among the first $k+1$ digits, then we proceed to check $2$, then $3$ and so on. Otherwise, let's say that this digit is the first digit of the answer, and remove all the digits before it and itself from the number $x$, reducing the value of $k$. For example, if $x=741819$ and $k=4$, then after the operation $ans=1$, $x=819$ and $k=2$. Thus, we get the same problem, but with new values of $x$ and $k$, which is solved by the same algorithm (searching for the minimum first digit). The only difference is that after the first such iteration, the first digit can be equal to $0$. It remains to understand how to quickly check whether the digit $d$ is among the first $k+1$ positions of the number. To do this, let's write, for each digit, its positions in the original number $x$. At each iteration of the algorithm, the current value of $x$ is some suffix of the original $x$, let it be the suffix of positions $[lst; n]$, where $n$ is the length of the original $x$. Now to check the digit, it is enough to look into its array of positions and find the minimum occurrence greater than or equal to $lst$. To do this, you can use binary search or delete all positions that are strictly less than $lst$ from the array or set (because the value of $lst$ only increases for subsequent iterations of the algorithm). Thus, the solution works in $O(n)$ or $O(n\log{n})$ time.
[ "greedy" ]
1,500
#include <bits/stdc++.h> using namespace std; int main() { ios::sync_with_stdio(false); cin.tie(0); int t; cin >> t; while (t--) { string x; cin >> x; int k; cin >> k; int n = x.size(); vector<vector<int>> pos(10); for (int i = 0; i < n; ++i) pos[x[i] - '0'].push_back(i); for (int i = 0; i < 10; ++i) reverse(pos[i].begin(), pos[i].end()); string ans; int lst = 0, len = n - k; for (int i = 0; i < len; ++i) { for (int d = (i == 0); d <= 9; ++d) { while (!pos[d].empty() && pos[d].back() < lst) pos[d].pop_back(); if (!pos[d].empty() && pos[d].back() - lst <= k) { ans += d + '0'; k -= pos[d].back() - lst; lst = pos[d].back() + 1; break; } } } cout << ans << '\n'; } }
1766
A
Extremely Round
Let's call a positive integer extremely round if it has only one non-zero digit. For example, $5000$, $4$, $1$, $10$, $200$ are extremely round integers; $42$, $13$, $666$, $77$, $101$ are not. You are given an integer $n$. You have to calculate the number of extremely round integers $x$ such that $1 \le x \le n$.
There are many ways to solve this problem. The most naive one (iterating through all numbers from $1$ to $n$ in each test case and checking if they are extremely round) fails, since it is $O(tn)$, but you can optimize it by noticing that extremely round numbers are rare. So, for example, we can iterate through all numbers from $1$ to $999999$ once, remember which ones are extremely round, store them into an array, and while answering the test case, only check the numbers from the array we have created. There is also a solution in $O(1)$ per test case with a formula, try to invent it yourself.
[ "brute force", "implementation" ]
800
def check(x): s = str(x) cnt = 0 for c in s: if c != '0': cnt += 1 return cnt == 1 a = [] for i in range(1, 1000000): if check(i): a.append(i) t = int(input()) for i in range(t): n = int(input()) ans = 0 for x in a: if x <= n: ans += 1 print(ans)
1766
B
Notepad#
You want to type the string $s$, consisting of $n$ lowercase Latin letters, using your favorite text editor Notepad#. Notepad# supports two kinds of operations: - append any letter \textbf{to the end} of the string; - copy a \textbf{continuous} substring of an already typed string and paste this substring \textbf{to the end} of the string. Can you type string $s$ in \textbf{strictly less} than $n$ operations?
Why does the problem ask us only to check if we can do less than $n$ operations instead of just asking the minimum amount? That must be making the problem easier, so let's focus our attention on that. What if it was $\le n$ instead of $< n$? Well, then the problem would be trivial. You can type the word letter by letter and be done in $n$ operations. So we only have to save one operation. In order to save at least one operation, we have to use the copy operation and copy more than one character in that. Let's take a closer look at any of the copy operations we do. Basically, it has to be a substring that has at least two non-intersecting occurrences in the string. Thus, if the string has any substring that has length at least two that appears at least twice in the string, we can copy it, and the answer will be "YES". That's still not enough to solve the problem - we'd have to check all substrings, which is $O(n^2)$. Let's think further. Imagine we found a substring that works. Let it have length $k$. Notice how you can remove its last character, obtaining a substring of length $k-1$, and it will still occur in the same set of positions (possibly, even more occurrences will be found). Remove characters until the substring has length $2$. Thus, if any appropriate substring exists, an appropriate substring of length $2$ also exists. Finally, we can check if there exists a substring of length $2$ that appears at least twice in the string so that the occurrences are at least $2$ apart. That can be done with a set/hashset or a map/hashmap. Some implementations might require careful handling of the substrings of kind "aa", "bb" and similar. Overall complexity: $O(n)$ or $O(n \log n)$ per testcase.
[ "implementation" ]
1,000
for _ in range(int(input())): n = int(input()) s = input() cur = {} for i in range(n - 1): t = s[i:i+2] if t in cur: if cur[t] < i - 1: print("YES") break else: cur[t] = i else: print("NO")
1766
C
Hamiltonian Wall
Sir Monocarp Hamilton is planning to paint his wall. The wall can be represented as a grid, consisting of $2$ rows and $m$ columns. Initially, the wall is completely white. Monocarp wants to paint a black picture on the wall. In particular, he wants cell $(i, j)$ (the $j$-th cell in the $i$-th row) to be colored black, if $c_{i, j} =$ 'B', and to be left white, if $c_{i, j} =$ 'W'. Additionally, he wants each column to have at least one black cell, so, for each $j$, the following constraint is satisfied: $c_{1, j}$, $c_{2, j}$ or both of them will be equal to 'B'. In order for the picture to turn out smooth, Monocarp wants to place down a paint brush in some cell $(x_1, y_1)$ and move it along the path $(x_1, y_1), (x_2, y_2), \dots, (x_k, y_k)$ so that: - for each $i$, $(x_i, y_i)$ and $(x_{i+1}, y_{i+1})$ share a common side; - all black cells appear in the path \textbf{exactly once}; - white cells don't appear in the path. Determine if Monocarp can paint the wall.
Why is there a constraint of each column having at least one black cell? Does the problem change a lot if there were white columns? Well, if such a column was in between some black cells, then the answer would be "NO". If it was on the side of the grid, you could remove it and proceed to solve without it. So, that doesn't really change the problem other than removing some casework. Let's try to fix a start. Find a column that has only one black cell in it. If there are no such columns, the answer is immediately "YES". Otherwise, the path will always go through it in known directions: to the left and to the right (if both of them exist). Let's solve the problem separately for the left part of the path and for the right one - find a path that starts to the left of it and covers everything to the left and the same for the right part. Consider the right part. If the next column also has one black cell, then we can determine where to go uniquely. If this cell is on the opposite row, then the answer is "NO". Otherwise, go there and proceed further. Let it have two black cells now. Find the entire two black row rectangle of maximum size that starts there. If there's nothing after it, you can easily traverse it any way you like. Otherwise, you have to traverse it in such a way that you end up in its last column, then go to the right from there. Turns out, there's only one way to achieve that. Go up/down to another row, go right, up/down to another row, right and so on. Now you just have to check if you end up in the correct row. Thus, you can simulate the path to the left and to the right and check if you never get stuck. Overall complexity: $O(n)$ per testcase.
[ "dp", "implementation" ]
1,300
for _ in range(int(input())): n = int(input()) s = [input() for i in range(2)] pos = -1 for i in range(n): if s[0][i] != s[1][i]: pos = i if pos == -1: print("YES") continue ok = True cur = 0 if s[0][pos] == 'B' else 1 for i in range(pos + 1, n): if s[cur][i] == 'W': ok = False if s[cur ^ 1][i] == 'B': cur ^= 1 cur = 0 if s[0][pos] == 'B' else 1 for i in range(pos - 1, -1, -1): if s[cur][i] == 'W': ok = False if s[cur ^ 1][i] == 'B': cur ^= 1 print("YES" if ok else "NO")
1766
D
Lucky Chains
Let's name a pair of positive integers $(x, y)$ lucky if the greatest common divisor of them is equal to $1$ ($\gcd(x, y) = 1$). Let's define a chain induced by $(x, y)$ as a sequence of pairs $(x, y)$, $(x + 1, y + 1)$, $(x + 2, y + 2)$, $\dots$, $(x + k, y + k)$ for some integer $k \ge 0$. The length of the chain is the number of pairs it consists of, or $(k + 1)$. Let's name such chain lucky if all pairs in the chain are lucky. You are given $n$ pairs $(x_i, y_i)$. Calculate for each pair the length of the longest lucky chain induced by this pair. Note that if $(x_i, y_i)$ is not lucky itself, the chain will have the length $0$.
Suppose, $\gcd(x + k, y + k) = g$. It means that $(y + k) - (x + k) = (y - x)$ is also divisible by $g$, or $\gcd(x + k, y - x) = h$ is divisible by $g$. And backward: if $\gcd(x + k, y - x) = h$, then $(x + k) + (y - x) = (y + k)$ is also divisible by $h$, or $\gcd(x + k, y + k) = g$ is divisible by $h$. Since $h$ is divisible by $g$ and $g$ is divisible by $h$, so $h = g$. In other words, we proved that $\gcd(x + k, y + k) = \gcd(x + k, y - x)$. Now, knowing the equivalence above, we can understand that we are looking for the smallest $k \ge 0$ such that $\gcd(x + k, y - x) > 1$. In other words, we are searching $k$ such that $x + k$ is divisible by some $d > 1$, where $d$ is some divisor of $(y - x)$. The problem is that there are a handful of divisors for some $(y - x)$. But we can note that we can consider only prime divisors of $(y - x)$: if $d | (y - x)$ and $d$ is composite then there is some prime $p | d$, thus $p | (y - x)$. It's easy to prove that there are no more than $\log_2{n}$ prime divisors of some $n$. Now the question is how to find all these prime divisors. Note that if you know only one prime divisor for each value from $1$ to $n$, then you can find all prime divisors for all $k \le n$ in $O(\log{k})$. The prime divisors $p_i$ are next: $p_1 = minD[k]$, $k_1 = \frac{k}{minD[k]}$; $p_2 = minD[k_1]$, $k_2 = \frac{k_1}{minD[k_1]}$; $p_3 = minD[k_2]$, $k_3 = \frac{k_2}{minD[k_2]}$; and so on until $k_i > 1$. The final step is to calculate a prime divisor $minD[i]$ for each value from $1$ to $A$, where $A \ge \max(y_i)$ or $A \ge 10^7$. We can do it by slight modifications of Sieve of Eratosthenes: at the step, where you have some prime $p$ and want to "throw out" all values $k \cdot p$, set $minD[kp] = p$ for each $kp$ (plus set $minD[p] = p$). As a result, we, firstly, calculate Sieve in $O(N \log{\log{N}})$ and, secondly, calculate answer for each pair $(x_i, y_i)$ in $O(\log{N})$. 
Note that the input and output are large, so you should use tricks to speed up your input and output.
[ "math", "number theory" ]
1,600
#include<bits/stdc++.h> using namespace std; #define fore(i, l, r) for(int i = int(l); i < int(r); i++) #define sz(a) int((a).size()) typedef long long li; const int INF = int(1e9); const int N = int(1e7) + 5; int mind[N]; void precalc() { fore (i, 0, N) mind[i] = i; for (int p = 2; p < N; p++) { if (mind[p] != p) continue; for (int d = 2 * p; d < N; d += p) mind[d] = min(mind[d], p); } } int x, y; inline bool read() { if(!(cin >> x >> y)) return false; return true; } vector<int> getPrimes(int v) { vector<int> ps; while (v > 1) { if (ps.empty() || ps.back() != mind[v]) ps.push_back(mind[v]); v /= mind[v]; } return ps; } inline void solve() { int d = y - x; if (d == 1) { cout << -1 << '\n'; return; } int r = INF; for (int p : getPrimes(d)) r = min(r, ((x + p - 1) / p) * p); cout << r - x << '\n'; } int main() { #ifdef _DEBUG freopen("input.txt", "r", stdin); int tt = clock(); #endif ios_base::sync_with_stdio(false); cin.tie(0), cout.tie(0); cout << fixed << setprecision(15); precalc(); int t; cin >> t; while (t--) { read(); solve(); #ifdef _DEBUG cerr << "TIME = " << clock() - tt << endl; tt = clock(); #endif } return 0; }
1766
E
Decomposition
For a sequence of integers $[x_1, x_2, \dots, x_k]$, let's define its decomposition as follows: Process the sequence from the first element to the last one, maintaining the list of its subsequences. When you process the element $x_i$, append it to the end of the \textbf{first} subsequence in the list such that the bitwise AND of its last element and $x_i$ is greater than $0$. If there is no such subsequence in the list, create a new subsequence with only one element $x_i$ and append it to the end of the list of subsequences. For example, let's analyze the decomposition of the sequence $[1, 3, 2, 0, 1, 3, 2, 1]$: - processing element $1$, the list of subsequences is empty. There is no subsequence to append $1$ to, so we create a new subsequence $[1]$; - processing element $3$, the list of subsequences is $[[1]]$. Since the bitwise AND of $3$ and $1$ is $1$, the element is appended to the first subsequence; - processing element $2$, the list of subsequences is $[[1, 3]]$. Since the bitwise AND of $2$ and $3$ is $2$, the element is appended to the first subsequence; - processing element $0$, the list of subsequences is $[[1, 3, 2]]$. There is no subsequence to append $0$ to, so we create a new subsequence $[0]$; - processing element $1$, the list of subsequences is $[[1, 3, 2], [0]]$. There is no subsequence to append $1$ to, so we create a new subsequence $[1]$; - processing element $3$, the list of subsequences is $[[1, 3, 2], [0], [1]]$. Since the bitwise AND of $3$ and $2$ is $2$, the element is appended to the first subsequence; - processing element $2$, the list of subsequences is $[[1, 3, 2, 3], [0], [1]]$. Since the bitwise AND of $2$ and $3$ is $2$, the element is appended to the first subsequence; - processing element $1$, the list of subsequences is $[[1, 3, 2, 3, 2], [0], [1]]$. The element $1$ cannot be appended to any of the first two subsequences, but can be appended to the third one. 
The resulting list of subsequences is $[[1, 3, 2, 3, 2], [0], [1, 1]]$. Let $f([x_1, x_2, \dots, x_k])$ be the number of subsequences the sequence $[x_1, x_2, \dots, x_k]$ is decomposed into. Now, for the problem itself. You are given a sequence $[a_1, a_2, \dots, a_n]$, where each element is an integer from $0$ to $3$. Let $a[i..j]$ be the sequence $[a_i, a_{i+1}, \dots, a_j]$. You have to calculate $\sum \limits_{i=1}^n \sum \limits_{j=i}^n f(a[i..j])$.
Let's assume that we don't have any zeroes in our array. We'll deal with them later. The key observation is that the number of sequences in the decomposition is not more than $3$. To prove this, we can use the fact that each element $3$ will be appended to the first subsequence in the decomposition; so, if the second/third subsequence in the decomposition ends with the number $2$ or $1$, all such numbers can be appended to that subsequence, thus they won't create any new subsequences. So, if we consider the combination of the last elements in the subsequences of the decomposition, there are only $3^3 + 3^2 + 3^1 + 3^0 = 40$ such combinations (even less in practice). Okay, now let's try to use the fact that the number of such combinations is small. There are many ways to abuse it, but, in my opinion, the most straightforward one (and also a bit slow, but fast enough to easily pass the time limit) is to run the following dynamic programming: $dp_{i,c}$, where $i$ is the index of the element we are processing, and $c$ is the vector representing the combination of last elements of subsequences in the decomposition. But it's not clear what do we store in this dynamic programming. The model solution stores the total number of subsequences added to the decomposition, if right now the state of decomposition is $c$, we process the $i$-th element, and we consider all possible stopping points (i. e. we will consider the number of subsequences added while processing the elements $a[i..i], a[i..i+1], a[i..i+2], \dots, a[i..n]$). So, our dynamic programming automatically sums up the answers for all possible right borders of the segment we decompose. 
Transitions in this dynamic programming is easy: we need to see how does the element $a_i$ alter the state of decomposition $c$ (let it change it to $c'$), take the value of $dp_{i+1, c'}$, and if the element $a_i$ forms a new subsequence, let's account for it by increasing $dp_{i,c}$ by $n-i+1$, because this increase will affect $n-i+1$ different right endpoints of the segment we decompose. And now it's easy to see how to add zeroes to our solution. We can just assume they don't change the state of decomposition, they simply add a new subsequence which won't take any other elements. So, in our transitions, processing $0$ means that $c' = c$, but the size of decomposition increases. To actually get the answer to the problem, we need to consider all possible starting points of the segment, so we sum up $dp_{i,o}$ (where $o$ is the empty vector) for all $i \in [1, n]$.
[ "binary search", "brute force", "data structures", "divide and conquer", "dp", "two pointers" ]
2,300
#include <bits/stdc++.h>
using namespace std;

const int N = 300043;

int n;
int v[N];
// dp[i] maps a decomposition state (last elements of the open subsequences,
// in order of creation) to the memoized answer of calc(i, state).
map<vector<int>, long long> dp[N];

// Feed element x into decomposition state st.
// Returns {number of freshly opened subsequences (0 or 1), new state}.
pair<int, vector<int>> go(vector<int> st, int x) {
    if (x == 0)
        return {1, st};  // a zero always starts its own subsequence; state is unchanged
    for (size_t j = 0; j < st.size(); j++) {
        if ((st[j] & x) > 0) {  // shares a bit with this subsequence's last element
            st[j] = x;
            return {0, st};
        }
    }
    st.push_back(x);  // no compatible subsequence found — open a new one
    return {1, st};
}

// Total number of subsequences created while decomposing v[i..r], summed over
// every right border r in [i, n-1], given the current decomposition state st.
long long calc(int i, vector<int> st) {
    if (i == n)
        return 0ll;
    auto it = dp[i].find(st);
    if (it != dp[i].end())
        return it->second;
    auto step = go(st, v[i]);
    // A subsequence opened at position i is counted once for each of the
    // n - i possible right endpoints i, i+1, ..., n-1.
    long long res = step.first * 1ll * (n - i) + calc(i + 1, step.second);
    return dp[i][st] = res;
}

int main() {
    scanf("%d", &n);
    for (int i = 0; i < n; i++)
        scanf("%d", &v[i]);
    // Sum over every possible left border of the segment being decomposed.
    long long total = 0;
    for (int i = 0; i < n; i++)
        total += calc(i, vector<int>());
    printf("%lld\n", total);
}
1766
F
MCF
You are given a graph consisting of $n$ vertices and $m$ directed arcs. The $i$-th arc goes from the vertex $x_i$ to the vertex $y_i$, has capacity $c_i$ and weight $w_i$. No arc goes into the vertex $1$, and no arc goes from the vertex $n$. There are no cycles of negative weight in the graph (it is impossible to travel from any vertex to itself in such a way that the total weight of all arcs you go through is negative). You have to assign each arc a flow (an integer between $0$ and its capacity, inclusive). For every vertex \textbf{except $1$ and $n$}, the total flow on the arcs going to this vertex must be equal to the total flow on the arcs going from that vertex. Let the flow on the $i$-th arc be $f_i$, then the cost of the flow is equal to $\sum \limits_{i = 1}^{m} f_i w_i$. You have to find a flow which \textbf{minimizes} the cost. Sounds classical, right? Well, we have some additional constraints on the flow on every edge: - if $c_i$ is even, $f_i$ must be even; - if $c_i$ is odd, $f_i$ must be odd. Can you solve this problem?
This problem is solved using minimum cost flows (duh). Suppose all arcs have even capacity. Then we can just divide each arc's capacity by $2$ and solve a usual minimum cost flow problem. However, when we have arcs with odd capacity, it's not that simple. We will deal with them as follows: split an arc with capacity $2k+1$ into two arcs: one with capacity $2k$, the other with capacity $1$, and somehow enforce that the second arc must be saturated. We cannot divide all arcs by $2$ now, because that would lead to non-integer capacities; instead, we will exclude these arcs with capacity $1$ and somehow handle the fact that they must be saturated, and only then divide all capacities by $2$. Okay, how do we handle the edges we deleted? For each vertex, let's check if the number of such arcs connected to it is even. If it is not - the total flow for this vertex cannot be $0$, so it's impossible to find the answer (the only case when it might be possible is if this vertex is the source or the sink; in this case, we need to check that both of these vertices have an odd number of arcs we want to delete connected to them, and consider an additional arc $1 \rightarrow n$ with capacity $1$ and weight $0$ to make it even). If for each vertex, the number of odd arcs connected to it is even, let's consider how much excess flow these arcs bring into the vertices. For example, if a vertex has $4$ ingoing odd arcs, it has $4$ units of flow going into it, which will be lost if we remove the edges we want to ignore. To handle this, add a new source and a new sink to our network (let's call them $s$ and $t$), and process excess flow going into the vertex using an arc from $s$ to that vertex (in the previous example, we can add an arc from $s$ to the vertex with capacity $2$ - not $4$ since we divide all capacities by $2$). Similarly, excess flow going outside the vertex can be processed with an arc from that vertex to $t$. We need to make sure that all these edges must be saturated. 
Okay, what about actually running the flow from $1$ to $n$? We can do it as in the "flow with lower bounds" problem by adding an arc $n \rightarrow 1$ with infinite capacity... Wait a minute, this may cause a negative cycle to appear! If your implementation of mincost flow handles them, you can use this approach; but if you don't want to mess with negative cycles, instead do the following: add an arc $s \rightarrow 1$ and an arc $n \rightarrow t$, both with infinite capacities, to make sure that flow can go from $1$ to $n$; since these arcs don't have to be saturated, but other arcs going from $s$ or into $t$ must be saturated, set the costs of these "other" arcs to $-10^9$. Okay, that's it - we just need to find the minimum cost flow in the resulting network. The constraints are low enough so any minimum cost flow algorithm can pass.
[ "flows" ]
2,800
#include <bits/stdc++.h>
using namespace std;

const int N = 243;

// Residual-network arc: head y, capacity c, weight w, current flow f.
struct edge {
    int y, c, w, f;
    edge() {};
    edge(int y, int c, int w, int f) : y(y), c(c), w(w), f(f) {};
};

vector<edge> e;    // arcs 2k and 2k+1 always form a forward/reverse pair
vector<int> g[N];  // adjacency lists of indices into e

// Residual capacity of arc x.
int rem(int x) { return e[x].c - e[x].f; }

// Add forward arc x->y (capacity c, weight w) plus its 0-capacity reverse arc.
void add_edge(int x, int y, int c, int w) {
    g[x].push_back(e.size());
    e.push_back(edge(y, c, w, 0));
    g[y].push_back(e.size());
    e.push_back(edge(x, 0, -w, 0));
}

int n, m, s, t, v;  // v = number of vertices incl. the added super source/sink

// Successive shortest augmenting paths, shortest path found with SPFA
// (handles the negative arc weights -1e9 used to force saturation).
// Augments only while the cheapest s->t path has negative cost;
// returns {total flow, total cost}.
pair<int, long long> MCMF() {
    int flow = 0;
    long long cost = 0;
    while(true) {
        vector<long long> d(v, (long long)(1e18));
        vector<int> p(v, -1);   // predecessor vertex on the shortest path
        vector<int> pe(v, -1);  // arc used to reach the vertex
        queue<int> q;
        vector<bool> inq(v);
        q.push(s);
        inq[s] = true;
        d[s] = 0;
        while(!q.empty()) {
            int k = q.front();
            q.pop();
            inq[k] = false;
            for(auto ei : g[k]) {
                if(rem(ei) == 0) continue;
                int to = e[ei].y;
                int w = e[ei].w;
                if(d[to] > d[k] + w) {
                    d[to] = d[k] + w;
                    p[to] = k;
                    pe[to] = ei;
                    if(!inq[to]) { inq[to] = true; q.push(to); }
                }
            }
        }
        // Stop when t is unreachable or the cheapest path is non-negative:
        // pushing more flow could only increase the cost.
        if(p[t] == -1 || d[t] >= 0) break;
        flow++;
        cost += d[t];
        // Push one unit of flow back along the recorded path.
        int cur = t;
        while(cur != s) {
            e[pe[cur]].f++;
            e[pe[cur] ^ 1].f--;  // the paired reverse arc
            cur = p[cur];
        }
    }
    return make_pair(flow, cost);
}

void no_answer() { cout << "Impossible" << endl; exit(0); }

int main() {
    cin >> n >> m;
    vector<int> excess_flow(n, 0);  // net forced flow per vertex from odd arcs
    vector<int> orc(m);             // original capacities, to restore parity on output
    for(int i = 0; i < m; i++) {
        int x, y, c, w;
        cin >> x >> y >> c >> w;
        orc[i] = c;
        --x; --y;
        // Capacities are halved; an odd arc additionally carries one forced unit,
        // recorded as excess leaving x and entering y.
        add_edge(x, y, c / 2, w);
        if(c % 2 == 1) { excess_flow[x]--; excess_flow[y]++; }
    }
    s = n; t = n + 1; v = n + 2;  // super source and super sink
    int total_excess = 0;
    // Vertex 1 has no ingoing arcs, so its excess is <= 0; in C++ an odd
    // negative value satisfies % 2 == -1. An odd source excess is paired with
    // the sink via a virtual 1->n arc of capacity 1 and weight 0 (which is
    // automatically saturated and never printed).
    if(excess_flow[0] % 2 == -1) { excess_flow[0]--; excess_flow[n - 1]++; }
    for(int i = 0; i < n; i++) {
        // An odd number of odd arcs at an inner vertex can never balance.
        if(excess_flow[i] % 2 != 0) no_answer();
        int val = abs(excess_flow[i]) / 2;  // halved, like all other capacities
        if(excess_flow[i] > 0) {
            total_excess += val;  // (computed but not used afterwards)
            // Must-saturate arc: huge negative cost makes MCMF fill it.
            add_edge(s, i, val, -int(1e9));
        }
        if(excess_flow[i] < 0) {
            add_edge(i, t, val, -int(1e9));
        }
    }
    // Wide free arcs so flow can enter at vertex 1 and leave at vertex n;
    // unlike the -1e9 arcs these need not be saturated.
    add_edge(s, 0, 100000, 0);
    add_edge(n - 1, t, 100000, 0);
    auto ans = MCMF();
    // Feasibility check: every must-saturate arc has to be full.
    bool good_answer = true;
    for(int x = 0; x < e.size(); x++)
        if(e[x].w == -int(1e9) && rem(x) != 0) good_answer = false;
    if(!good_answer)
        no_answer();
    cout << "Possible" << endl;
    // Forward arcs of the original graph sit at even indices; each stored unit
    // represents 2 real units, and odd original capacity adds back the forced unit.
    for(int i = 0; i < 2 * m; i += 2) {
        if(i) cout << " ";
        cout << e[i].f * 2 + orc[i / 2] % 2;
    }
    cout << endl;
}
1767
A
Cut the Triangle
You are given a non-degenerate triangle (a non-degenerate triangle is a triangle with positive area). The vertices of the triangle have coordinates $(x_1, y_1)$, $(x_2, y_2)$ and $(x_3, y_3)$. You want to draw a straight line to cut the triangle into \textbf{two non-degenerate triangles}. Furthermore, the line you draw should be \textbf{either horizontal or vertical}. Can you draw the line to meet all the constraints? Here are some suitable ways to draw the line: However, these ways to draw the line are not suitable (the first line cuts the triangle into a triangle and a quadrangle; the second line doesn't cut the triangle at all; the third line is neither horizontal nor vertical):
The line we draw must go through a triangle's vertex; otherwise, two sides of the triangle are split, and one of the resulting parts becomes a quadrilateral. So we need to check if it is possible to make a horizontal or vertical cut through a vertex. A horizontal cut is possible if all $y$-coordinates are different (we can draw it through a vertex with the median $y$-coordinate); a vertical cut is possible if all $x$-coordinates are different (we can draw it through a vertex with the median $x$-coordinate). So, all we need to check is the following pair of conditions: all $x_i$ are different; all $y_i$ are different.
[ "implementation" ]
800
# For each test case: a horizontal or vertical cut through a vertex exists
# iff all three x-coordinates are distinct or all three y-coordinates are.
t = int(input())
for _ in range(t):
    input()  # blank separator line before each test case
    points = [tuple(map(int, input().split())) for _ in range(3)]
    distinct_x = {p[0] for p in points}
    distinct_y = {p[1] for p in points}
    print('YES' if len(distinct_x) == 3 or len(distinct_y) == 3 else 'NO')