contest_id
stringlengths
1
4
index
stringclasses
43 values
title
stringlengths
2
63
statement
stringlengths
51
4.24k
tutorial
stringlengths
19
20.4k
tags
listlengths
0
11
rating
int64
800
3.5k
code
stringlengths
46
29.6k
1534
D
Lost Tree
This is an interactive problem. Little Dormi was faced with an awkward problem at the carnival: he has to guess the edges of an unweighted tree of $n$ nodes! The nodes of the tree are numbered from $1$ to $n$. The game master only allows him to ask one type of question: - Little Dormi picks a node $r$ ($1 \le r \le n$), and the game master will reply with an array $d_1, d_2, \ldots, d_n$, where $d_i$ is the length of the shortest path from node $r$ to $i$, for all $1 \le i \le n$. Additionally, to \sout{make the game unfair} challenge Little Dormi the game master will allow at most $\lceil\frac{n}{2}\rceil$ questions, where $\lceil x \rceil$ denotes the smallest integer greater than or equal to $x$. Faced with the stomach-churning possibility of not being able to guess the tree, Little Dormi needs your help to devise a winning strategy! Note that the game master creates the tree before the game starts, and does not change it during the game.
If we had $n$ queries, solving this problem would be easy as we could just query every single node and add edges when $d_i=1$. However, notice that as long as we make a query for at least $1$ endpoint of every edge, we will be able to find all the edges using this method. Observe that a tree is bipartite, so we would be able to achieve a bound of $\lceil \frac{n}{2} \rceil$ as long as we only query the smaller bipartite set. To figure out which set is smaller, we can just query any node and look at which nodes have odd depth and which ones have even depth. Lastly, be careful with your queries so that your worst-case bound is $\lceil \frac{n}{2} \rceil$ rather than $\lfloor \frac{n}{2} \rfloor + 1$. One way to do this is to not include the initial node you query in either bipartite set (so you are effectively working with $n-1$ nodes rather than $n$). Time complexity: $\mathcal{O}(n^2)$
[ "constructive algorithms", "interactive", "trees" ]
1,800
// Codeforces 1534 D — Lost Tree (interactive).
// Query node 1 to learn depth parity of every node; the tree is bipartite, so
// querying every node of the smaller parity class (node 1 excluded) reveals an
// endpoint of every edge within the ceil(n/2) query budget.
#include "bits/stdc++.h"
#include <random>
using namespace std;

const int MAXN = 2001;

int n;
int dist_from[MAXN];                     // distances returned by the last query
mt19937 rng(69696969);                   // fixed seed, same sequence as before
vector<pair<int, int>> found_edges;

// Ask the judge for all distances from `root`; fills dist_from[1..n].
void query(int root) {
    cout << ('?') << ' ' << (root) << '\n';
    cout.flush();
    for (int v = 1; v <= n; ++v) cin >> dist_from[v];
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    cin >> n;

    // First query from node 1 establishes the bipartition by depth parity.
    query(1);
    vector<int> side[2];
    for (int v = 2; v <= n; ++v) side[dist_from[v] & 1].push_back(v);
    if (side[0].size() > side[1].size()) swap(side[0], side[1]);

    // Edges incident to node 1 are exactly the distance-1 nodes.
    for (int v = 1; v <= n; ++v)
        if (dist_from[v] == 1) found_edges.emplace_back(1, v);

    // Query every node of the smaller class; each tree edge has an endpoint there.
    for (int u : side[0]) {
        query(u);
        for (int v = 1; v <= n; ++v) {
            if (dist_from[v] == 1) {
                int a = u, b = v;
                if (a > b) swap(a, b);
                found_edges.emplace_back(a, b);
            }
        }
    }

    // Deduplicate, then randomize output order/orientation (cosmetic; judge
    // accepts edges in any order) using the same RNG call sequence as before.
    sort(found_edges.begin(), found_edges.end());
    found_edges.resize(unique(found_edges.begin(), found_edges.end()) - found_edges.begin());
    shuffle(found_edges.begin(), found_edges.end(), rng);
    uniform_int_distribution<int> coin(0, 1);
    for (auto &[a, b] : found_edges)
        if (coin(rng)) swap(a, b);

    cout << ('!') << '\n';
    for (auto &[a, b] : found_edges) cout << (a) << ' ' << (b) << '\n';
}
1534
E
Lost Array
This is an interactive problem. Note: the XOR-sum of an array $a_1, a_2, \ldots, a_n$ ($1 \le a_i \le 10^9$) is defined as $a_1 \oplus a_2 \oplus \ldots \oplus a_n$, where $\oplus$ denotes the bitwise XOR operation. Little Dormi received an array of $n$ integers $a_1, a_2, \ldots, a_n$ for Christmas. However, while playing with it over the winter break, he accidentally dropped it into his XOR machine, and the array got lost. The XOR machine is currently configured with a query size of $k$ (which you cannot change), and allows you to perform the following type of query: by giving the machine $k$ \textbf{distinct} indices $x_1, x_2, \ldots, x_k$, it will output $a_{x_1} \oplus a_{x_2} \oplus \ldots \oplus a_{x_k}$. As Little Dormi's older brother, you would like to help him recover the \textbf{XOR-sum} of his array $a_1, a_2, \ldots, a_n$ by querying the XOR machine. Little Dormi isn't very patient, so to be as fast as possible, you must query the XOR machine the \textbf{minimum} number of times to find the XOR-sum of his array. Formally, let $d$ be the minimum number of queries needed to find the XOR-sum of any array of length $n$ with a query size of $k$. Your program will be accepted if you find the correct XOR-sum in at most $d$ queries. Lastly, you also noticed that with certain configurations of the machine $k$ and values of $n$, it may not be possible to recover the XOR-sum of Little Dormi's lost array. If that is the case, you should report it as well. The array $a_1, a_2, \ldots, a_n$ is fixed before you start querying the XOR machine and does not change with the queries.
"tl;dr it's pure BFS" The only information we can obtain from a query is the XOR sum of the subset we queried. We can also try and obtain some frequency information about the bits (whether the number of bits at position $i$ is odd for every bit $i$), but trying to combine frequency information is ultimately analogous to just combining the XOR sums of different subsets. Additionally, trying to combine XOR sums with other operations such as OR and AND will cause us to lose the information about the XOR sum of the combined subsets, and are thus not useful. Thus, the only way to find the answer is to repeated query different subsets until their XOR sum includes every element in the array. To find the minimum number of queries, we can run a BFS to find the shortest number of queries to go from the XOR sum of $0$ numbers to the XOR sum of all of them. Observe that only the number of values currently present in the XOR sum matters, as we can query the XOR sum of any subset of numbers in the array as long as it has size $k$. In our graph, we can transition from the XOR sum of a subset of size $i$ by picking $j$ ($0 \le j \le k$) selected numbers and $k-j$ unselected numbers. This changes the size of our subset by $k-2j$. Time Complexity: $\mathcal{O}(nk)$
[ "graphs", "greedy", "interactive", "shortest paths" ]
2,300
// Codeforces 1534 E — Lost Array (interactive).
// BFS over "how many array positions are currently folded into the running
// XOR": one query of size K flips j already-included positions and K-j fresh
// ones, moving c -> c + (K-j) - j. Reconstruct the shortest 0 -> N path and
// replay it as actual queries, XOR-ing all responses together.
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
const ll INF = 0x3f3f3f3f;
const int MN = 2001;
int N, K, dis[MN], par[MN];

int main() {
    cin >> N >> K;

    // BFS on the "number of selected positions" state space.
    memset(dis, 0x3f, sizeof dis);
    queue<int> bfs;
    dis[0] = 0;
    par[0] = -1;
    bfs.push(0);
    while (!bfs.empty()) {
        int cur = bfs.front();
        bfs.pop();
        for (int fresh = 0; fresh <= K; ++fresh) {   // fresh = unselected positions used
            if (fresh > N - cur || K - fresh > cur) continue;  // not enough of either kind
            int nxt = cur + fresh - (K - fresh);
            if (dis[nxt] != INF) continue;
            dis[nxt] = dis[cur] + 1;
            par[nxt] = cur;
            bfs.push(nxt);
        }
    }

    if (dis[N] == INF) {              // N can never be reached: impossible
        cout << "-1\n";
        return 0;
    }

    // Recover the BFS path 0 -> N.
    vector<int> path;
    for (int c = N; c != -1; c = par[c]) path.push_back(c);
    reverse(path.begin(), path.end());

    vector<int> sel, nosel(N);
    iota(nosel.begin(), nosel.end(), 1);   // every index starts unselected

    int ans = 0;
    for (int step = 0; step + 1 < (int)path.size(); ++step) {
        // Solve a - (K - a) = delta for a, the number of fresh indices used.
        int delta = path[step + 1] - path[step];
        assert((delta + K) % 2 == 0);
        int fresh = (delta + K) / 2, stale = K - fresh;

        vector<int> toSelect, toUnselect;
        while ((int)toSelect.size() < fresh) {
            toSelect.push_back(nosel.back());
            nosel.pop_back();
        }
        while ((int)toUnselect.size() < stale) {
            toUnselect.push_back(sel.back());
            sel.pop_back();
        }

        cout << "? ";
        for (int x : toSelect) cout << x << ' ';
        for (int x : toUnselect) cout << x << ' ';
        cout << '\n';
        cout.flush();

        // Queried stale indices leave the selection; fresh ones join it.
        nosel.insert(nosel.end(), toUnselect.begin(), toUnselect.end());
        sel.insert(sel.end(), toSelect.begin(), toSelect.end());

        int res;
        cin >> res;
        ans ^= res;      // XOR of all responses = XOR of the final selection
    }
    assert(nosel.empty());               // everything selected exactly once (mod 2)
    cout << "! " << ans << '\n';
    cout.flush();
}
1534
F1
Falling Sand (Easy Version)
\textbf{This is the easy version of the problem. The difference between the versions is the constraints on $a_i$. You can make hacks only if all versions of the problem are solved.} Little Dormi has recently received a puzzle from his friend and needs your help to solve it. The puzzle consists of an upright board with $n$ rows and $m$ columns of cells, some empty and some filled with blocks of sand, and $m$ non-negative integers $a_1,a_2,\ldots,a_m$ ($0 \leq a_i \leq n$). In this version of the problem, $a_i$ will be \textbf{equal to} the number of blocks of sand in column $i$. When a cell filled with a block of sand is disturbed, the block of sand will fall from its cell to the sand counter at the bottom of the column (each column has a sand counter). While a block of sand is falling, other blocks of sand that are adjacent at any point to the falling block of sand will also be disturbed and start to fall. Specifically, a block of sand disturbed at a cell $(i,j)$ will pass through all cells below and including the cell $(i,j)$ within the column, disturbing all adjacent cells along the way. Here, the cells adjacent to a cell $(i,j)$ are defined as $(i-1,j)$, $(i,j-1)$, $(i+1,j)$, and $(i,j+1)$ (if they are within the grid). Note that the newly falling blocks can disturb other blocks. In one operation you are able to disturb any piece of sand. The puzzle is solved when there are \textbf{at least} $a_i$ blocks of sand counted in the $i$-th sand counter for each column from $1$ to $m$. You are now tasked with finding the minimum amount of operations in order to solve the puzzle. Note that Little Dormi will never give you a puzzle that is impossible to solve.
Let's model the grid as a directed graph. Take every block of sand in the puzzle as a node. Now add an edge from a node $A$ to a node $B$ if: $B$ is the first block of sand below $A$. $B$ is the first block of sand next to or below $A$ and on the first column to the left of $A$. $B$ is the first block of sand next to or below $A$ and on the first column to the right of $A$. $B$ is on the cell directly above $A$. Within this graph, the nodes which are reachable from a node $A$ are identical to the set of blocks of sand which will be disturbed as a result of $A$ being disturbed. The question is then converted to, given a directed graph, what is the size of the smallest set of nodes such that all nodes within the graph are reachable from this set. To solve this, we can compress all strongly connected components in the graph (using Tarjan's Algorithm or Kosaraju's Algorithm), leaving us with a directed acyclic graph. From here, it can be observed that all nodes which have an in-degree of $0$ need to be disturbed manually, as no other nodes can disturb them. Moreover, as the graph is acyclic, it can also be proven that all nodes which have an in-degree greater than $0$ are reachable from a node that has an in-degree of $0$. As such, it is minimal and sufficient to disturb all nodes which have an in-degree of $0$. Proof: Take any node $X$ that does not have an in-degree of $0$. Take a node $Y$ that has an edge connecting it to $X$. This node must exist as $X$ has a positive in-degree. Now all nodes that can reach $Y$ can also reach $X$. Thus, we just need to prove that $Y$ is reachable from a node with in-degree $0$ to prove that $X$ is reachable from a node with in-degree $0$. Repeat this process on the node $Y$, taking it as the new $X$. Continue to do this until $X$ has an in-degree of $0$. 
This process must end and find such an $X$ because there are only a finite number of nodes in the graph, and any repeated node within this process would indicate that there is a cycle in the graph. However, as the graph is acyclic, this cannot happen. Once we have found this $X$, we can say that $X$ is reachable from a node with an in-degree of $0$ as it itself is a node with in-degree $0$. We can then follow the graph to prove that the first $X$ is reachable from a node with in-degree $0$. Thus, the answer is the number of nodes with in-degree $0$ in the compressed graph. Final Complexity: $\mathcal{O}(nm)$ or $\mathcal{O}(nm \cdot \log(nm))$ depending on implementation.
[ "dfs and similar", "graphs", "greedy" ]
2,500
// Codeforces 1534 F1 — Falling Sand (Easy Version).
// Build a directed graph over sand blocks where an edge A -> B means that
// disturbing A necessarily disturbs B:
//   * the block directly above A (it falls once A leaves its cell),
//   * the first block below A in the same column,
//   * the first block met in each adjacent column while A falls.
// Compress SCCs with Tarjan's algorithm; in the condensed DAG every component
// of in-degree 0 must be hit manually, and that count is the answer.
#include "bits/stdc++.h"
using namespace std;
using ll = long long;
using pii = pair<int,int>;
using pll = pair<ll,ll>;
template<typename T> int sz(const T &a){return int(a.size());}

const int MN = 4e5+1;
vector<vector<char>> arr;    // grid, '#' = sand
vector<vector<int>> ind;     // grid cell -> node id (1-based)
int am[MN];                  // a_i (read but unused beyond input in F1)
vector<int> adj[MN];         // disturbance graph
int nodecnt = 0;
int id[MN], low[MN];         // Tarjan discovery index / low-link
bool inst[MN];               // currently on the Tarjan stack?
vector<int> st;
int et;                      // discovery timer (starts at 1 so 0 = unvisited)
int in[MN];                  // node -> SCC id
vector<vector<int>> comps;   // SCC member lists; comps[0] is a dummy
int indeg[MN];               // SCC in-degree in the condensed DAG

// Tarjan's strongly-connected-components DFS.
void dfs(int loc) {
    id[loc] = low[loc] = et++;
    inst[loc] = true;
    st.push_back(loc);
    for (int x : adj[loc]) {
        if (!id[x]) { dfs(x); low[loc] = min(low[loc], low[x]); }
        else if (inst[x]) low[loc] = min(low[loc], id[x]);
    }
    if (id[loc] == low[loc]) {          // loc is the root of an SCC: pop it
        comps.push_back({});
        while (true) {
            int cur = st.back();
            st.pop_back();
            in[cur] = sz(comps) - 1;
            inst[cur] = false;
            comps.back().push_back(cur);
            if (cur == loc) break;
        }
    }
}

int main() {
    cin.tie(NULL);
    ios_base::sync_with_stdio(false);
    int n, m;
    cin >> n >> m;
    arr.resize(n+1, vector<char>(m+1));
    ind.resize(n+1, vector<int>(m+1));
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++) cin >> arr[i][j];
    for (int i = 1; i <= m; i++) cin >> am[i];

    // Assign a node id to every sand block.
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++)
            if (arr[i][j] == '#') ind[i][j] = ++nodecnt;

    // Build the disturbance edges.
    for (int i = 1; i <= n; i++) {
        for (int j = 1; j <= m; j++) {
            if (arr[i][j] != '#') continue;
            // Block directly above is disturbed when this one falls.
            if (i-1 >= 1 && arr[i-1][j] == '#')
                adj[ind[i][j]].push_back(ind[i-1][j]);
            // First block below in the same column.
            for (int k = i+1; k <= n; k++) {
                if (arr[k][j] == '#') {
                    adj[ind[i][j]].push_back(ind[k][j]);
                    break;
                }
            }
            // First block touched in each side column while falling; stop when
            // both sides are found or the next block of this column is reached.
            bool leftdone = false, rightdone = false;
            for (int k = i; k <= n && (!leftdone || !rightdone) && (arr[k][j] != '#' || k == i); k++) {
                if (j-1 >= 1 && !leftdone && arr[k][j-1] == '#')
                    adj[ind[i][j]].push_back(ind[k][j-1]), leftdone = true;
                if (j+1 <= m && !rightdone && arr[k][j+1] == '#')
                    adj[ind[i][j]].push_back(ind[k][j+1]), rightdone = true;
            }
        }
    }

    // Condense SCCs, then count components with no incoming edge.
    et = 1;
    comps.push_back({});   // dummy so SCC ids start at 1
    for (int i = 1; i <= nodecnt; i++)
        if (!id[i]) dfs(i);
    for (int i = 1; i < sz(comps); i++)
        for (int x : comps[i])
            for (int y : adj[x])
                if (in[y] != i) indeg[in[y]]++;
    int ans = 0;
    for (int i = 1; i < sz(comps); i++)
        if (indeg[i] == 0) ans++;
    printf("%d\n", ans);
    return 0;
}
1534
F2
Falling Sand (Hard Version)
\textbf{This is the hard version of the problem. The difference between the versions is the constraints on $a_i$. You can make hacks only if all versions of the problem are solved.} Little Dormi has recently received a puzzle from his friend and needs your help to solve it. The puzzle consists of an upright board with $n$ rows and $m$ columns of cells, some empty and some filled with blocks of sand, and $m$ non-negative integers $a_1,a_2,\ldots,a_m$ ($0 \leq a_i \leq n$). In this version of the problem, $a_i$ will always be \textbf{not greater than} the number of blocks of sand in column $i$. When a cell filled with a block of sand is disturbed, the block of sand will fall from its cell to the sand counter at the bottom of the column (each column has a sand counter). While a block of sand is falling, other blocks of sand that are adjacent at any point to the falling block of sand will also be disturbed and start to fall. Specifically, a block of sand disturbed at a cell $(i,j)$ will pass through all cells below and including the cell $(i,j)$ within the column, disturbing all adjacent cells along the way. Here, the cells adjacent to a cell $(i,j)$ are defined as $(i-1,j)$, $(i,j-1)$, $(i+1,j)$, and $(i,j+1)$ (if they are within the grid). Note that the newly falling blocks can disturb other blocks. In one operation you are able to disturb any piece of sand. The puzzle is solved when there are \textbf{at least} $a_i$ blocks of sand counted in the $i$-th sand counter for each column from $1$ to $m$. You are now tasked with finding the minimum amount of operations in order to solve the puzzle. Note that Little Dormi will never give you a puzzle that is impossible to solve.
Note: If you have not already read the editorial for $F1$, please do so, as this editorial continues on from where that editorial left off. Where $F2$ differs from $F1$ is that not all blocks of sand have to fall; instead, only some subset of them within each column need to fall. Let's go back to the Directed Acyclic Graph constructed in $F1$. Instead of all nodes being required to be reachable, we only need the nodes which are the first $a_i$ blocks of sand within each column (counting from the bottom) to be reachable. Let's denote the set which contains the nodes which have to be reachable as $S$. We can then observe that $S$ can be reduced down to just the $a_i^\texttt{th}$ block of sand within each column, as all blocks of sand below it will be disturbed when it is disturbed. Let us call the nodes that are left in this set "special nodes". From here, observe that any special node $X$ that is reachable from another special node $Y$ can be removed from $S$, as a disturbance of the node $Y$ will also guarantee that $X$ will be disturbed. Now remove all such nodes from $S$. Note that these nodes can be found with BFS or DFS. Now I claim that all nodes within the graph will reach some subarray of $S$ when $S$ is sorted by column index (if a node represents multiple nodes within the puzzle, take the left-most column). Let us denote this sorted array of $S$ by $A$. Proof: Imagine a node $X$ that does not reach an exact subsegment of $A$. This means that it must reach some node $i$ on the puzzle and then another node $j$ such that they are both in $A$ and none of the special nodes on the columns between them are reachable. Let's denote a random special node that is not reachable as $Z$. As you can only go directly from one column to the next, $X$ must reach some set of nodes on the columns between $i$ and $j$. Denote the reachable node which is in the same column as $Z$ as $K$. If $K$ is above $Z$ in the column, $Z$ is reachable from $K$, and thus reachable from $X$.
Thus, $K$ must be below $Z$. However, this means that $K$ is reachable from $Z$. As $K$ is used to reach either $i$ or $j$ depending on which side $X$ is on (WLOG, assume it is $j$), $j$ must also be reachable from $Z$. But since $Z$ is a special node, $j$ cannot be part of the set $S$ and array $A$. This creates a contradiction, proving that $X$ must reach an exact subsegment of $A$. Now run a dp on the directed acyclic graph and find the minimum and maximum index of $A$ that is reachable from every node. The question is then converted to: given some intervals, what is the minimum number of intervals that will be able to cover the entire range of $A$? This question is well known and can be solved in many ways. One of the simplest methods is a greedy approach. Loop from left to right on $A$, maintaining the rightmost index of an interval that starts before or at the current index, and also maintain the furthest right that any taken interval reaches. As soon as you reach an index that is not within the range of taken intervals, take the interval with the rightmost index and add one to the answer. The final answer is then the value after the loop has completed. Final Complexity: $\mathcal{O}(nm)$ or $\mathcal{O}(nm \cdot \log(nm))$ depending on implementation.
[ "dfs and similar", "dp", "graphs", "greedy" ]
3,000
// Codeforces 1534 F2 — Falling Sand (Hard Version).
// Same disturbance DAG as F1 (SCC-condensed). Only the a_i-th block from the
// bottom of each column ("special node") must fall. Special SCCs reachable
// from other special SCCs are discarded; each remaining SCC reaches a
// contiguous range of the surviving special columns (see editorial proof), so
// the answer is a greedy minimum interval cover of those ranges.
#include "bits/stdc++.h"
using namespace std;
using ll = long long;
using pii = pair<int,int>;
using pll = pair<ll,ll>;
template<typename T> int sz(const T &a){return int(a.size());}

const int MN = 4e5+1;
vector<vector<char>> arr;    // grid, '#' = sand
vector<vector<int>> ind;     // grid cell -> node id (1-based)
int am[MN];                  // a_i per column
vector<int> adj[MN];         // disturbance graph on blocks
int nodecnt = 0;
int id[MN], low[MN];         // Tarjan discovery index / low-link
bool inst[MN];               // currently on the Tarjan stack?
vector<int> st;
int et;                      // discovery timer (starts at 1 so 0 = unvisited)
int in[MN];                  // node -> SCC id
vector<vector<int>> comps;   // SCC member lists; comps[0] is a dummy
vector<int> nadj[MN];        // condensed-DAG adjacency (deduplicated)
int marked[MN];              // SCC is reachable from some special SCC
int leftspec[MN];            // leftmost special column represented by this SCC
int trans[MN];               // greedy: furthest right reachable from index i
pii dp[MN];                  // [min, max] special index reachable from an SCC

// Tarjan's strongly-connected-components DFS.
void dfs(int loc) {
    id[loc] = low[loc] = et++;
    inst[loc] = true;
    st.push_back(loc);
    for (int x : adj[loc]) {
        if (!id[x]) { dfs(x); low[loc] = min(low[loc], low[x]); }
        else if (inst[x]) low[loc] = min(low[loc], id[x]);
    }
    if (id[loc] == low[loc]) {          // loc is the root of an SCC: pop it
        comps.push_back({});
        while (true) {
            int cur = st.back();
            st.pop_back();
            in[cur] = sz(comps) - 1;
            inst[cur] = false;
            comps.back().push_back(cur);
            if (cur == loc) break;
        }
    }
}

// Mark every SCC reachable from loc (loc itself stays unmarked).
void mark(int loc) {
    for (int x : nadj[loc]) {
        if (!marked[x]) {
            marked[x] = true;
            mark(x);
        }
    }
}

// Memoized DAG dp: range of special indices reachable from SCC loc.
// dp[loc].first != 0 doubles as the "already computed" flag.
pii solve(int loc) {
    if (dp[loc].first) return dp[loc];
    dp[loc] = {INT_MAX, INT_MIN};
    for (int x : nadj[loc]) {
        if (!marked[x]) {
            pii te = solve(x);
            dp[loc] = {min(dp[loc].first, te.first), max(dp[loc].second, te.second)};
        }
    }
    return dp[loc];
}

int main() {
    cin.tie(NULL);
    ios_base::sync_with_stdio(false);
    int n, m;
    cin >> n >> m;
    arr.resize(n+1, vector<char>(m+1));
    ind.resize(n+1, vector<int>(m+1));
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++) cin >> arr[i][j];
    for (int i = 1; i <= m; i++) cin >> am[i];

    // Assign a node id to every sand block.
    for (int i = 1; i <= n; i++)
        for (int j = 1; j <= m; j++)
            if (arr[i][j] == '#') ind[i][j] = ++nodecnt;

    // Build the disturbance edges (same construction as F1).
    for (int i = 1; i <= n; i++) {
        for (int j = 1; j <= m; j++) {
            if (arr[i][j] != '#') continue;
            if (i-1 >= 1 && arr[i-1][j] == '#')
                adj[ind[i][j]].push_back(ind[i-1][j]);
            for (int k = i+1; k <= n; k++) {
                if (arr[k][j] == '#') {
                    adj[ind[i][j]].push_back(ind[k][j]);
                    break;
                }
            }
            bool leftdone = false, rightdone = false;
            for (int k = i; k <= n && (!leftdone || !rightdone) && (arr[k][j] != '#' || k == i); k++) {
                if (j-1 >= 1 && !leftdone && arr[k][j-1] == '#')
                    adj[ind[i][j]].push_back(ind[k][j-1]), leftdone = true;
                if (j+1 <= m && !rightdone && arr[k][j+1] == '#')
                    adj[ind[i][j]].push_back(ind[k][j+1]), rightdone = true;
            }
        }
    }

    // Condense SCCs.
    et = 1;
    comps.push_back({});   // dummy so SCC ids start at 1
    for (int i = 1; i <= nodecnt; i++)
        if (!id[i]) dfs(i);
    for (int i = 1; i < sz(comps); i++) {
        leftspec[i] = INT_MAX;
        for (int x : comps[i])
            for (int y : adj[x])
                if (in[y] != i) nadj[i].push_back(in[y]);
        sort(nadj[i].begin(), nadj[i].end());
        nadj[i].erase(unique(nadj[i].begin(), nadj[i].end()), nadj[i].end());
    }

    // The a_i-th block from the bottom of each column is special.
    vector<int> spec;
    for (int i = 1; i <= m; i++) {
        for (int j = n; j >= 1; j--) {
            if (arr[j][i] == '#') {
                am[i]--;
                if (am[i] == 0) {        // never fires for columns with a_i = 0
                    spec.push_back(in[ind[j][i]]);
                    leftspec[in[ind[j][i]]] = min(leftspec[in[ind[j][i]]], i);
                }
            }
        }
    }
    sort(spec.begin(), spec.end());
    spec.erase(unique(spec.begin(), spec.end()), spec.end());

    // Discard special SCCs reachable from other special SCCs.
    for (int x : spec)
        if (!marked[x]) mark(x);

    // Coordinate-compress the surviving special columns and seed their dp.
    vector<int> compress;
    for (int x : spec)
        if (!marked[x]) compress.push_back(leftspec[x]);
    sort(compress.begin(), compress.end());
    for (int x : spec)
        if (!marked[x])
            dp[x].first = dp[x].second =
                lower_bound(compress.begin(), compress.end(), leftspec[x]) - compress.begin() + 1;

    // Each SCC covers the interval [te.first, te.second] of special indices.
    for (int i = 1; i < sz(comps); i++) {
        if (!marked[i]) {
            pii te = solve(i);
            if (te.first != INT_MAX)
                trans[te.first] = max(trans[te.first], te.second);
        }
    }

    // Greedy minimum interval cover over indices 1..|compress|.
    for (int i = 2; i <= sz(compress); i++) trans[i] = max(trans[i-1], trans[i]);
    int cur = 1, ans = 0;
    while (cur <= sz(compress)) ans++, cur = trans[cur] + 1;
    printf("%d\n", ans);
    return 0;
}
1534
G
A New Beginning
Annie has gotten bored of winning every coding contest and farming unlimited rating. Today, she is going to farm potatoes instead. Annie's garden is an infinite 2D plane. She has $n$ potatoes to plant, and the $i$-th potato must be planted at $(x_i,y_i)$. Starting at the point $(0, 0)$, Annie begins walking, in one step she can travel one unit \textbf{right} or \textbf{up} (increasing her $x$ or $y$ coordinate by $1$ respectively). At any point $(X,Y)$ during her walk she can plant some potatoes at arbitrary points using her potato gun, consuming $\max(|X-x|,|Y-y|)$ units of energy in order to plant a potato at $(x,y)$. Find the minimum total energy required to plant every potato. Note that Annie may plant any number of potatoes from any point.
Observe that for any point $(x,y)$ and some path $A$, the minimum distance from $A$ to $(x,y)$ will occur on the intersection of $A$ and the antidiagonal of $(x,y)$. Here the antidiagonal is defined as a line $y=-x+c$. Proof: Assume $(a,b)$ is the intersection of $A$ and the antidiagonal of $(x,y)$. If $(a,b) = (x,y)$, this distance is obviously minimal as it is equal to 0. Now consider the case where $(a,b) \neq (x,y)$. As a result of $(a,b)$ being on the antidiagonal, two properties will be true: $|x-a| = |y-b|$. Either $a > x$ and $b < y$ or $a < x$ and $b > y$. When you move forward on the path, increasing $b$ will only make $|y-b|$ larger. Thus, no matter how you change $a$, $\max(|x-a|,|y-b|)$ will always increase or stay the same. When you move backward on the path, decreasing $a$ will only make $|x-a|$ larger. Thus, no matter how you change $b$, $\max(|x-a|,|y-b|)$ will always increase or stay the same. Thus, the shortest distance will always occur at $(a,b)$. Now we rotate the grid $45$ degrees clockwise so that the antidiagonals are now vertical lines on the new grid. Your movements on the grid now go from $(x,y)$ to $(x+1,y+1)$ or $(x+1,y-1)$. Furthermore, it is now optimal to plant a potato when you are on the same x coordinate as the potato. When you are at $(x,a)$, planting a potato at $(x,b)$ costs $\frac{|a-b|}{2}$. From here we can observe a slow dp. Define $dp[x][y]$ as the minimum cost of any path that goes from $(0,0)$ to $(x,y)$ and plant all potatoes $(a,b)$ such that $a \leq x$. Note: this function is only defined for when $x$ and $y$ have the same parity. This is as a point $(x,y)$ in the old grid translates to $(x+y,x-y)$ in the new grid and $x+y$ has the same parity as $x-y$. For speed, we also only calculate $dp[x][y]$ when there is a potato with a x-coordinate of $x$. 
The transition is then established as $dp[x][y] = (\min{dp[a][b]} \forall y-(x-a) \leq b \leq y+(x-a)) + (\sum{\frac{|y-z|}{2}} \forall potatoes (x,z))$, where $a$ is the last x-coordinate containing a potato. This runs in $\mathcal{O}(n \cdot 10^9)$. This dp can then be optimized using slope trick. If you are not familiar with slope trick, we recommend learning it first at Slope trick explained and [Tutorial] Slope Trick. Instead of maintaining a $2$ dimensional array, we can maintain functions $f_x$ where $f_x(y) = dp[x][y]$. To transition from $f_i$ to $f_j$, we first set $f_i(a) = \min{f_i(b)} \forall a-(j-i) \leq b \leq a+(j-i)$. This can be done by maintaining the center/minimum of the slope trick function and then offsetting all values in the left priority queue by $-(j-i)$ and offsetting all values in the right priority queue by $j-i$. Finally, we have to add the costs of the potatoes. Each of the potatoes $(x,y)$ is just a function $g$ such that $g(a) = \frac{|y-a|}{2}$. These functions can then be added onto $f_i$ finishing the conversion to $f_j$. The final answer is then the minimum of the last function. Final Complexity: $\mathcal{O}(n \log n)$.
[ "data structures", "dp", "geometry", "sortings" ]
3,300
// Codeforces 1534G — slope-trick solution (see the editorial text above).
// The grid is rotated 45 degrees: each potato (x, y) is stored as the pair
// (x+y, x-y) and the potatoes are processed in increasing order of x+y. In
// the rotated grid it is optimal to plant a potato when the walker shares
// its x-coordinate, at cost |a-b|/2, so the convex piecewise-linear cost
// function is maintained with two heaps: `pa` (max-heap, breakpoints of the
// left slope) and `pb` (min-heap, breakpoints of the right slope). For each
// potato, the distance from the current flat region is added to `ans` and
// the breakpoints are re-balanced; the widening of the reachable window per
// step appears to be folded directly into the stored keys (x+y / x-y) rather
// than kept as explicit heap offsets — NOTE(review): confirm against the
// editorial's description of the priority-queue offsets. `ans` is halved at
// the end because costs are accumulated in doubled units.
// Everything before main() is a generic contest template (macros, pb_ds
// aliases, RNG, hashing, debug printing) and is not problem-specific; note
// the variadic min/max macros, which main() relies on for the
// three-argument max(...) call.
#include <bits/stdc++.h> #include <ext/pb_ds/assoc_container.hpp> #include <ext/pb_ds/tree_policy.hpp> #include <ext/pb_ds/priority_queue.hpp> using namespace std; using namespace __gnu_pbds; #define foru(i,a,b) for(int i=(a);i<(b);i++) #define ford(i,a,b) for(int i=(a);i>=(b);i--) #define fori(a,b) foru(i,a,b) #define forj(a,b) foru(j,a,b) #define fork(a,b) foru(k,a,b) #define seto(x,i) memset(x,i,sizeof x) #define pf first #define ps second #define pb push_back #define eb emplace_back #define em emplace #define mp make_pair #define mt make_tuple #define popcount __builtin_popcount #define popcountll __builtin_popcountll #define clz __builtin_clz #define clzll __builtin_clzll #define ctz __builtin_ctz #define ctzll __builtin_ctzll #define P2(x) (1LL<<(x)) #define sz(x) (int)x.size() #define all(x) begin(x),end(x) typedef int64_t ll; typedef uint64_t ull; typedef int8_t byte; typedef long double lld; typedef pair<int,int> pii; typedef pair<ll,ll> pll; typedef pair<lld,lld> pdd; template<class T1,class T2> using ordered_map=tree<T1,T2,less<T1>,rb_tree_tag,tree_order_statistics_node_update>; template<class T1> using ordered_set=ordered_map<T1,null_type>; template<class T> using minpq=std::priority_queue<T,vector<T>,greater<T>>; template<class T> using maxpq=std::priority_queue<T,vector<T>,less<T>>; template<class T> using minpairingheap=__gnu_pbds::priority_queue<T,greater<T>,pairing_heap_tag>; template<class T>using maxpairingheap=__gnu_pbds::priority_queue<T,less<T>,pairing_heap_tag>; const int inf=0x3f3f3f3f,MOD=1e9+7; const ll INF=0x3f3f3f3f3f3f3f3f; const lld PI=acos((lld)-1); const ll SEED=443214^chrono::duration_cast<chrono::nanoseconds>(chrono::high_resolution_clock::now().time_since_epoch()).count(); mt19937 randgen(SEED); int randint(int a, int b){return uniform_int_distribution<int>(a,b)(randgen);} ll randll(ll a, ll b){return uniform_int_distribution<ll>(a,b)(randgen);} ll gcd(ll a, ll b){return b?gcd(b,a%b):a;} ll fpow(ll a,ll b,ll M=MOD){ll 
ret=1;for(;b;b>>=1){if(b&1) ret=ret*a%M;a=a*a%M;}return ret;} template<class T1,class T2>constexpr const auto _min(const T1&x,const T2&y){return x<y?x:y;} template<class T,class...Ts>constexpr auto _min(const T&x,const Ts&...xs){return _min(x,_min(xs...));} template<class T1,class T2>constexpr const auto _max(const T1&x,const T2&y){return x>y?x:y;} template<class T,class...Ts>constexpr auto _max(const T&x,const Ts&...xs){return _max(x,_max(xs...));} #define min(...) _min(__VA_ARGS__) #define max(...) _max(__VA_ARGS__) template<class T1,class T2>constexpr bool ckmin(T1&x,const T2&y){return x>y?x=y,1:0;} template<class T,class...Ts>constexpr bool ckmin(T&x,const Ts&...xs){return ckmin(x,min(xs...));} template<class T1,class T2>constexpr bool ckmax(T1&x,const T2&y){return x<y?x=y,1:0;} template<class T,class...Ts>constexpr bool ckmax(T&x,const Ts&...xs){return ckmax(x,max(xs...));} struct chash{ static ll splitmix64(ll x){x+=0x9e3779b97f4a7c15; x=(x^(x>>30))*0xbf58476d1ce4e5b9; x=(x^(x>>27))*0x94d049bb133111eb; return x^(x>>31);} template<class T> size_t operator()(const T &x) const{return splitmix64(hash<T>()(x)+SEED);} template<class T1,class T2> size_t operator()(const pair<T1,T2>&x)const{return 31*operator()(x.first)+operator()(x.second);}}; void fIn(string s){freopen(s.c_str(),"r",stdin);} void fOut(string s){freopen(s.c_str(),"w",stdout);} void fIO(string s){fIn(s+".in"); fOut(s+".out");} string to_string(char c){return string(1,c);} string to_string(char* s){return (string)s;} string to_string(string s){return s;} template<class T> string to_string(complex<T> c){stringstream ss; ss<<c; return ss.str();} template<class T1,class T2> string to_string(pair<T1,T2> p){return "("+to_string(p.pf)+","+to_string(p.ps)+")";} template<size_t SZ> string to_string(bitset<SZ> b){string ret=""; fori(0,SZ) ret+=char('0'+b[i]); return ret;} template<class T> string to_string(T v){string ret="{"; for(const auto& x:v) ret+=to_string(x)+","; return ret+"}";} void 
DBG(){cerr<<"]"<<endl;} template<class T,class... Ts> void DBG(T x,Ts... xs){cerr<<to_string(x); if(sizeof...(xs)) cerr<<", "; DBG(xs...);} #ifdef LOCAL_PROJECT #define dbg(...) cerr<<"Line("<< __LINE__<<") -> ["<<#__VA_ARGS__<<"]: [", DBG(__VA_ARGS__) #else #define dbg(...) 0 #endif #define nl "\n" const int N=800010,M=1e1; //8e5 ll T,n,x,y,a,b,c,d,ans; string ff; int main(){ cin.tie(0)->sync_with_stdio(0); T=1; foru(cnt,1,T+1){ vector<pll> p; maxpq<ll> pa; minpq<ll> pb; n=x=y=a=b=ans=0; cin>>n; fori(0,n){ cin>>x>>y; p.eb(x+y,x-y); } sort(all(p)); pa.em(0); pb.em(0); for(auto [y,x]:p){ ans+=max(pa.top()-y-x,x-pb.top()-y,0); if(!sz(pa)||pb.top()+y>x){ pa.em(x+y); pa.em(x+y); pb.em(pa.top()-2*y); pa.pop(); } else{ pb.em(x-y); pb.em(x-y); pa.em(pb.top()+2*y); pb.pop(); } } ans/=2; cout<<ans<<nl; } return 0; }
1534
H
Lost Nodes
This is an interactive problem. As he qualified for IOI this year, Little Ericyi was given a gift from all his friends: a tree of $n$ nodes! On the flight to IOI Little Ericyi was very bored, so he decided to play a game with Little Yvonne with his new tree. First, Little Yvonne selects two (not necessarily different) nodes $a$ and $b$ on the tree (without telling Ericyi), and then gives him a hint $f$ (which is some node on the path from $a$ to $b$). Then, Little Ericyi is able to ask the following question repeatedly: - If I rooted the tree at node $r$ (Ericyi gets to choose $r$), what would be the Lowest Common Ancestor of $a$ and $b$? Little Ericyi's goal is to find the nodes $a$ and $b$, and report them to Little Yvonne. However, Little Yvonne thought this game was too easy, so before he gives the hint $f$ to Little Ericyi, he also wants him to first find the maximum number of queries required to determine $a$ and $b$ over all possibilities of $a$, $b$, and $f$ assuming Little Ericyi plays optimally. Little Ericyi defines an optimal strategy as one that makes the minimum number of queries. Of course, once Little Ericyi replies with the maximum number of queries, Little Yvonne will only let him use that many queries in the game. The tree, $a$, $b$, and $f$ are all fixed before the start of the game and do not change as queries are made.
Finding the theoretical maximum For now, let's only look at finding $k$ for fixed $f$. We will expand for all $f$ later. We know that $f$ is on the path from $a$ to $b$. Let us root the tree at $f$. Thus, we know that the path will pass through the root, and that there are exactly $2$ non-negative length chains beginning at the root. With that in mind, let's take a look at what a question "? r" really does. What happens if $r$ is on the path from $a$ to $b$? In this case, we obtain $r$ as the answer. What happens if $r$ is not on the path from $a$ to $b$? In this case, we obtain the closest node to $r$ on the path from $a$ to $b$. Let's try to find one endpoint of one of these chains first. Consider the following dynamic programming structure: Let $dp[u]$ denote the minimum number of questions required to "solve" the subtree rooted at $u$. By "solve", we mean to determine if the endpoint of a chain is in the subtree or not, and if it is in the subtree, the exact node. The base case is pretty obvious. For a leaf node, it will take exactly $1$ question to solve a leaf node. If the endpoint of the chain does indeed end at the leaf node, then a question will return the leaf node. Otherwise it'll return an ancestor of the leaf. Let's move on to a non-base case at a node $x$. Let's call the most "expensive" child of $x$ as the child $c$ with the largest $dp[c]$ (i.e. takes the most number of questions to "solve"). For now, assume we know that an endpoint exists in the subtree rooted at $x$, and is not $x$. Thus, we will need to iterate through each child to determine in which child's subtree the endpoint exists. We will need to use one question per child. However, note that not all of these questions count towards our question count. There are two cases: In the current child we are exploring, the endpoint does exist there. In this case, the question we used does not count towards our question count. We can use a question from the child's question count. 
This may seem a bit strange, but if we query correctly, then in the same time we check if the endpoint exists in the child's subtree, we also process the most "expensive" child of the child and determine if the endpoint exist in that child of the child's subtree. In the current child we are exploring, the endpoint doesn't exist there. In this case, the question we used must count towards our question count. We could consider this a "wasted" question as we didn't learn anything new about where the endpoint is, just that it's not in this child. Now, in which order should be explore the children? We can greedy this. Recall that any "wasted" questions count towards our question count. Thus, for more "expensive" children, we want to have wasted less questions when we get to the child, as in the case the endpoint does exist there, we would need even more questions (adding the "wasted" questions to $dp[c]$ where $c$ is the child). This gives us the following idea: To determine if the $i^\text{th}$ ($0$-indexed) child $c$ of $x$ contains the endpoint, we would need to waste $i$ questions. If the endpoint does exist in $c$, we would need to use a total of $dp[c] + i$ questions to "solve" the subtree rooted at $x$. Since we are trying to find the worst case, we want to order the children of $x$ such as to minimize the maximum of all $dp[c] + i$. The optimal ordering is the non-increasing order of $dp[c]$. The transition is thus: $dp[x] = \max\{dp[c_i] + i\} \,\, \forall \,\, 0 \le i < |c|$, where the $dp$ values are sorted in non-increasing order, and $c$ are the children of $x$. Now wait! We haven't handled the case where the endpoint doesn't exist in $x$ or the case where the endpoint is $x$ yet! Turns out, both of these cases are already handled for us. We only need to be a bit careful when we actually perform the interaction later on. 
If the endpoint doesn't exist in $x$, we would have already realized when one of the questions to a child of $x$ returns an ancestor of $x$. This question count won't be lost though. It will simply be counted towards a "wasted" question for one of the ancestors of $x$. If the endpoint is $x$, then all of the questions to a child of $x$ will return $x$ (not an ancestor of $x$). Thus, we will know that the endpoint is $x$ with exactly $|c|$ wasted questions. Notice, however, that $|c| \le dp[x]$ by the nature of the transition. Thus, this case will never be our worst case, so we can ignore it for the dynamic programming. We now have the dynamic programming complete, but notice that it's only for the endpoint of one chain. At our root $f$, there are two chains with two endpoints. Thus, our final answer won't be simply taking $dp[f]$. It will require a bit more computation. For now, assume that both endpoints are not $f$ (i.e. both chains have positive length). Following a similar analysis as the dynamic programming transition, we have that for any child that does not contain either chain endpoint, we would need to "waste" a question. Once again, the optimal ordering is to process in non-increasing order. However, adding $i$ to the $i^\text{th}$ child is no longer correct. Consider the following observations: When processing a child that doesn't contain a chain, that child only takes one question to determine that both chains are not in that child. For the first child that we know contains a chain, we don't "waste" a question. This question is included in that child's question count. With observation $1$, we are basically figuring out the first chain's endpoint for free. There are no wasted questions, as all wasted questions will be counted for the second chain. With observation $2$, we have that adding $i$ wasted questions is no longer correct. 
We only waste $i - 1$ questions for the $i^\text{th}$ child, as one of the processed children contains the first chain (and thus doesn't waste a question). Thus, for the root, our formula is: $ans = \max\{dp[c_j] + dp[c_i] + i - 1\} \,\, \forall \,\, 0 \le j < i < |c|$, where the $dp$ values are sorted in non-increasing order, and $c$ is the children of $f$. Note that this formula has an $\mathcal{O}(n^2)$ worst case for a star graph, so we will need to optimize it. Our optimized formula is: $ans = dp[c_0] + \max\{dp[c_i] + i - 1\} \,\, \forall \,\, 1 \le i < |c|$, where the $dp$ values are sorted in non-increasing order, and $c$ is the children of $f$. Once again, with a similar analysis as what was done for the $dp$ computation, the case that one (or both) chains have a zero length will never be the worst case, and so we can ignore it for the dynamic programming. However, the case must still be handled during interaction. Recall that the above was for a fixed $f$. To find the maximum for all $f$, we can perform a tree walk. When transitioning from one root to an adjacent root, use a prefix and suffix max array to compute $\max{dp[c_i] + i}$, ignoring the $c_i$ that is the new root. Both max arrays should store $dp[c_i] + i$, not $dp[c_i]$. Remember to subtract $1$ from the suffix array values, as there is one less wasted question. ------------------ Interacting If you understood the initial dynamic programming computation for determining the theoretical maximum, the interaction should be relatively straightforward, and so we will leave it as an exercise for the reader. Be careful when handling the special cases that were ignored during the dynamic programming. ------------------ The final time complexity is $\mathcal{O}(n \log n)$ and memory complexity is $\mathcal{O}(n)$.
[ "constructive algorithms", "dp", "graphs", "interactive", "sortings", "trees" ]
3,500
// Codeforces 1534H "Lost Nodes" — interactive solution (editorial above).
// dfs(loc, parent): computes dp[loc] = worst-case number of queries needed
// to locate one path endpoint inside loc's subtree; children are taken in
// non-increasing dp order and the i-th probe "wastes" i questions (at the
// root the two-chain formula dp[c_0] + max(dp[c_i] + i - 1) is used
// instead). It also fills depth[] for the later simulation.
// walkdfs(loc, parent, parentdp): re-roots the tree over every candidate
// hint f, combining the parent's contribution with the children via
// prefix/suffix maxima of dp[c_i]+i (the suffix side subtracting the one
// saved question), accumulating the global worst case in `ans`.
// query(a): issues "? a" and reads the game master's LCA reply, exiting on
// the -1 error sentinel; simulate() then replays the same greedy child
// order with real queries, using depth[] to decide whether a reply
// descended into the probed subtree, and prints "! a b" at the end.
// main(): reads the tree, prints the computed maximum `ans`, reads the
// hint, re-runs dfs from the hint, and starts the interaction. fflush after
// every printf is required by the interactive protocol.
// NOTE: the format string "? %d\n" is split across the two stored lines of
// this dump; do not insert anything between them.
#include "bits/stdc++.h" using namespace std; using ll = long long; using pii = pair<int,int>; using pll = pair<ll,ll>; template<typename T> int sz(const T &a){return int(a.size());} const int MN=1e5+1; vector<int> adj[MN]; int dp[MN],depth[MN]; void dfs(int loc, int parent){ vector<int> children; depth[loc]=depth[parent]+1; for(auto x:adj[loc]){ if(x!=parent){ dfs(x,loc); children.push_back(dp[x]); } } sort(children.begin(),children.end(),greater<>()); dp[loc]=1; if(parent) { for (int i = 0; i < sz(children); i++) { dp[loc] = max(dp[loc], children[i] + i); } } else{ dp[loc]=0; for (int i = 1; i < sz(children); i++) { dp[loc] = max(dp[loc], children[i] + i - 1); } if(sz(children))dp[loc]+=children.front(); } } int ans=0; void walkdfs(int loc, int parent, int parentdp){ vector<pii> children; if(parentdp!=-1)children.push_back({parentdp,parent}); for(auto x:adj[loc])if(x!=parent)children.push_back({dp[x],x}); sort(children.begin(),children.end(),greater<>()); int nodeans=0; for (int i = 1; i < sz(children); i++) { nodeans = max(nodeans, children[i].first + i - 1); } if(sz(children))nodeans+=children.front().first; ans=max(ans,nodeans); vector<int> pre,suf(sz(children)); for(int i=0;i<sz(children);i++){ pre.push_back(max(sz(pre)?pre.back():0,children[i].first+i)); } for(int i=sz(children)-1;i>=0;i--){ suf[i]=max((i==sz(children)-1?0:suf[i+1]+1),children[i].first); } for(int i=0;i<sz(children);i++){ if(children[i].second!=parent){ walkdfs(children[i].second,loc,max((i-1>=0?pre[i-1]:1),(i+1<sz(suf)?suf[i+1]+i:1))); } } } int query(int a){ int ret; printf("? 
%d\n",a); fflush(stdout); cin>>ret; if(ret==-1)exit(0); return ret; } int simulate(int loc, int parent){ vector<int> children; for(auto x:adj[loc]){ if(x!=parent){ children.push_back(x); } } if(sz(children)==0&&parent)return query(loc); sort(children.begin(),children.end(),[&](const auto &lhs, const auto &rhs){ return dp[lhs]>dp[rhs]; }); if(parent){ for (int i = 0; i < sz(children); i++) { int te=simulate(children[i],loc); if(depth[te]!=depth[loc])return te; } return loc; } else{ vector<int> nodes; for (int i = 0; i < sz(children); i++) { int te=simulate(children[i],loc); if(depth[te]!=depth[loc]){ nodes.push_back(te); if(sz(nodes)==2)break; } } while(sz(nodes)<2)nodes.push_back(loc); printf("! %d %d\n",nodes[0],nodes[1]); fflush(stdout); } return -1; } int main(){ cin.tie(NULL); ios_base::sync_with_stdio(false); int n,a,b; cin>>n; for(int i=1;i<n;i++){ cin>>a>>b; adj[a].push_back(b); adj[b].push_back(a); } dfs(1,0); walkdfs(1,0,-1); printf("%d\n",ans); fflush(stdout); cin>>a; dfs(a,0); simulate(a,0); return 0; }
1535
A
Fair Playoff
Four players participate in the playoff tournament. The tournament is held according to the following scheme: the first player will play with the second, and the third player with the fourth, then the winners of the pairs will play in the finals of the tournament. It is known that in a match between two players, the one whose skill is greater will win. The skill of the $i$-th player is equal to $s_i$ and all skill levels are pairwise different (i. e. there are no two identical values in the array $s$). The tournament is called \textbf{fair} if the two players with the highest skills meet in the finals. Determine whether the given tournament is \textbf{fair}.
It is easier to determine the case when the players with the maximum skills will not meet in the finals. It means that they met in the semifinals, and in the other semifinals, both players are weaker. It's easy to check this case with the following formula: $\min(s_1, s_2) > \max(s_3, s_4)$ or $\max(s_1, s_2) < \min(s_3, s_4)$.
[ "brute force", "implementation" ]
800
#include <bits/stdc++.h>
using namespace std;

// CF 1535A "Fair Playoff": the bracket is unfair exactly when one semifinal
// pair dominates the other, i.e. the weaker player of one pair still beats
// the stronger player of the other pair.
int main() {
    int tests;
    cin >> tests;
    for (int tc = 0; tc < tests; ++tc) {
        int a, b, c, d;
        cin >> a >> b >> c >> d;
        // Pair (a, b) meets pair (c, d) only in the final.
        const bool leftDominates  = min(a, b) > max(c, d);
        const bool rightDominates = max(a, b) < min(c, d);
        cout << (leftDominates || rightDominates ? "NO\n" : "YES\n");
    }
}
1535
B
Array Reodering
You are given an array $a$ consisting of $n$ integers. Let's call a pair of indices $i$, $j$ \textbf{good} if $1 \le i < j \le n$ and $\gcd(a_i, 2a_j) > 1$ (where $\gcd(x, y)$ is the greatest common divisor of $x$ and $y$). Find the maximum number of \textbf{good} index pairs if you can reorder the array $a$ in an arbitrary way.
If the value of $a_i$ is even, then $\gcd(a_i, 2a_j)$ at least $2$, regardless of the value of $a_j$. Therefore, we can put all the even values before the odd ones (it does not matter in what order). Now it remains to arrange the odd values. In fact, their order is not important, because $\gcd(a_i, 2a_j) = \gcd(a_i, a_j)$ (for odd $a_i$ and $a_j$). This means that each pair will be considered exactly $1$ time, regardless of the order of the odd elements.
[ "brute force", "greedy", "math", "number theory", "sortings" ]
900
#include <bits/stdc++.h>
using namespace std;

// CF 1535B "Array Reodering": placing all even values before all odd ones
// maximizes the number of good pairs; the order inside each parity class is
// irrelevant (gcd(a_i, 2*a_j) = gcd(a_i, a_j) for two odd values, and any
// even a_i already guarantees gcd >= 2).
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        vector<int> a(n);
        for (auto &v : a) cin >> v;
        // Evens first; relative order within a parity class does not change
        // the resulting pair count.
        stable_partition(a.begin(), a.end(), [](int v) { return v % 2 == 0; });
        int good = 0;
        for (int i = 0; i < n; ++i)
            for (int j = i + 1; j < n; ++j)
                if (__gcd(a[i], 2 * a[j]) > 1) ++good;
        cout << good << endl;
    }
}
1535
C
Unstable String
You are given a string $s$ consisting of the characters 0, 1, and ?. Let's call a string \textbf{unstable} if it consists of the characters 0 and 1 and any two adjacent characters are different (i. e. it has the form 010101... or 101010...). Let's call a string \textbf{beautiful} if it consists of the characters 0, 1, and ?, and you can replace the characters ? to 0 or 1 (for each character, the choice is independent), so that the string becomes \textbf{unstable}. For example, the strings 0??10, 0, and ??? are beautiful, and the strings 00 and ?1??1 are not. Calculate the number of beautiful contiguous substrings of the string $s$.
Let's find a simple condition when the string is not beautiful. A string is not beautiful if there are two characters 0 (or two characters 1) at an odd distance, or 0 and 1 at an even distance (because in this case, the string cannot be made unstable). Iterate over the right border of the substring $r$. Let $l$ be the maximum index such that the substring $s[l, r]$ is not beautiful (or $0$ if the substring $s[1, r]$ is beautiful). Then we have to add $r - l$ to the answer (since any substring of a beautiful string is also beautiful). Denote $lst_{c, p}$ as the last occurrence of $c$ ($0$ or $1$) at the position of parity $p$. Let $s_r = 0$, $p$ is the parity of $r$, then $l = \max(lst_{0, p \oplus 1}, lst_{1, p})$, i. e. find the nearest character that breaks a beautiful substring (0 at an odd distance or 1 at an even distance) The case for $s_r = 1$ is similar. If $s_r = ?$, then we can choose what this character will be. Obviously, we need to choose the option with the smaller value of $l$.
[ "binary search", "dp", "greedy", "implementation", "strings", "two pointers" ]
1,400
#include <bits/stdc++.h>
using namespace std;

// CF 1535C "Unstable String": for each right border r, find the rightmost
// index that breaks beauty — a fixed character of the same value at odd
// distance or of the other value at even distance — and add the number of
// beautiful substrings ending at r.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        string s;
        cin >> s;
        // last[c][p] = latest position holding fixed character c ('0'/'1')
        // at index parity p; -1 when none seen yet.
        int last[2][2] = {{-1, -1}, {-1, -1}};
        long long total = 0;
        const int n = (int)s.size();
        for (int r = 0; r < n; ++r) {
            const int par = r & 1;
            int cut = r - 1;  // breaking index; lst values never exceed r-1
            if (s[r] != '1')  // s[r] may play the role of '0'
                cut = min(cut, max(last[0][par ^ 1], last[1][par]));
            if (s[r] != '0')  // s[r] may play the role of '1'
                cut = min(cut, max(last[0][par], last[1][par ^ 1]));
            total += r - cut;  // substrings (cut, r], ..., [r, r]
            if (s[r] != '?')
                last[s[r] - '0'][par] = r;
        }
        cout << total << '\n';
    }
}
1535
D
Playoff Tournament
$2^k$ teams participate in a playoff tournament. The tournament consists of $2^k - 1$ games. They are held as follows: first of all, the teams are split into pairs: team $1$ plays against team $2$, team $3$ plays against team $4$ (exactly in this order), and so on (so, $2^{k-1}$ games are played in that phase). When a team loses a game, it is eliminated, and each game results in elimination of one team (there are no ties). After that, only $2^{k-1}$ teams remain. If only one team remains, it is declared the champion; otherwise, $2^{k-2}$ games are played: in the first one of them, the winner of the game "$1$ vs $2$" plays against the winner of the game "$3$ vs $4$", then the winner of the game "$5$ vs $6$" plays against the winner of the game "$7$ vs $8$", and so on. This process repeats until only one team remains. For example, this picture describes the chronological order of games with $k = 3$: Let the string $s$ consisting of $2^k - 1$ characters describe the results of the games in chronological order as follows: - if $s_i$ is 0, then the team with lower index wins the $i$-th game; - if $s_i$ is 1, then the team with greater index wins the $i$-th game; - if $s_i$ is ?, then the result of the $i$-th game is unknown (any team could win this game). Let $f(s)$ be the number of possible winners of the tournament described by the string $s$. A team $i$ is a possible winner of the tournament if it is possible to replace every ? with either 1 or 0 in such a way that team $i$ is the champion. You are given the initial state of the string $s$. You have to process $q$ queries of the following form: - $p$ $c$ — replace $s_p$ with character $c$, and print $f(s)$ as the result of the query.
Denote $cnt_i$ as the number of teams that can be winners in the $i$-th game. The answer to the problem is $cnt_{2^k-1}$. If the $i$-th game is played between the winners of games $x$ and $y$ ($x < y$), then: $cnt_i = cnt_x$ if $s_i = 0$; $cnt_i = cnt_y$ if $s_i = 1$; $cnt_i = cnt_x + cnt_y$ if $s_i = ?$. So we can calculate all values of $cnt$ for the initial string. Note that the result of no more than $k$ other games depends on the result of any game. So, if we change $s_p$, it will change no more than $k$ values of $cnt$, and we can recalculate all of them. For convenience, you can renumerate the games so that the playoff looks like a segment tree, i. e. the final has the number $0$, the semifinals have numbers $1$ and $2$, etc.
[ "data structures", "dfs and similar", "dp", "implementation", "trees" ]
1,800
#include <bits/stdc++.h>
using namespace std;

// CF 1535D "Playoff Tournament": store the bracket as a segment tree —
// after reversing the game string, the final is node 0 and the children of
// game v are 2v+1 and 2v+2. winners[v] counts the teams that can still win
// game v; each update touches only the O(k) ancestors of the changed game.
int main() {
    cin.tie(nullptr)->sync_with_stdio(false);
    int k;
    string s;
    cin >> k >> s;
    reverse(s.begin(), s.end());  // chronological order -> tree order
    const int n = 1 << k;
    vector<int> winners(2 * n, 1);  // leaves: every team can win "itself"
    auto recompute = [&](int v) {
        const int left  = (s[v] != '0') ? winners[2 * v + 1] : 0;
        const int right = (s[v] != '1') ? winners[2 * v + 2] : 0;
        winners[v] = left + right;
    };
    for (int v = n - 2; v >= 0; --v) recompute(v);
    int q;
    cin >> q;
    while (q--) {
        int pos;
        char res;
        cin >> pos >> res;
        int v = n - pos - 1;  // map 1-based chronological index to tree node
        s[v] = res;
        while (v != 0) {      // refresh the changed game and its ancestors
            recompute(v);
            v = (v - 1) / 2;
        }
        recompute(0);
        cout << winners[0] << '\n';
    }
}
1535
E
Gold Transfer
You are given a rooted tree. Each vertex contains $a_i$ tons of gold, which costs $c_i$ per one ton. Initially, the tree consists only a root numbered $0$ with $a_0$ tons of gold and price $c_0$ per ton. There are $q$ queries. Each query has one of two types: - Add vertex $i$ (where $i$ is an index of query) as a son to some vertex $p_i$; vertex $i$ will have $a_i$ tons of gold with $c_i$ per ton. It's guaranteed that $c_i > c_{p_i}$. - For a given vertex $v_i$ consider the simple path from $v_i$ to the root. We need to purchase $w_i$ tons of gold from vertices on this path, spending the minimum amount of money. If there isn't enough gold on the path, \textbf{we buy all we can}. If we buy $x$ tons of gold in some vertex $v$ the remaining amount of gold in it decreases by $x$ (of course, we can't buy more gold that vertex has at the moment). For each query of the second type, calculate the resulting amount of gold we bought and the amount of money we should spend. Note that you should solve the problem in online mode. It means that you can't read the whole input at once. You can read each query only after writing the answer for the last query, so don't forget to flush output after printing answers. You can use functions like fflush(stdout) in C++ and BufferedWriter.flush in Java or similar after each writing in your program. In standard (if you don't tweak I/O), endl flushes cout in C++ and System.out.println in Java (or println in Kotlin) makes automatic flush as well.
Note, that $c_i > c_{p_i}$ for each vertex $i$. So if we consider a path from some vertex $v$ to $0$, the closer you are to $0$, the cheaper the cost. In other words, it's always optimal to choose the highest vertex on the path with $a_i > 0$. Suppose we can find such vertex $u$ for a given $v$. How many times we will repeat this search operation? If we need to buy $w$ tons and $u$ has $a_u$ tons, then it's optimal to buy $mn = \min(w, a_u)$ tons in $u$. After we buy $mn$ tons, either $w$ becomes $0$ or $a_u$ becomes $0$. Since for each vertex $u$, $a_u$ can become equal to zero at most once, and since after $w$ is zero we stop buying, then there will be $O(q)$ searches in total. The next question is how to find $u$ efficiently for a given $v$? Consider the path from $0$ to some vertex $v$. Since we prefer to buy from higher vertices, all empty vertices on this path will form some prefix of it (possibly, empty prefix). So we can make some sort of binary search to find the first non-empty vertex $u$. But instead of binary search we will use binary lifting technique. If we know for each $k$ ($0 \le k < 20$) which vertex $p[k][v]$ on the path from $v$ to $0$ on distance $2^k$ from $v$ then we can efficiently jump up the path. Let's firstly jump at distance $2^{19}$: if $a[p[19][v]] = 0$ then we jump too high - let's not jump. But if $a[p[19][v]] > 0$ then we can safely jump (or $v = p[19][v]$). Now we know that we don't need a second $2^{19}$ jump, so we try $2^{18}$ jump and so on. In other words, using binary lifting we can find the highest vertex $u$ with $a_u > 0$ in $O(\log(q))$ steps. Also, we can calculate array $p[k][v]$ for vertex $v$ right after we add vertex $v$ to the tree, since $p[0][v] = p_i$ and $p[k][v] = p[k - 1][p[k - 1][v]]$. The resulting complexity is $O(q \log(q))$.
[ "binary search", "data structures", "dp", "greedy", "interactive", "trees" ]
2,200
// Codeforces 1535E "Gold Transfer" — online solution with binary lifting
// (editorial above). Because c_child > c_parent, gold on a root path is
// always cheapest at the highest non-empty vertex, and vertices empty out
// top-down — so the empty vertices form a prefix of the path from the root,
// and a[v] == 0 already means the entire path is exhausted (hence the
// `w > 0 && a[v] > 0` loop condition). p[k][v] is the 2^k-th ancestor of v,
// filled immediately when vertex v is added (type-1 query); the lift in the
// type-2 branch jumps upward only to ancestors that still hold gold, which
// converges on the highest non-empty vertex u in O(log q). Each purchase
// zeroes either the remaining demand w or a[u], and a vertex can be emptied
// at most once, so total work is O(q log q). `endl` flushes after every
// answer, as required by the online/interactive format. The operator<<
// overloads and macros at the top are generic template/debug helpers.
#include<bits/stdc++.h> using namespace std; #define fore(i, l, r) for(int i = int(l); i < int(r); i++) #define sz(a) int((a).size()) #define x first #define y second typedef long long li; typedef pair<int, int> pt; template<class A, class B> ostream& operator <<(ostream& out, const pair<A, B> &p) { return out << "(" << p.x << ", " << p.y << ")"; } template<class A> ostream& operator <<(ostream& out, const vector<A> &v) { out << "["; fore(i, 0, sz(v)) { if(i) out << ", "; out << v[i]; } return out << "]"; } const int INF = int(1e9); const li INF64 = li(1e18); const int LOG = 20; int q; vector<int> a, c; vector<int> p[LOG]; int main() { cin >> q; a.resize(q + 1); c.resize(q + 1); fore (lg, 0, LOG) p[lg].resize(q + 1); fore (lg, 0, LOG) p[lg][0] = 0; cin >> a[0] >> c[0]; fore (id, 1, q + 1) { int tp; cin >> tp; if (tp == 1) { int pr; cin >> pr; cin >> a[id] >> c[id]; p[0][id] = pr; fore (lg, 1, LOG) p[lg][id] = p[lg - 1][p[lg - 1][id]]; } else { int v, w; cin >> v >> w; int ansR = 0; li ansS = 0; while (w > 0 && a[v] > 0) { int u = v; for (int lg = LOG - 1; lg >= 0; lg--) { if (a[p[lg][u]] > 0) u = p[lg][u]; } int mn = min(a[u], w); a[u] -= mn; w -= mn; ansR += mn; ansS += mn * 1ll * c[u]; } cout << ansR << " " << ansS << endl; } } return 0; }
1535
F
String Distance
Suppose you are given two strings $a$ and $b$. You can apply the following operation any number of times: choose any \textbf{contiguous} substring of $a$ or $b$, and sort the characters in it in non-descending order. Let $f(a, b)$ the minimum number of operations you have to apply in order to make them equal (or $f(a, b) = 1337$ if it is impossible to make $a$ and $b$ equal using these operations). For example: - $f(\text{ab}, \text{ab}) = 0$; - $f(\text{ba}, \text{ab}) = 1$ (in one operation, we can sort the whole first string); - $f(\text{ebcda}, \text{ecdba}) = 1$ (in one operation, we can sort the substring of the second string starting from the $2$-nd character and ending with the $4$-th character); - $f(\text{a}, \text{b}) = 1337$. You are given $n$ strings $s_1, s_2, \dots, s_k$ having equal length. Calculate $\sum \limits_{i = 1}^{n} \sum\limits_{j = i + 1}^{n} f(s_i, s_j)$.
Disclaimer: the model solution is very complicated compared to most participants' solutions. Feel free to discuss your approaches in the comments! First of all, it's easy to determine when two strings cannot be made equal using these operations: it's when their multisets of characters differ. So, we divide the strings into different equivalence classes, and for any pair of strings from different classes, the answer is $1337$. For any pair of strings from the same class, the answer is either $1$ or $2$, since $2$ operations are always enough to make the strings from the same equivalence class equal (we just sort both of them). Okay, now, for each class, we have to calculate the number of pairs of strings with the distance equal to $1$. Okay, suppose you have two strings $s_1$ and $s_2$, and you want to make them equal using one operation. Suppose that $s_1 < s_2$ lexicographically. Since applying an operation can't result in getting a lexicographically larger string, we should apply the operation on the string $s_2$, not $s_1$. Suppose we choose a substring $[l, r]$ of the string $s_2$ and sort it. All characters to the left of position $l$ and to the right of position $r$ are untouched, and all characters in $[l, r]$ are ordered in non-descending order; so, in order to transform $s_2$ into $s_1$, we should choose a subsegment $[l, r]$ such that all characters outside this segment are the same in both strings, and the substring $[l, r]$ of $s_1$ is sorted. So, the best way to choose a subsegment $[l, r]$ is to compute the longest common prefix of $s_1$ and $s_2$, the longest common suffix of $s_1$ and $s_2$, and try sorting everything in the middle in $s_2$. This gives us a solution in $O(n^2)$: for a pair of strings, we can check that one of them can be transformed into the other in $O(1)$. 
To do so, we need to build some data structure allowing to query longest common prefixes/suffixes in $O(1)$ (a trie with $O(1)$ LCA or precalculating LCP and building a sparse table of them can do the trick); furthermore, we want to be able to check if some subsegment of some string is sorted in $O(1)$ (but precalculating them is quite easy). So, we have a solution that works if the strings are long (in the model solution, this approach is used on classes having not more than $12000$ strings). The second approach can be used on classes having many strings. If the number of strings is big, it means that they are short, so we can do the following thing: for each string, iterate on the subsegment we will sort and check if the resulting string exists. The model solution uses some very complicated data structures to implement this, but I believe that it's quite easy to get this approach working using string hashes. The only dangerous thing in the second solution you have to consider is that choosing different substrings to sort may result in getting the same resulting string. One good way to deal with this is to ignore some substrings if sorting them doesn't change the leftmost or the rightmost character in the substring; for example, if we sort the substring acb in the string zacb, the character in the beginning of this substring is unchanged, so we can get the same result by sorting cb. So, we consider sorting the substring only if it changes both the first and the last characters of the substring. Okay, so we have two approaches: one works well with a small number of long strings, and the other works well with a big number of short strings. We can choose which of them to run depending on the size of the equivalence class we are considering, and this idea gives us a working solution.
[ "binary search", "brute force", "data structures", "hashing", "implementation", "strings" ]
3,000
// Count, over all ordered pairs of strings, the minimum number of
// "sort one substring" operations to make them equal (1337 if impossible).
// Per equivalence class (same multiset of characters), two regimes:
//  - solve_long (class size <= K): sort the class; for each pair, take the
//    longest common prefix/suffix via sparse-table LCP structures (forward
//    and reversed strings); distance is 1 iff the middle segment of the
//    smaller string is already sorted (checked with prefix sums over
//    descent positions), else 2.
//  - solve_short (class size > K, so strings are short): a trie keyed by
//    "prefix kept + '$' + reversed suffix kept"; add/calc count pairs
//    reachable by sorting one substring. Double counting is avoided by
//    only crediting substrings whose first and last characters both change
//    (the good[][] table built from running character sets).
// Pairs from different classes contribute 1337 each (handled in main).
// NOTE(review): nxt[] is 2e7 std::map<char,int> — a very large static
// footprint; presumably sized for the full constraints — verify against
// the memory limit.
#include<bits/stdc++.h> using namespace std; const int LN = 20; const int K = 12000; int pw2[1 << LN]; vector<int> sorted_segments(const string& s) { int n = int(s.size()) - 1; vector<int> res(n); for(int i = 0; i < n; i++) if(s[i] <= s[i + 1]) res[i] = 0; else res[i] = 1; return res; } vector<int> prefix_sum(const vector<int>& s) { int n = s.size(); vector<int> p(n + 1); for(int i = 0; i < n; i++) p[i + 1] = p[i] + s[i]; return p; } int naiveLCP(const string& s, const string& t) { int ans = 0; int n = s.size(); int m = t.size(); while(ans < n && ans < m && s[ans] == t[ans]) ans++; return ans; } vector<vector<int>> build_table(const vector<int>& a) { int n = a.size(); vector<vector<int>> table(LN, vector<int>(n)); for(int i = 0; i < n; i++) table[0][i] = a[i]; for(int i = 1; i < LN; i++) for(int j = 0; j < n; j++) if(j + (1 << (i - 1)) < n) table[i][j] = min(table[i - 1][j], table[i - 1][j + (1 << (i - 1))]); else table[i][j] = table[i - 1][j]; return table; } struct LCP { vector<int> idx; vector<vector<int>> table; int query_inner(int x, int y) { if(x > y) swap(x, y); int len = y - x; int d = pw2[len]; return min(table[d][x], table[d][y - (1 << d)]); } int query(int x, int y) { return query_inner(idx[x], idx[y]); } LCP() {}; LCP(vector<string> s) { int n = s.size(); vector<pair<string, int>> t; for(int i = 0; i < n; i++) { t.push_back(make_pair(s[i], i)); } sort(t.begin(), t.end()); idx.resize(n); for(int i = 0; i < n; i++) { idx[t[i].second] = i; } vector<int> LCPs; for(int i = 0; i < n - 1; i++) LCPs.push_back(naiveLCP(t[i].first, t[i + 1].first)); table = build_table(LCPs); }; }; const int T = int(2e7); map<char, int> nxt[T]; int cur = 1; int root = 0; int cnt[T]; void clear_trie() { root = cur++; } int go(int x, char c) { if(!nxt[x].count(c)) nxt[x][c] = cur++; return nxt[x][c]; } void add(int v, const string& s, int l, int r, int n, bool sw, const vector<int>& ps) { if(sw && l + r < n - 1 && ps[n - r - 1] == ps[l]) { cnt[v]++; } if(sw) { if(l + r < n - 1) 
add(go(v, s[n - r - 1]), s, l, r + 1, n, sw, ps); } else { add(go(v, '$'), s, l, r, n, true, ps); if(l < n - 1) add(go(v, s[l]), s, l + 1, r, n, sw, ps); } } int calc(int v, const string& s, int l, int r, int n, bool sw, const vector<vector<int>>& good) { int ans = 0; if(sw && l + r < n - 1 && good[l][r]) { ans = cnt[v]; } if(sw) { if(l + r < n) ans += calc(go(v, s[n - r - 1]), s, l, r + 1, n, sw, good); } else { ans += calc(go(v, '$'), s, l, r, n, true, good); if(l < n) ans += calc(go(v, s[l]), s, l + 1, r, n, sw, good); } return ans; } long long solve_short(vector<string> s, int n) { long long ans = 0; clear_trie(); sort(s.begin(), s.end()); for(int i = 0; i < n; i++) { string cur = s[i]; int len = cur.size(); vector<vector<int>> good(len + 1, vector<int>(len + 1)); for(int l = 0; l < len; l++) { set<char> q; for(int r = l; r < len; r++) { q.insert(cur[r]); if(cur[l] != *q.begin() && cur[r] != *q.rbegin()) { good[l][len - r - 1] = 1; } } } vector<int> p = prefix_sum(sorted_segments(cur)); add(root, cur, 0, 0, len, false, p); ans += calc(root, cur, 0, 0, len, false, good); } ans = n * 1ll * (n - 1) - ans; return ans; } long long solve_long(vector<string> s, int n) { int len = s[0].size(); sort(s.begin(), s.end()); vector<string> t = s; for(int i = 0; i < n; i++) reverse(t[i].begin(), t[i].end()); LCP ls(s); LCP lt(t); long long ans = 0; for(int i = 0; i < n; i++) { vector<int> aux = prefix_sum(sorted_segments(s[i])); for(int j = i + 1; j < n; j++) { int lf = ls.query(i, j); int rg = lt.query(i, j); if(aux[len - rg - 1] - aux[lf] == 0) ans++; else ans += 2; } } return ans; } long long solve_class(vector<string> s, int n) { if(n <= K) return solve_long(s, n); else return solve_short(s, n); } vector<int> get_class(string s) { vector<int> c(26); for(auto x : s) c[x - 'a']++; return c; } int main() { pw2[1] = 0; for(int i = 2; i < (1 << LN); i++) pw2[i] = pw2[i >> 1] + 1; int n; cin >> n; vector<string> s(n); for(int i = 0; i < n; i++) cin >> s[i]; long long ans = 0; 
map<vector<int>, vector<string>> classes; for(int i = 0; i < n; i++) classes[get_class(s[i])].push_back(s[i]); int cnt = 0; for(auto x : classes) { ans += solve_class(x.second, x.second.size()); ans += cnt * 1337ll * x.second.size(); cnt += x.second.size(); } cout << ans << endl; }
1536
A
Omkar and Bad Story
Omkar has received a message from Anton saying "Your story for problem A is confusing. Just make a formal statement." Because of this, Omkar gives you an array $a = [a_1, a_2, \ldots, a_n]$ of $n$ distinct integers. An array $b = [b_1, b_2, \ldots, b_k]$ is called \textbf{nice} if for any two distinct elements $b_i, b_j$ of $b$, $|b_i-b_j|$ appears in $b$ at least once. In addition, all elements in $b$ must be distinct. Can you add several (maybe, $0$) integers to $a$ to create a \textbf{nice} array $b$ \textbf{of size at most $300$}? If $a$ is already \textbf{nice}, you don't have to add any elements. For example, array $[3, 6, 9]$ is \textbf{nice}, as $|6-3|=|9-6| = 3$, which appears in the array, and $|9-3| = 6$, which appears in the array, while array $[4, 2, 0, 6, 9]$ is not \textbf{nice}, as $|9-4| = 5$ is not present in the array. For integers $x$ and $y$, $|x-y| = x-y$ if $x > y$ and $|x-y| = y-x$ otherwise.
Consider what happens when $a$ contains a negative number. We first claim that if any negative number exists in $a$, then no solution exists. Denote $p$ as the smallest number in $a$ and $q$ as another arbitrary number in the array (as $n \geq 2$, $q$ always exists). Clearly, $|q - p| = q - p > 0$. However, because $p$ is negative, $q - p > q$. As such, adding $q - p$ to the output array would create the pair $(q - p, p)$ with difference $q - 2p > q - p$. We have the same problem as before; thus, it is impossible to create a nice array if there exists a negative number in $a$. After we deal with this case, we now claim that $b = [0, 1, 2, ..., 100]$ is a valid nice array for any $a$ that contains no negative numbers. It is easy to verify that this is a valid nice array. And since in this case, every element of $a$ is nonnegative and distinct, it is always possible to rearrange and add elements to $a$ to obtain $b$.
[ "brute force", "constructive algorithms" ]
800
import Data.List (intercalate)
import Control.Monad (replicateM)

-- Read t test cases; for each one print "nO" if the array contains a
-- negative number (no nice array can exist), otherwise print the fixed
-- nice array [0..100] of 101 elements, which contains every input value.
main = do
  t <- read <$> getLine
  replicateM t solve

solve = do
  _ <- getLine
  values <- map read . words <$> getLine
  putStrLn (answer values)

-- Pure decision: any negative element makes the task impossible.
answer :: [Integer] -> String
answer values
  | any (< 0) values = "nO"
  | otherwise        = "yEs\n101\n" ++ intercalate " " (map show [0 .. 100])
1536
B
Prinzessin der Verurteilung
I, Fischl, Prinzessin der Verurteilung, descend upon this land by the call of fate an — Oh, you are also a traveler from another world? Very well, I grant you permission to travel with me. It is no surprise Fischl speaks with a strange choice of words. However, this time, not even Oz, her raven friend, can interpret her expressions! Maybe you can help us understand what this young princess is saying? You are given a string of $n$ lowercase Latin letters, the word that Fischl just spoke. You think that the MEX of this string may help you find the meaning behind this message. The MEX of the string is defined as the shortest string that \textbf{doesn't} appear as a contiguous substring in the input. If multiple strings exist, the lexicographically smallest one is considered the MEX. Note that the empty substring does NOT count as a valid MEX. A string $a$ is lexicographically smaller than a string $b$ if and only if one of the following holds: - $a$ is a prefix of $b$, but $a \ne b$; - in the first position where $a$ and $b$ differ, the string $a$ has a letter that appears earlier in the alphabet than the corresponding letter in $b$. A string $a$ is a substring of a string $b$ if $a$ can be obtained from $b$ by deletion of several (possibly, zero or all) characters from the beginning and several (possibly, zero or all) characters from the end. Find out what the MEX of the string is!
Pigeonhole principle What is the longest the answer can be? Let's brute force check all substrings of length <= 3 and print the lexicographically smallest one that doesn't appear as a substring in the input. We can guarantee that we will come across the answer due to the pigeonhole principle. There are at most $n+n-1+n-2 = 3n-3$ possible substrings of length 3 or shorter in the input. There exist $26+26^2+26^3 = 18278$ total substrings of length 3 or shorter. It is impossible for the input to contain all $18278$ substrings, as $3n-3 < 18278$ for $n \leq 1000$. Final runtime looks something like $O(18278n)$ or $O(n)$ depending on how you implement substring checking.
[ "brute force", "constructive algorithms", "strings" ]
1,200
import Data.List (intercalate, tails, isPrefixOf, head)
import Control.Monad (replicateM)
import Data.Maybe (fromJust, listToMaybe, catMaybes)

-- For each test case print the MEX of the given string: the
-- lexicographically smallest string that is not one of its substrings.
main = do
  t <- read <$> getLine
  replicateM t solve

solve = do
  _ <- getLine
  word <- getLine
  putStrLn (leastNonSubstring word)

-- Try candidate lengths 1, 2, ... and return the first hit; within one
-- length, candidates are generated in lexicographic order ('a' first),
-- so the first absent candidate is the answer.
leastNonSubstring s = head (catMaybes (map leastOfLength [1 ..]))
  where
    leastOfLength = grow ""
    grow prefix 0
      | prefix `occursIn` s = Nothing
      | otherwise           = Just prefix
    grow prefix remaining =
      listToMaybe (catMaybes [grow (prefix ++ [c]) (remaining - 1) | c <- ['a' .. 'z']])
    occursIn pat text = any (pat `isPrefixOf`) (tails text)
1536
C
Diluc and Kaeya
The tycoon of a winery empire in Mondstadt, unmatched in every possible way. A thinker in the Knights of Favonius with an exotic appearance. This time, the brothers are dealing with a strange piece of wood marked with their names. This plank of wood can be represented as a string of $n$ characters. Each character is either a 'D' or a 'K'. You want to make some number of cuts (possibly $0$) on this string, partitioning it into several contiguous pieces, each with length at least $1$. Both brothers act with dignity, so they want to split the wood as evenly as possible. They want to know the maximum number of pieces you can split the wood into such that the ratios of the number of occurrences of 'D' to the number of occurrences of 'K' in each chunk are the same. Kaeya, the curious thinker, is interested in the solution for multiple scenarios. He wants to know the answer for every \textbf{prefix} of the given string. Help him to solve this problem! For a string we define a ratio as $a:b$ where 'D' appears in it $a$ times, and 'K' appears $b$ times. Note that $a$ or $b$ can equal $0$, but not both. Ratios $a:b$ and $c:d$ are considered equal if and only if $a\cdot d = b\cdot c$. For example, for the string 'DDD' the ratio will be $3:0$, for 'DKD' — $2:1$, for 'DKK' — $1:2$, and for 'KKKKDD' — $2:4$. Note that the ratios of the latter two strings are equal to each other, but they are not equal to the ratios of the first two strings.
Turn into geometry problem Represent every prefix as $(x, y)$ point in cartesian plane where $x$ = frequency of 'D' and $y$ = frequency of 'K'. Draw a polyline connecting these points in order of increasing length of prefix. Draw a line from origin to point. What can we say about intersections of poly-line with this line? For each prefix, label it with a pair $(a, b)$ where $a$ = frequency of 'D' in this prefix and $b$ = frequency of 'K' in this prefix. Divide $a$ and $b$ by $gcd(a, b)$. If we iterate over all prefixes from left to right, we can notice that the answer for the prefix equals the # of occurrences of this pair we have seen so far! This can be visualized by drawing a poly-line as mentioned in the hints. As for implementation, you can use a map in C++ or a HashMap in Java to achieve $O(n \log n)$ or $O(n)$ runtime.
[ "data structures", "dp", "hashing", "number theory" ]
1,500
import Data.List (intercalate)
import Control.Monad (mapM, replicateM)
import Data.Ratio ((%))
import Data.Map (empty, findWithDefault, insert)

-- For every prefix of the plank, print the maximum number of
-- equal-ratio pieces it can be cut into.
main = do
  t <- read <$> getLine
  replicateM t solve

solve = do
  _ <- getLine
  plank <- getLine
  putStrLn (intercalate " " (map show (maxBlocks plank)))

-- Single left-to-right walk keeping running counts of 'D' and 'K'.
-- The answer for a prefix is the number of prefixes (including itself)
-- sharing the same reduced ratio; a Map from ratio to count tracks this.
maxBlocks word = go word empty 0 0
  where
    go "" _ _ _ = []
    go (ch : rest) seen d k = count : go rest seen' d' k'
      where
        (d', k')
          | ch == 'D' = (d + 1, k)
          | ch == 'K' = (d, k + 1)
        -- k' == 0 means an all-'D' prefix; use an out-of-range sentinel
        -- ratio since d' % 0 would be undefined.
        ratio
          | k' == 0   = 69000000 % 1
          | otherwise = d' % k'
        count = findWithDefault 0 ratio seen + 1
        seen' = insert ratio count seen
1536
D
Omkar and Medians
Uh oh! Ray lost his array yet again! However, Omkar might be able to help because he thinks he has found the OmkArray of Ray's array. The OmkArray of an array $a$ with elements $a_1, a_2, \ldots, a_{2k-1}$, is the array $b$ with elements $b_1, b_2, \ldots, b_{k}$ such that $b_i$ is equal to the median of $a_1, a_2, \ldots, a_{2i-1}$ for all $i$. Omkar has found an array $b$ of size $n$ ($1 \leq n \leq 2 \cdot 10^5$, $-10^9 \leq b_i \leq 10^9$). Given this array $b$, Ray wants to test Omkar's claim and see if $b$ actually is an OmkArray of some array $a$. Can you help Ray? The median of a set of numbers $a_1, a_2, \ldots, a_{2i-1}$ is the number $c_{i}$ where $c_{1}, c_{2}, \ldots, c_{2i-1}$ represents $a_1, a_2, \ldots, a_{2i-1}$ sorted in nondecreasing order.
For some $k<n$, assume $b_1, b_2, \cdots, b_{k}$ is the OmkArray of some $a_1, a_2, \cdots, a_{2k-1}$, and we want to see what values of $a_{2k}, a_{2k+1}$ we can add so that $b_1, b_2, \cdots, b_{k+1}$ is the OmkArray of $a_1, a_2, \cdots, a_{2k+1}$. Let $c_1, c_2, \cdots, c_{2k-1}$ be $a_1, a_2, \cdots, a_{2k-1}$ sorted in ascending order. If $b_{k+1} \geq b_k$, note that $b_{k} = c_k$ and there are $k-2$ elements of $a$ $\geq c_{k+1}$, so no matter how large $a_{2k}, a_{2k+1}$ are there will be at most $k$ elements larger than $c_{k+1}$ in $a_1, a_2, \cdots, a_{2k+1}$. This gives $b_{k+1} \leq c_{k+1}$. We can use a similar argument to show $b_{k+1} \geq c_{k-1}$. Now we want to bound $c_{k+1}$ and $c_{k-1}$. Note that each distinct value among $b_1, b_2, \cdots, b_k$ must appear at least once in $a_1, a_2, \cdots, a_{2k-1}$. Therefore, if $i$, $j$ satisfy that $b_i$ is the largest value of $b_i\leq b_k$ and $i \neq k$, and $b_j$ is the smallest value of $b_j \geq b_k$, $j \neq k$, then we have $c_{k+1} \leq b_j$, $c_{k-1} \geq b_i$, and so $b_i \leq b_{k+1} \leq b_j$. If no such largest/smallest values exist, then we can assume $b_{k+1}$ is not bounded above/below. Therefore, if $b$ has an OmkArray, it is necessary that for all $i$, there does not exist a $j\leq i$ such that $b_j$ is between $b_i$ and $b_{i+1}$, exclusive. I claim this is also sufficient. We can construct such an array $a$ using the following algorithm: Let $a_1 = b_1$. If $b_{i+1}=b_j$ for some $b_j<b_i$ with $j<i$, let $a_{2k-2}, a_{2k-1} = -\infty$ (we can replace $-\infty$ with some sufficiently small constant at the end of our array creation process). Otherwise, if $b_{i+1}<b_i$ then let $a_{2k-2} = -\infty$, $a_{2k-1} = b_{i+1}$. 
If $b_{i+1}=b_j$ for some $b_j>b_i$ with $j<i$, let $a_{2k-2}, a_{2k-1} = \infty$ (we can replace $\infty$ with some sufficiently large constant at the end of our array creation process). Otherwise, if $b_{i+1}>b_i$ then let $a_{2k-2} = \infty$, $a_{2k-1} = b_{i+1}$. Finally, if $b_{i+1} = b_i$, let $a_{2k-2} = -\infty$, $a_{2k-1} = \infty$. This means that an equivalent condition to having an OmkArray is for all $i$, there does not exist a $j\leq i$ such that $b_j$ is between $b_i$ and $b_{i+1}$, exclusive. There are multiple ways to check this for an array $b$, but one clean way would be to keep some TreeSet $s$, and check if $b_{i+1}$ is between \t{s.ceil($b_i$)} and \t{s.floor($b_i$)} for all $i$, and then adding $b_{i+1}$ to $s$ if it is not already added.
[ "data structures", "greedy", "implementation" ]
2,000
import Data.List (intercalate)
import Data.Set (singleton, lookupLT, lookupGT, insert)
import Control.Monad (mapM, replicateM)

-- For each test case print "yEs" if b is a valid OmkArray, "nO" otherwise.
main = do
  t <- read <$> getLine
  replicateM t solve

solve = do
  _ <- getLine
  bs <- map read . words <$> getLine
  putStrLn (if isOmkArray bs then "yEs" else "nO")

-- b is an OmkArray iff no previously seen value lies strictly between
-- consecutive medians: b_{i+1} may not cross the nearest seen values
-- below/above b_i (editorial's TreeSet check, here with Data.Set).
isOmkArray :: [Int] -> Bool
isOmkArray (first : rest) = go rest first (singleton first)
  where
    go [] _ _ = True
    go (cur : more) prev seen
      | blocked   = False
      | otherwise = go more cur (insert cur seen)
      where
        below   = lookupLT prev seen
        above   = lookupGT prev seen
        blocked = maybe False (cur <) below || maybe False (cur >) above
1536
E
Omkar and Forest
Omkar's most recent follower, Ajit, has entered the Holy Forest. Ajit realizes that Omkar's forest is an $n$ by $m$ grid ($1 \leq n, m \leq 2000$) of some non-negative integers. Since the forest is blessed by Omkar, it satisfies some special conditions: - For any two adjacent (sharing a side) cells, the absolute value of the difference of numbers in them is at most $1$. - If the number in some cell is strictly larger than $0$, it should be strictly greater than the number in \textbf{at least one} of the cells adjacent to it. Unfortunately, Ajit is not fully worthy of Omkar's powers yet. He sees each cell as a "0" or a "#". If a cell is labeled as "0", then the number in it must equal $0$. Otherwise, the number in it can be any nonnegative integer. Determine how many different assignments of elements exist such that these special conditions are satisfied. Two assignments are considered different if there exists at least one cell such that the numbers written in it in these assignments are different. Since the answer may be enormous, find the answer modulo $10^9+7$.
Consider forcing some set of '#' positions to be $0$ and the rest to be positive integers. Multisource BFS Imagine picking some subset of '#' and making them $0$. Then there is exactly one way to make all the remaining '#' positive integers. To see why, imagine multisource BFS with all $0$ as the sources. After the BFS, each '#' will be equal to the minimum distance from itself to any $0$ cell. Difference between adjacent cells will be at most $1$. Proof can be shown by contradiction: if two cells with difference $\geq 2$ existed, then the larger of these cells is not labeled with the shortest distance to a source (since the distance from the smaller cell $+ 1$ will be a better choice). Because of the nature of BFS, we can also ensure the second condition is also satisfied, since the only cells that have no neighbor strictly smaller will be the source cells. This is the only valid assignment because if we make any number larger, there will exist a pair of cells with difference $\geq 2$. If we try to make any number smaller, there will exist a cell with positive karma that has no strictly smaller neighbor. If we let $k$ equal to the frequency of '#' in the input, then the answer is $2^k$. Keep in mind of the special case where the input is all '#', in which case you have to subtract $1$. This is because a possible arrangement must contain at least one cell with karma of $0$. Obviously the solution runs in $O(nm)$ time.
[ "combinatorics", "graphs", "math", "shortest paths" ]
2,300
import Data.List (intercalate)
import Control.Monad (replicateM)

md x = mod x 1000000007

-- Answer is 2^(number of '#') mod 1e9+7, minus 1 when the whole grid is
-- '#' (at least one cell must hold a 0 in any valid assignment).
main = do
  t <- read <$> getLine
  replicateM t solve

solve = do
  n : m : [] <- (map read . words) <$> getLine
  free <- (sum . map (sum . map (\chara -> if chara == '#' then 1 else 0))) <$> replicateM n getLine
  putStrLn $ show (if free == n * m then pow 2 free - 1 else pow 2 free)

-- Modular exponentiation by squaring: O(log y) and constant-depth-ish
-- recursion. The previous naive definition recursed y times, which for a
-- 2000x2000 all-'#' grid meant ~4*10^6 stack frames; this matches the
-- modPow style used by the other solutions in this file.
pow :: (Integral a) => Integer -> a -> Integer
pow _ 0 = 1
pow x y = md $ (if even y then 1 else x) * pow (md (x * x)) (div y 2)
1536
F
Omkar and Akmar
Omkar and Akmar are playing a game on a circular board with $n$ ($2 \leq n \leq 10^6$) cells. The cells are numbered from $1$ to $n$ so that for each $i$ ($1 \leq i \leq n-1$) cell $i$ is adjacent to cell $i+1$ and cell $1$ is adjacent to cell $n$. Initially, each cell is empty. Omkar and Akmar take turns placing either an A or a B on the board, with Akmar going first. The letter must be placed on an empty cell. In addition, the letter cannot be placed adjacent to a cell containing the same letter. A player loses when it is their turn and there are no more valid moves. Output the number of possible distinct games where both players play optimally modulo $10^9+7$. Note that we only consider games where some player has lost and there are no more valid moves. Two games are considered distinct if the number of turns is different or for some turn, the letter or cell number that the letter is placed on were different. A move is considered optimal if the move maximizes the player's chance of winning, assuming the other player plays optimally as well. More formally, if the player who has to move has a winning strategy, they have to make a move after which they will still have a winning strategy. If they do not, they can make any move.
Solve a simpler version of the problem, where you just need to print who would win if both players play optimally. Consider the possible ending states of the board. The 2nd player, Omkar, always wins no matter what either player does. The easiest way to see this is by considering ending states of the board. An ending state with an even number of letters means that the 2nd player wins (because the first player is the next player and there are no more moves), and an ending state with an odd number of letters means that the 1st player wins. An ending state must be in the form ABABA... or BABA..., where there are 0 or 1 empty cells in between each letter and the letters form an alternating pattern. If there is more than 1 empty cell in between two cells, then a player will be able to play a letter, thus it is not a valid ending state. If an ending state has two of the same letters next to each other, then it is not a valid ending state. Either they are next to each other, which is illegal, or there is at least one empty cell in between them, which means that a player can play the other letter in between. Since the ending state must form an alternating pattern, there must be an even number of letters. Thus, the 2nd player, Omkar, always wins. Find the implication of the 2nd player always winning on the number of optimal games. Because the 2nd player always wins no matter what, the number of optimal games basically means the total number of possible games. Because of Hint 1 and Hint 2, we want to find the total number of possible games. This can be done by iterating over the number of moves and counting the number of ways to play a game with that number of moves. We want to find the number of games that end in $x$ moves on a board of size $n$. The first step is to calculate the total number of ending states. If $x=n$, the total number of ending states is just $2$ because you can either have ABABA... or BABAB... 
Otherwise, a game that ends in $x$ moves will consist of $x$ letters, for example A|B|A|B|... where a | is a possible location of a single empty cell (there cannot be multiple empty cells next to each other or else it would not be a valid ending state). There are $x$ possible places where there can be an empty cell, and $n-x$ empty cells, so there are $\binom x {n-x}$ ways to choose places to put empty cells. Due to the circular nature of the board, you need to account for the case where the first cell on the board is an empty cell (the previous formula only works if the first cell is not empty). If you set the first cell to be empty, there are now $x-1$ possible places to put an empty cell and $n-x-1$ remaining empty cells, so you have to add $\binom {x-1} {n-x-1}$. Multiply the answer by $2$ to account for starting with an A or B. Finally, multiply by $x!$ to account for all the ways you can reach each ending configuration. Thus, if $x=n$, there are $2 \cdot x!$ optimal games, otherwise there are $2 \cdot (\binom x {n-x} + \binom {x-1} {n-x-1} ) \cdot x!$ optimal games. Add up the number of games that end in $x$ moves for all even $x$ from $\lceil \frac{n}{2} \rceil$ to $n$, inclusive. Thus, the solution is $O(n)$.
[ "chinese remainder theorem", "combinatorics", "constructive algorithms", "fft", "games", "geometry", "math", "meet-in-the-middle", "string suffix structures" ]
2,600
import Data.List (reverse)
import Data.Array (listArray, (!))

-- Prime modulus for all arithmetic below.
chara = 1000000007
md x = mod x chara

main = do
  n <- read <$> getLine
  putStrLn $ show (solve n)

-- Number of distinct optimal games on a circular board of n cells:
-- 2 * sum over even k of k! * (C(k, n-k) + C(k-1, n-k-1));
-- choose returns 0 for out-of-range arguments, so summing over all even
-- k in [0, n] is equivalent to the editorial's range [ceil(n/2), n].
solve n =
  md $ 2 * (sum (map (\k -> md (factorials!k * (choose k (n - k) + choose (k - 1) (n - k - 1)))) [0,2..n]))
  where
    -- factorials!i = i! mod chara; built as [n!, (n-1)!, ..., 0!] by the
    -- inner helper and then reversed.
    factorials = listArray (0, n) (reverse (helper n))
      where
        helper 0 = 1:[]
        helper n' = md (n' * f):fs
          where fs@(f:_) = helper (n' - 1)
    -- invFactorials!i = (i!)^(-1) mod chara; seeded at inv(n!) and derived
    -- downward via inv(i!) = (i+1) * inv((i+1)!).
    invFactorials = listArray (0, n) (helper 0)
      where
        helper n'
          | n' == n = inv (factorials!n):[]
          | otherwise = md ((n' + 1) * f):fs
          where fs@(f:_) = helper (n' + 1)
    -- Binomial coefficient C(a, b) mod chara; 0 unless 0 <= b <= a.
    choose a b
      | 0 <= b && b <= a = md $ factorials!a * (md (invFactorials!b * invFactorials!(a - b)))
      | otherwise = 0

-- Modular exponentiation by squaring.
modPow :: (Integral a) => Integer -> a -> Integer
modPow _ 0 = 1
modPow x y = md $ (if even y then 1 else x) * modPow (md (x * x)) (div y 2)

-- Modular inverse via Fermat's little theorem (chara is prime).
inv x = modPow x (chara - 2)
1537
A
Arithmetic Array
An array $b$ of length $k$ is called good if its arithmetic mean is equal to $1$. More formally, if $$\frac{b_1 + \cdots + b_k}{k}=1.$$ Note that the value $\frac{b_1+\cdots+b_k}{k}$ is not rounded up or down. For example, the array $[1,1,1,2]$ has an arithmetic mean of $1.25$, which is not equal to $1$. You are given an integer array $a$ of length $n$. In an operation, you can append a \textbf{non-negative} integer to the end of the array. What's the minimum number of operations required to make the array good? We have a proof that it is always possible with finitely many operations.
To make the arithmetic mean be equal to exactly $1$ the sum needs to be equal to the number of elements in the array. Let's consider $3$ cases for this problem: 1) The sum of the array equals $n$: Here the answer is $0$ since the arithmetic mean of the array is initially $1$. 2) The sum of the array is smaller than $n$: The answer is always $1$ since we can add a single integer $k$ such that $sum + k = n + 1$ is satisfied and more specifically $k = n - sum + 1$. 3) The sum of the array is greater than $n$: If we add any number apart from $0$ will add to the sum more or equal than to the number of elements. The number of $0$'s to add can be found by a loop of adding $0$'s until the number of elements is equal to the sum or by the simple formula of $sum-n$.
[ "greedy", "math" ]
800
#include "bits/stdc++.h"
using namespace std;

// 1537A: the mean is 1 iff sum == length. If the sum falls short, one
// appended element (n - sum + 1) fixes it; otherwise append (sum - n)
// zeros — which is 0 operations when the mean is already 1.
int main() {
    int t;
    cin >> t;
    for (; t > 0; --t) {
        int n;
        cin >> n;
        int total = 0;
        for (int idx = 0; idx < n; ++idx) {
            int value;
            cin >> value;
            total += value;
        }
        if (total < n)
            cout << "1\n";
        else
            cout << total - n << "\n";
    }
}
1537
B
Bad Boy
Riley is a very bad boy, but at the same time, he is a yo-yo master. So, he decided to use his yo-yo skills to annoy his friend Anton. Anton's room can be represented as a grid with $n$ rows and $m$ columns. Let $(i, j)$ denote the cell in row $i$ and column $j$. Anton is currently standing at position $(i, j)$ in his room. To annoy Anton, Riley decided to throw exactly \textbf{two} yo-yos in cells of the room (they can be in the same cell). Because Anton doesn't like yo-yos thrown on the floor, he has to pick up both of them and return back to the initial position. The distance travelled by Anton is the shortest path that goes through the positions of both yo-yos and returns back to $(i, j)$ by travelling only to adjacent by side cells. That is, if he is in cell $(x, y)$ then he can travel to the cells $(x + 1, y)$, $(x - 1, y)$, $(x, y + 1)$ and $(x, y - 1)$ in one step (if a cell with those coordinates exists). Riley is wondering where he should throw these two yo-yos so that the distance travelled by Anton is \textbf{maximized}. But because he is very busy, he asked you to tell him.
We can notice that the optimal strategy is to put the yoyos in the corners of the board. One solution may be checking the best distance for all pairs of corners. But, if we think a bit more, we can notice that placing the yoyos in opposite corners the distance will always be maximum possible (the distance always being $2 \cdot (n - 1) + 2 \cdot (m - 1)$). So, one possible answer is to always place the first yoyo in the top-left cell and the second one in the bottom-right cell. This is always optimal because, for any initial position of Anton, the distance will still be the same ($2 \cdot (n - 1) + 2 \cdot (m - 1)$), this being the largest possible distance. The distance can not get larger than that, because if we move one of the yoyos it will get closer to the other yoyo and the distance will decrease by $1$ or won't decrease, but it's impossible for it to increase.
[ "constructive algorithms", "greedy", "math" ]
900
#include "bits/stdc++.h"
using namespace std;

// 1537B: throwing the yo-yos into opposite corners (1,1) and (n,m)
// always maximizes Anton's round trip, regardless of his start (i,j),
// so the answer ignores the starting position entirely.
int main() {
    int t;
    cin >> t;
    while (t-- > 0) {
        int rows, cols, sr, sc;
        cin >> rows >> cols >> sr >> sc;  // start (sr, sc) is read but unused
        cout << 1 << " " << 1 << " " << rows << " " << cols << "\n";
    }
}
1537
C
Challenging Cliffs
You are a game designer and want to make an obstacle course. The player will walk from left to right. You have $n$ heights of mountains already selected and want to arrange them so that the absolute difference of the heights of the first and last mountains is as small as possible. In addition, you want to make the game difficult, and since walking uphill or flat is harder than walking downhill, the difficulty of the level will be the number of mountains $i$ ($1 \leq i < n$) such that $h_i \leq h_{i+1}$ where $h_i$ is the height of the $i$-th mountain. You don't want to waste any of the mountains you modelled, so you have to use all of them. From all the arrangements that minimize $|h_1-h_n|$, find one that is the most difficult. If there are multiple orders that satisfy these requirements, you may find any.
We claim that the maximum difficulty is at least $n-2$. Assume the array is sorted. We first need to find the two mountains which go on the ends. To do this, we can iterate through every mountain in the sorted array and check the difference between a mountain and its neighbours in the array. Let $m_k$ and $m_{k+1}$ be the mountains with the smallest height difference. We can achieve at least a difficulty of $n-2$ by arranging the mountains as $m_k, m_{k+2}, m_{k+3} ... m_n, m_1, m_2, ....., m_{k+1}$. To get difficulty $n-1$, we need $m_k$ to be the shortest mountain and $m_{k+1}$ to be the tallest mountain. This will only happen if $n = 2$.
[ "constructive algorithms", "greedy", "implementation", "math" ]
1,200
#include "bits/stdc++.h"
using namespace std;

// 1537C: sort the heights; the two ends must be the adjacent sorted pair
// with the smallest gap. Printing h[pos..n-1] followed by h[0..pos-1]
// achieves difficulty n-2 (n == 2 is the trivial special case).
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> h(n);
        for (auto& height : h) cin >> height;
        sort(h.begin(), h.end());
        if (n == 2) {
            cout << h[0] << " " << h[1] << "\n";
            continue;
        }
        // First index whose gap to its left neighbor is minimal.
        int best = 1;
        for (int i = 2; i < n; i++)
            if (h[i] - h[i - 1] < h[best] - h[best - 1]) best = i;
        for (int i = best; i < n; i++) cout << h[i] << " ";
        for (int i = 0; i < best; i++) cout << h[i] << " ";
        cout << "\n";
    }
}
1537
D
Deleting Divisors
Alice and Bob are playing a game. They start with a positive integer $n$ and take alternating turns doing operations on it. Each turn a player can subtract from $n$ one of its divisors that isn't $1$ or $n$. The player who cannot make a move on his/her turn loses. Alice always moves first. Note that they subtract a divisor of the \textbf{current} number in each turn. You are asked to find out who will win the game if both players play optimally.
Let's consider $3$ cases for this problem: 1) n is odd 2) n is even, and $n$ is not a power of $2$ 3) n is a power of $2$ If $n$ is odd, the only move is to subtract an odd divisor (since all the divisors are odd). Doing this, we will obtain an even number that is not a power of $2$(case 2). If $D$ is the divisor of $n$, then $n-D$ must also be divisible by $D$, and since $D$ is odd, $n-D$ cannot be a power of $2$. If $n$ is even and is not a power of $2$, it means that $n$ has an odd divisor. By subtracting this odd divisor, we will obtain $n-D$ is odd(case 1). Now let's show that subtracting an odd divisor every move results in a win. Primes are losing since the only move you can make on them is subtracting the entire number, which results in $n = 0$ and a loss. Since every prime is odd or a power of 2, it works to give the other player an odd number because it will either be a prime(the other player loses), or they will make a move and give you another even number that is not a power of 2. You can continue this process because you will never land on a losing number and because the game must end after a finite number of moves, your opponent must always lose. So we proved that odd numbers are losing and even numbers that are not powers of $2$ are winning. What if $n$ is a power of $2$? You can do two things in one move, halve $n$ or make n an even number that is not a power of $2$(we proved that this is a winning position for the other player). The only optimal move is to halve $n$, making it another power of $2$. The players continue like this until one gets $2$, which is a prime number, so it's losing. If $log_2(n)$ is even, Alice wins, otherwise Bob wins.
[ "games", "math", "number theory" ]
1,700
#include "bits/stdc++.h"
using namespace std;

// Subtraction game on proper divisors: odd n is a losing position, an even
// n that is not a power of two is winning, and n = 2^e is decided by the
// parity of e (the only good move from a power of two is to halve it).
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        const char *winner;
        if (n % 2 != 0) {
            winner = "Bob";                  // odd: player to move loses
        } else if ((n & (n - 1)) != 0) {
            winner = "Alice";                // even, not a power of two
        } else {
            // Here n == 2^e; extract the exponent.
            int e = 0;
            for (int m = n; m > 1; m >>= 1) ++e;
            winner = (e % 2 == 0) ? "Alice" : "Bob";
        }
        cout << winner << "\n";
    }
}
1537
E1
Erase and Extend (Easy Version)
\textbf{This is the easy version of the problem. The only difference is the constraints on $n$ and $k$. You can make hacks only if all versions of the problem are solved.} You have a string $s$, and you can do two types of operations on it: - Delete the last character of the string. - Duplicate the string: $s:=s+s$, where $+$ denotes concatenation. You can use each operation any number of times (possibly none). Your task is to find the lexicographically smallest string of length exactly $k$ that can be obtained by doing these operations on string $s$. A string $a$ is lexicographically smaller than a string $b$ if and only if one of the following holds: - $a$ is a prefix of $b$, but $a\ne b$; - In the first position where $a$ and $b$ differ, the string $a$ has a letter that appears earlier in the alphabet than the corresponding letter in $b$.
We claim that it is optimal to choose a prefix of the string, then duplicate it until we have a length bigger than $k$, then delete the excess elements. Let's relax the requirement so you have a position in the string and each time you either return to the beginning or advance to the next character. The answer will be the first k characters of the lexicographically smallest infinite string. From a given position, the optimal infinite string from it is unique, so you can pick exactly one optimal decision. Now the optimal string is going around some cycle from the start, and we see it also satisfies the original requirement, not just our relaxation We proved that it is optimal to choose a prefix, duplicate it until the length is bigger than k then delete the excess. As the constraints are low, we can iterate through every prefix on the original string and make a string of length $k$ with it. Then we take the minimum of all these strings as the answer. Solution complexity $O(n\cdot k)$.
[ "binary search", "brute force", "dp", "greedy", "hashing", "implementation", "string suffix structures", "strings", "two pointers" ]
1,600
#include "bits/stdc++.h"
using namespace std;

// Build the length-k string produced by repeating prefix s cyclically
// (equivalent to duplicating the string until its size reaches k and then
// trimming the excess, as in the statement's operations).
string get(string s, int k) {
    string out;
    out.reserve(k);
    for (int i = 0; i < k; ++i) out.push_back(s[i % (int)s.size()]);
    return out;
}

// The optimal answer is some prefix of s repeated out to length k, so try
// every candidate prefix.  As soon as a character exceeds s[0], no longer
// prefix can improve the answer and we may stop early.
int main() {
    int n, k;
    string s;
    cin >> n >> k >> s;
    string prefix(1, s[0]);
    string best = get(prefix, k);
    for (int i = 1; i < n; ++i) {
        if (s[i] > s[0]) break;
        prefix.push_back(s[i]);
        best = min(best, get(prefix, k));
    }
    cout << best << "\n";
}
1537
E2
Erase and Extend (Hard Version)
\textbf{This is the hard version of the problem. The only difference is the constraints on $n$ and $k$. You can make hacks only if all versions of the problem are solved.} You have a string $s$, and you can do two types of operations on it: - Delete the last character of the string. - Duplicate the string: $s:=s+s$, where $+$ denotes concatenation. You can use each operation any number of times (possibly none). Your task is to find the lexicographically smallest string of length exactly $k$ that can be obtained by doing these operations on string $s$. A string $a$ is lexicographically smaller than a string $b$ if and only if one of the following holds: - $a$ is a prefix of $b$, but $a\ne b$; - In the first position where $a$ and $b$ differ, the string $a$ has a letter that appears earlier in the alphabet than the corresponding letter in $b$.
We know that the final string is some prefix repeated a bunch of times. Incrementally for $i$ from $1$ to $n$ we will keep the longest among the first $i$ prefixes that gives the best answer we've seen so far. So assume the $m-th$ prefix is currently the best and we're considering position $p$. If the $p-th$ character is greater than the corresponding character in $s_{1..m} \cdot (a lot)$ then the $p-th$ prefix and any further prefixes can't possibly give a smaller answer, so we just print the current one and finish. Otherwise all the characters before the $p-th$ are all less than or equal to the corresponding characters in $s_1..m \cdot (a lot)$, so if the $p-th$ is smaller than the corresponding we set the $p-th$ prefix as the best. Now the interesting case is if the current character is the same as the corresponding one. Say then that $p = m + t$, by the logic of the previous paragraph we must have $s_{(m + 1)..(m + t)} = s_{1..t}$. If $t = m$ then the new prefix is just the old one twice, so set $p$ as the best prefix now. This ensures that otherwise $t < m$. Denote $A = s_{1..t}$ and $B = s_{(t + 1)..m}$, so the string formed by the current best prefix is $ABABABABA...$ and the new one is $ABAABAABA...$ Now if $AB = BA$ then these strings are in fact the same, so set $m + t$ as the new best prefix. Otherwise we can find the first position where $AB$ and $BA$ differ, and use that to determine whether the new prefix is better. This can be done in $O(1)$ with Z function, thus giving a linear solution for the full problem.
[ "binary search", "data structures", "greedy", "hashing", "string suffix structures", "strings", "two pointers" ]
2,200
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

// Globals shared between main() and finish():
// s — the input string, n — its length, k — required output length.
string s;
int n, k;

// Standard Z-function: z[i] = length of the longest common prefix of s
// and the suffix of s starting at i (z[0] is set to the whole length).
vector<int> z_func(string &s) {
    int n = s.size(), L = -1, R = -1;
    vector<int> z(n);
    z[0] = n;
    for(int i = 1; i < n; i++) {
        if(i <= R) z[i] = min(z[i - L], R - i + 1);
        while(i + z[i] < n && s[i + z[i]] == s[z[i]]) z[i]++;
        if(i + z[i] - 1 > R) { L = i; R = i + z[i] - 1; }
    }
    return z;
}

// Print the answer — the first k characters of the prefix of length m
// repeated forever — and terminate the program.
void finish(int m) {
    for(int i = 0; i < k; i++) cout << s[i % m];
    cout << '\n';
    exit(0);
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(0);
    cin >> n >> k >> s;
    auto z = z_func(s);
    // cur — length of the best prefix found so far; the candidate answer is
    // that prefix repeated cyclically (period cur).
    int cur = 1;
    for(int i = 1; i < n; i++) {
        // Compare s[i] against the character the current best infinite
        // string has at position i.
        if(s[i] > s[i % cur]) finish(cur);                // no longer prefix can win
        if(s[i] < s[i % cur]) { cur = i + 1; continue; }  // strictly better: extend
        // Tie so far.  off corresponds to t in the editorial: how far the
        // match extends past the current best prefix.
        int off = i - cur + 1;
        if(off == cur) { cur = i + 1; continue; }  // new prefix = old one twice
        // Compare the two candidate infinite strings ABAB... vs ABAABA...
        // (A = first off chars, B = remainder of the best prefix) by
        // locating the first mismatch of AB vs BA via the Z-array.
        if(z[off] < cur - off) {
            // Mismatch inside B; if it lies at or beyond position k the two
            // outputs agree on the first k chars, so the choice is harmless.
            if(cur + off + z[off] >= k) { cur = i + 1; continue; }
            if(s[off + z[off]] > s[z[off]]) cur = i + 1;
            continue;
        }
        if(z[cur - off] < off) {
            // Mismatch in the second comparison direction (BA vs AB).
            if(2 * cur + z[cur - off] >= k) { cur = i + 1; continue; }
            if(s[cur - off + z[cur - off]] < s[z[cur - off]]) cur = i + 1;
            continue;
        }
        cur = i + 1;  // AB == BA: both prefixes generate the same string
    }
    finish(cur);
}
1537
F
Figure Fixing
You have a connected undirected graph made of $n$ nodes and $m$ edges. The $i$-th node has a value $v_i$ and a target value $t_i$. In an operation, you can choose an edge $(i, j)$ and add $k$ to both $v_i$ and $v_j$, where $k$ can be any \textbf{integer}. In particular, $k$ can be negative. Your task to determine if it is possible that by doing some finite number of operations (possibly zero), you can achieve for every node $i$, $v_i = t_i$.
If the parity of the sum of the initial values doesn't match the parity of the sum of the target values then there is no solution. Because $k$ is an integer and we always add the value $2 \cdot k$ to the sum of the initial values in each operation it's easy to notice that the parity of the sum of the initial values never changes. Otherwise, let's consider $2$ cases: 1) The graph is bipartite. 2) The graph is not bipartite. If the graph is bipartite, let the nodes be coloured red and blue with the condition that all the neighbors of any red node are blue and all the neighbours of any blue node are red. Let us call $sum1 = \sum target_i-value_i$ for each blue node and $sum2 = \sum target_i-value_i$ for each red node. We want to determine if we can make $target_i = value_i$ for each node, which is equivalent to saying $sum1 = 0$ and $sum2 = 0$. We notice that the difference between $sum1$ and $sum2$ is invariant in a bipartite graph because all operations will add to $sum1$ and $sum2$ at the same time. So to make $sum1 = 0$ and $sum2 = 0$ we need $sum1-sum2$ to be equal to $0$ initially. If the graph is not bipartite, then it is always possible because if the graph is not bipartite, it contains two neighboring vertices of the same color, which can be used to add or subtract from their color sum.
[ "constructive algorithms", "dfs and similar", "dsu", "graphs", "greedy", "math" ]
2,200
#include "bits/stdc++.h"
using namespace std;
const int N = 2e5 + 10;

// Adjacency lists of the graph; reused across test cases (cleared in main).
vector<long long> adj[N];
// s[i] — BFS 2-colouring of node i (-1 = unvisited); n, m — node/edge counts.
long long s[N], n, m;

// BFS-colour every component with colours 0/1; returns true iff the whole
// graph is bipartite.  Side effect: fills s[] with the colouring.
bool bipartite() {
    bool bip = true;
    for(long long i = 0;i < n;i++) s[i] = -1;
    queue<long long> q;
    for(long long i = 0;i < n;i++){
        if(s[i] != -1)continue;
        q.push(i);
        s[i] = 0;
        while(!q.empty()){
            long long v = q.front();
            q.pop();
            for(long long u: adj[v]){
                if(s[u] == -1){
                    s[u] = s[v] ^ 1;
                    q.push(u);
                }else bip &= s[u] != s[v];  // edge inside one colour class -> not bipartite
            }
        }
    }
    return bip;
}

int main() {
    ios_base::sync_with_stdio(0);cin.tie(0);
    long long T;
    cin >> T;
    while(T--) {
        cin >> n >> m;
        for(long long i = 0;i < n;i++) adj[i].clear();
        vector<long long> v(n), t(n);
        // p1/p2 — parity of the sums of initial/target values; every
        // operation changes the total by 2k, so this parity is invariant.
        long long p1 = 0, p2 = 0;
        for(long long i = 0;i < n; i++){
            cin >> v[i];
            p1 = (p1 + abs(v[i])) % 2;
        }
        for (long long i = 0;i < n; i++){
            cin >> t[i];
            p2 = (p2 + abs(t[i])) % 2;
        }
        for(long long i = 0;i < m;i++){
            long long a, b;
            cin >> a >> b;
            --a, --b;  // 1-based input to 0-based indices
            adj[a].push_back(b);
            adj[b].push_back(a);
        }
        if(p1 != p2){ cout << "NO\n"; continue; }
        if(bipartite() == false){
            // Non-bipartite: an odd cycle yields two same-coloured
            // neighbours, so parity agreement alone suffices.
            cout << "YES\n";
        }else{
            // Bipartite: the difference of the two sides' totals of
            // (v - t) is invariant, so they must already be equal.
            vector<long long> c(2, 0);
            for(int i = 0;i < n;i++){
                c[s[i]] += v[i] - t[i];
            }
            if(c[0] == c[1]){ cout << "YES\n"; }else cout << "NO\n";
        }
    }
}
1538
A
Stone Game
Polycarp is playing a new computer game. This game has $n$ stones in a row. The stone on the position $i$ has integer power $a_i$. \textbf{The powers of all stones are distinct}. Each turn Polycarp can destroy either stone on the first position or stone on the last position (in other words, either the leftmost or the rightmost stone). When Polycarp destroys the stone it does not exist any more. Now, Polycarp wants two achievements. He gets them if he destroys the stone with the \textbf{least} power and the stone with the \textbf{greatest} power. Help Polycarp find out what is the minimum number of moves he should make in order to achieve his goal. For example, if $n = 5$ and $a = [1, 5, 4, 3, 2]$, then Polycarp could make the following moves: - Destroy the leftmost stone. After this move $a = [5, 4, 3, 2]$; - Destroy the rightmost stone. After this move $a = [5, 4, 3]$; - Destroy the leftmost stone. After this move $a = [4, 3]$. Polycarp destroyed the stones with the greatest and least power, so he can end the game. Please note that in the example above, you can complete the game in two steps. For example: - Destroy the leftmost stone. After this move $a = [5, 4, 3, 2]$; - Destroy the leftmost stone. After this move $a = [4, 3, 2]$. Polycarp destroyed the stones with the greatest and least power, so he can end the game.
If we want to destroy the largest and smallest stone, then there are only four options: Destroy the stones on the left until we destroy the smallest stone. Then destroy the stones on the right, until we destroy the largest stone. Destroy the stones on the right until we destroy the smallest stone. Then destroy the stones on the left, until we destroy the largest stone. Destroy the stones on the left until we destroy both stones. Destroy the stones on the right until we destroy both stones. You need to check all four options and choose the minimum answer.
[ "brute force", "dp", "greedy" ]
800
#include <bits/stdc++.h>
using namespace std;

// Stones are destroyed only from the ends, so the answer is the cheapest of
// four plans: clear from the left past both extremes, clear from the right
// past both, or take one extreme from each side (both orderings).
void solve() {
    int n;
    cin >> n;
    vector<int> a(n);
    for (int &x : a) cin >> x;
    int hi = int(max_element(a.begin(), a.end()) - a.begin());
    int lo = int(min_element(a.begin(), a.end()) - a.begin());
    int left = min(lo, hi), right = max(lo, hi);   // nearer / farther from index 0
    int onlyLeft  = right + 1;                     // everything up to the farther one
    int onlyRight = n - left;                      // everything down to the nearer one
    int mixed1 = (left + 1) + (n - right);         // nearer from left, farther from right
    int mixed2 = (right + 1) + (n - left);         // the opposite pairing
    cout << min({onlyLeft, onlyRight, mixed1, mixed2}) << "\n";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) solve();
}
1538
B
Friends and Candies
Polycarp has $n$ friends, the $i$-th of his friends has $a_i$ candies. Polycarp's friends do not like when they have different numbers of candies. In other words they want all $a_i$ to be the same. To solve this, Polycarp performs the following set of actions exactly \textbf{once}: - Polycarp chooses $k$ ($0 \le k \le n$) arbitrary friends (let's say he chooses friends with indices $i_1, i_2, \ldots, i_k$); - Polycarp distributes their $a_{i_1} + a_{i_2} + \ldots + a_{i_k}$ candies among all $n$ friends. During distribution for each of $a_{i_1} + a_{i_2} + \ldots + a_{i_k}$ candies he chooses new owner. That can be any of $n$ friends. Note, that any candy can be given to the person, who has owned that candy before the distribution process. Note that the number $k$ is not fixed in advance and can be arbitrary. Your task is to find the minimum value of $k$. For example, if $n=4$ and $a=[4, 5, 2, 5]$, then Polycarp could make the following distribution of the candies: - Polycarp chooses $k=2$ friends with indices $i=[2, 4]$ and distributes $a_2 + a_4 = 10$ candies to make $a=[4, 4, 4, 4]$ (two candies go to person $3$). Note that in this example Polycarp cannot choose $k=1$ friend so that he can redistribute candies so that in the end all $a_i$ are equal. For the data $n$ and $a$, determine the \textbf{minimum} value $k$. With this value $k$, Polycarp should be able to select $k$ friends and redistribute their candies so that everyone will end up with the same number of candies.
Let's denote for $s$ the number of candies all friends have: $s = \sum\limits_{i=1}^{n} a_i$. Note that at the end, each friend must have $\frac{s}{n}$ of candy. If $s$ is not completely divisible by $n$, then there is no answer. How to get the answer if it exists? If the $i$-th friend has more candies than $\frac{s}{n}$, then he must be chosen by Polycarp (otherwise this friend will have more candies than the others). If the $i$-th friend has no more than $\frac{s}{n}$, then Polycarp may not choose it. Then, if the answer exists, it is equal to the number of $a_i > \frac{s}{n}$.
[ "greedy", "math" ]
800
#include <bits/stdc++.h>
using namespace std;

// Everyone must end with sum/n candies.  If the total is not divisible by n
// the task is impossible (-1); otherwise exactly the friends holding more
// than the average must be chosen, so the answer is their count.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        vector<int> a(n);
        long long total = 0;
        for (int &x : a) {
            cin >> x;
            total += x;
        }
        if (total % n != 0) {
            cout << -1 << endl;
            continue;
        }
        const long long avg = total / n;
        cout << count_if(a.begin(), a.end(),
                         [&](int x) { return x > avg; })
             << endl;
    }
    return 0;
}
1538
C
Number of Pairs
You are given an array $a$ of $n$ integers. Find the number of pairs $(i, j)$ ($1 \le i < j \le n$) where the sum of $a_i + a_j$ is greater than or equal to $l$ and less than or equal to $r$ (that is, $l \le a_i + a_j \le r$). For example, if $n = 3$, $a = [5, 1, 2]$, $l = 4$ and $r = 7$, then two pairs are suitable: - $i=1$ and $j=2$ ($4 \le 5 + 1 \le 7$); - $i=1$ and $j=3$ ($4 \le 5 + 2 \le 7$).
The problem can be divided into two classic ones: Count the number of pairs $a_i+a_j \le r$; Count the number of pairs $a_i+a_j \le l-1$. Let $A$ - be the answer to the first problem, and $B$ - be the answer to the second problem. Then $A-B$ is the answer to the original problem. The new problem can be solved by binary search. Iterate over the first element of the pair. Then you need to count the number of elements such that $a_j \le r - a_i$. If you sort the array, this value can be calculated by running a single binary search.
[ "binary search", "data structures", "math", "two pointers" ]
1,300
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

// Count pairs (i < j) with l <= a_i + a_j <= r.
// Sort once; for each element binary-search how many array values fall in
// [l - a_i, r - a_i].  That tally counts every ordered pair twice and also
// the "pair" (i, i) whenever 2*a_i lies in range, so subtract the
// self-pairs and halve at the end.
void solve() {
    int n, l, r;
    cin >> n >> l >> r;
    vector<int> v(n);
    for (int &e : v) {
        cin >> e;
    }
    sort(v.begin(), v.end());
    ll ans = 0;
    for (int i = 0; i < n; i++) {
        ans += upper_bound(v.begin(), v.end(), r - v[i]) - v.begin();
        ans -= lower_bound(v.begin(), v.end(), l - v[i]) - v.begin();
        // Fix: compute the doubled value in 64 bits.  With a_i up to 1e9,
        // the original `2 * v[i]` overflows int (signed overflow is UB).
        if (l <= 2ll * v[i] && 2ll * v[i] <= r) {
            ans--;
        }
    }
    cout << ans / 2 << "\n";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1538
D
Another Problem About Dividing Numbers
You are given two integers $a$ and $b$. In one turn, you can do one of the following operations: - Take an integer $c$ ($c > 1$ and \textbf{$a$ should be divisible by $c$}) and replace $a$ with $\frac{a}{c}$; - Take an integer $c$ ($c > 1$ and \textbf{$b$ should be divisible by $c$}) and replace $b$ with $\frac{b}{c}$. Your goal is to make $a$ equal to $b$ using exactly $k$ turns. For example, the numbers $a=36$ and $b=48$ can be made equal in $4$ moves: - $c=6$, divide $b$ by $c$ $\Rightarrow$ $a=36$, $b=8$; - $c=2$, divide $a$ by $c$ $\Rightarrow$ $a=18$, $b=8$; - $c=9$, divide $a$ by $c$ $\Rightarrow$ $a=2$, $b=8$; - $c=4$, divide $b$ by $c$ $\Rightarrow$ $a=2$, $b=2$. For the given numbers $a$ and $b$, determine whether it is possible to make them equal using exactly $k$ turns.
Let's denote for $n$ the maximum number of moves for which the numbers $a$ and $b$ can be made equal. It is easy to understand that the number of moves is maximum when $a=b=1$ and each time we divided $a$ or $b$ by a prime number. That is, $n=$ sum of exponents of prime divisors of $a+$ sum of exponents of prime divisors of $b$. Let's denote by $m$ the minimum number of moves for which the numbers $a$ and $b$ can be made equal. Consider a few cases: If $a=b$, then $m=0$; If $gcd (a, b)=a$ or $gcd (a, b)=b$, then $m=1$; Otherwise, then $m=2$. Then, the answer "Yes" is possible in the following cases: $m \le k \le n$ and $k=1$ and $m = 1$, or, $m \le k \le n$ and $k \ne 1$.
[ "constructive algorithms", "math", "number theory" ]
1,700
#include <bits/stdc++.h>
using namespace std;
using ll = long long;
using ld = long double;
using pii = pair<int, int>;
using cd = complex<ld>;

// Sieve limit: 50'000 > sqrt(1e9), enough to trial-divide 32-bit inputs.
const int N = 50'000;
bool isPrime[N];
vector<int> primes;

// Sieve of Eratosthenes over [2, N); collects all primes below N.
void precalc() {
    fill(isPrime + 2, isPrime + N, true);
    for (int i = 2; i * i < N; i++) {
        for (int j = i * i; j < N; j += i) {
            isPrime[j] = false;
        }
    }
    for (int i = 2; i < N; i++) {
        if (isPrime[i]) {
            primes.push_back(i);
        }
    }
}

// Returns the total number of prime factors of n counted with
// multiplicity (trial division by the sieved primes).
int calcPrime(int n) {
    int res = 0;
    for (int i : primes) {
        if (i * i > n) {
            break;
        }
        while (n % i == 0) {
            n /= i;
            res++;
        }
    }
    if (n > 1) {
        res++;
    }
    return res;
}

// Full prime factorisation of n as {prime -> exponent}.
map<int, int> decompose(int n) {
    map<int, int> a;
    for (int i : primes) {
        if (i * i > n) {
            break;
        }
        int p = 0;
        while (n % i == 0) {
            n /= i;
            p++;
        }
        if (p > 0) {
            a[i] = p;
        }
    }
    if (n > 1) {
        a[n] = 1;
    }
    return a;
}

// Recursive feasibility test over the primes of divs (the shared part g).
// low  — 2-bit mask: bit 0 / bit 1 set when a / b still needs at least one
//        division; high — running maximum number of single-prime moves.
// NOTE(review): the base case relies on every move count between
// popcount(low) and high being achievable — confirm against the editorial's
// min/max argument.
bool check(const map<int, int> &divs, map<int, int>::const_iterator it, map<int, int> &divsA, map<int, int> &divsB, int low, int high, int k) {
    if (it == divs.end()) {
        return __builtin_popcount(low) <= k && k <= high;
    }
    // Try every common target exponent p for the current shared prime.
    for (int p = 0; p <= it->second; p++) {
        int pa = divsA[it->first];
        int pb = divsB[it->first];
        int nextLow = low;
        if (p != pa) { nextLow |= 1; }  // a must be divided at least once
        if (p != pb) { nextLow |= 2; }  // b must be divided at least once
        if (check(divs, next(it), divsA, divsB, nextLow, high + pa + pb - 2 * p, k)) {
            return true;
        }
    }
    return false;
}

// Per test case: split a (and b) into the part sharing factors with
// g = gcd(a, b) — kept in ta/tb — and the coprime remainder, whose prime
// factors must all be divided away (forced moves added to high/low).
// Then search over the shared primes with check().
void solve() {
    int a, b, k;
    cin >> a >> b >> k;
    int g = __gcd(a, b);
    int low = 0;
    int high = 0;
    {
        int t;
        int ta = 1;
        // Repeatedly strip from a everything sharing a factor with g.
        while ((t = __gcd(a, g)) != 1) {
            a /= t;
            ta *= t;
        }
        high += calcPrime(a);   // leftover coprime part: each prime = one move
        if (a != 1) {
            low |= 1;           // a needs at least one move in any plan
        }
        a = ta;
    }
    {
        int t;
        int tb = 1;
        while ((t = __gcd(b, g)) != 1) {
            b /= t;
            tb *= t;
        }
        high += calcPrime(b);
        if (b != 1) {
            low |= 2;
        }
        b = tb;
    }
    auto divs = decompose(g);
    auto divsA = decompose(a);
    auto divsB = decompose(b);
    cout << (check(divs, divs.begin(), divsA, divsB, low, high, k) ? "YES" : "NO") << endl;
}

int main() {
    precalc();
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
    return 0;
}
1538
E
Funny Substrings
Polycarp came up with a new programming language. There are only two types of statements in it: - "x := s": assign the variable named x the value s (where s is a string). For example, the statement var := hello assigns the variable named var the value hello. Note that s is the value of a string, not the name of a variable. Between the variable name, the := operator and the string contains exactly one space each. - "x = a + b": assign the variable named x the concatenation of values of two variables a and b. For example, if the program consists of three statements a := hello, b := world, c = a + b, then the variable c will contain the string helloworld. It is guaranteed that the program is correct and the variables a and b were previously defined. There is exactly one space between the variable names and the = and + operators. All variable names and strings only consist of lowercase letters of the English alphabet and do not exceed $5$ characters. The result of the program is the number of occurrences of string haha in the string that was written to the variable in the last statement. Polycarp was very tired while inventing that language. He asks you to implement it. Your task is — for given program statements calculate the number of occurrences of string haha in the last assigned variable.
We can't model this process directly, since the maximum string length reaches $2^{50}$ (look at the second example from the statements). To optimize this process, you can store each row as a set of the following values. Number of occurrences of haha in the string - $cnt$. String length - $length$. The first three characters of the string are - $pref$. The last three characters of the string are - $suff$. Then, to process the second type of request and combine the two strings $a$ and $b$ into the string $c$, you need: $c.length = a.length + b.length$. $c.cnt = a.cnt + b.cnt + count(a.suff + b.pref, haha)$. (New occurrences may be added at the junction of two words) $c. pref = a. pref$ (However, if the string length is less than $6=3+3$, then you need to handle this case carefully with your hands) $c. suff = b. suff$ (Similarly, you need to process small strings separately).
[ "data structures", "hashing", "implementation", "matrices", "strings" ]
2,100
#include <bits/stdc++.h>
#include "random"
using namespace std;
using ll = long long;
using ld = long double;
using pii = pair<int, int>;
using cd = complex<ld>;

// Split s on separator p.  NOTE(review): not referenced anywhere below —
// apparently left over from an earlier parsing approach.
vector<string> split(const string& s, char p) {
    vector<string> res(1);
    for (char c : s) {
        if (c == p) {
            res.emplace_back();
        } else {
            res.back() += c;
        }
    }
    return res;
}

// Compressed representation of a (possibly huge) variable value:
// len — true length, cnt — occurrences of "haha",
// s   — the full text while short, otherwise "first3@last3"; the '@'
//       separator cannot appear in input (lowercase letters only), so it
//       never creates spurious matches.
struct Word {
    ll len;
    ll cnt;
    string s;
};

// First at most 3 characters of s.
string getFirst(string s) {
    if (s.size() < 3) {
        return s;
    }
    return s.substr(0, 3);
}

// Last at most 3 characters of s.
string getLast(string s) {
    if (s.size() < 3) {
        return s;
    }
    return s.substr(s.size() - 3, 3);
}

// Number of (possibly overlapping) occurrences of p in s.
int count(const string& s, const string& p) {
    int cnt = 0;
    for (int i = 0; i + p.size() <= s.size(); i++) {
        if (s.substr(i, p.size()) == p) {
            cnt++;
        }
    }
    return cnt;
}

// Concatenation on the compressed form: lengths and counts add, plus any
// "haha" straddling the junction — a 4-character pattern always fits inside
// last-3-of-a + first-3-of-b, and cannot fit wholly inside either half.
Word merge(const Word& a, const Word& b) {
    Word c;
    c.len = a.len + b.len;
    c.s = a.s + b.s;
    c.cnt = a.cnt + b.cnt + count(getLast(a.s) + getFirst(b.s), "haha");
    if (c.s.size() >= 7) {
        // Keep only the ends once the text is long enough to compress.
        c.s = getFirst(c.s) + "@" + getLast(c.s);
    }
    return c;
}

// Interpret the n statements; print the haha-count of the last assignment.
void solve() {
    int n;
    cin >> n;
    map<string, Word> vars;
    ll ans = 0;
    for (int i = 0; i < n; i++) {
        string var;
        cin >> var;
        string opr;
        cin >> opr;
        if (opr == "=") {
            // "x = a + b" — concatenate two previously defined variables.
            string a, plus, b;
            cin >> a >> plus >> b;
            vars[var] = merge(vars[a], vars[b]);
        } else {
            // "x := s" — literal assignment (strings are at most 5 chars).
            string str;
            cin >> str;
            Word word;
            word.len = str.length();
            word.cnt = count(str, "haha");
            word.s = str;
            vars[var] = word;
        }
        ans = vars[var].cnt;
    }
    cout << ans << "\n";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1538
F
Interesting Function
You are given two integers $l$ and $r$, where $l < r$. We will add $1$ to $l$ until the result is equal to $r$. Thus, there will be exactly $r-l$ additions performed. For each such addition, let's look at the number of digits that will be changed after it. For example: - if $l=909$, then adding one will result in $910$ and $2$ digits will be changed; - if you add one to $l=9$, the result will be $10$ and $2$ digits will also be changed; - if you add one to $l=489999$, the result will be $490000$ and $5$ digits will be changed. Changed digits always form a suffix of the result written in the decimal system. Output the total number of changed digits, if you want to get $r$ from $l$, adding $1$ each time.
For each digit, we will count how many times it has changed. The number of changes for the first digit (the lowest) is calculated using the formula $r-l$. The number of changes for the second digit is calculated by the formula $\left\lfloor\frac{r}{10}\right\rfloor-\left\lfloor\frac{l}{10}\right\rfloor$. That is, it is equivalent to the number of first-digit changes for numbers from $\left\lfloor\frac{l}{10}\right\rfloor$ to $\left\lfloor\frac{r}{10}\right\rfloor$. To calculate the number of changes for the remaining digits, you need to apply similar reasoning with dividing the numbers by $10$.
[ "binary search", "dp", "math", "number theory" ]
1,500
#include <iostream>
using namespace std;

// Digit position d (0 = lowest) changes exactly
// floor(r/10^d) - floor(l/10^d) times while counting from l up to r, so
// the answer is the sum of r - l under repeated division by 10.
int main () {
    ios::sync_with_stdio(false);
    cin.tie(0);
    int t;
    cin >> t;
    while (t--) {
        int l, r;
        cin >> l >> r;
        long long total = 0;
        for (; l > 0 || r > 0; l /= 10, r /= 10) total += r - l;
        cout << total << '\n';
    }
}
1538
G
Gift Set
Polycarp has $x$ of red and $y$ of blue candies. Using them, he wants to make gift sets. Each gift set contains either $a$ red candies and $b$ blue candies, or $a$ blue candies and $b$ red candies. Any candy can belong to at most one gift set. Help Polycarp to find the largest number of gift sets he can create. For example, if $x = 10$, $y = 12$, $a = 5$, and $b = 2$, then Polycarp can make three gift sets: - In the first set there will be $5$ red candies and $2$ blue candies; - In the second set there will be $5$ blue candies and $2$ red candies; - In the third set will be $5$ blue candies and $2$ red candies. Note that in this example there is one red candy that Polycarp does not use in any gift set.
In this problem, we can use a binary search for the answer (If we can make $x$ sets, then we can make $y < x$ sets). So, we need to come up with the following test, whether we can make $n$ sets knowing the parameters $x, y, a, b$. Let $a > b$ (otherwise we will swap them). If $a == b$, the answer is $\lfloor\frac{min(x, y)}{a}\rfloor$. Otherwise, let's say we want to make $k$ sets of the first kind. Then we get a system of inequalities $x \le a \cdot k + b \cdot (n - k)$ $y \le a \cdot (n - k) + b \cdot k$ Let's express $k$from here $\frac{(x - b \cdot n)}{a - b} \le k$ $\frac{(x - a \cdot n)}{b - a} \ge k$ $0 \le k$ $n \ge k$ We need to check whether these four equations have an intersection in integers. If there is, then the division into $n$ gifts exists.
[ "binary search", "greedy", "math", "ternary search" ]
2,100
#include <bits/stdc++.h>
using namespace std;
using ll = long long;

// floor(p / q) for q > 0 and any sign of p.
// C++ integer '/' truncates toward zero, so negative p needs adjustment.
static ll floorDiv(ll p, ll q) {
    return p >= 0 ? p / q : -((-p + q - 1) / q);
}

// Binary search the number of gift sets m (monotone: m sets imply m-1).
// With a > b, m sets are feasible iff an integer k — the number of
// "a red + b blue" sets — exists with
//   a*k + b*(m-k) <= x  and  a*(m-k) + b*k <= y,  0 <= k <= m,
// i.e.  -floor((y - m*a)/(a - b)) <= k <= floor((x - m*b)/(a - b)).
// Fix: exact integer floor-division replaces the original floorl/ceill on
// long double, whose rounding on ~1e18 numerators could flip the bound at
// exact integer boundaries.
void solve() {
    ll x, y, a, b;
    cin >> x >> y >> a >> b;
    if (a == b) {
        // Every set uses a candies of each colour.
        cout << min(x, y) / a << "\n";
        return;
    }
    if (a < b) {
        swap(a, b);   // ensure a > b below
    }
    ll lo = 0, hi = 1'000'000'100;   // invariant: lo feasible, hi infeasible
    while (hi - lo > 1) {
        ll m = (lo + hi) / 2;
        ll kMax = floorDiv(x - m * b, a - b);
        ll kMin = -floorDiv(y - m * a, a - b);   // = ceil((y - m*a)/(b - a))
        if (max(kMin, 0ll) <= min(kMax, m)) {
            lo = m;
        } else {
            hi = m;
        }
    }
    cout << lo << "\n";
}

int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(nullptr);
    int t;
    cin >> t;
    while (t--) {
        solve();
    }
}
1539
A
Contest Start
There are $n$ people participating in some contest, they start participating in $x$ minutes intervals. That means the first participant starts at time $0$, the second participant starts at time $x$, the third — at time $2 \cdot x$, and so on. Duration of contest is $t$ minutes for each participant, so the first participant finishes the contest at time $t$, the second — at time $t + x$, and so on. When a participant finishes the contest, their dissatisfaction equals to the number of participants that started the contest (or starting it now), but haven't yet finished it. Determine the sum of dissatisfaction of all participants.
Let's find which participants will disturb participant $i$. Those are participants with number between $i + 1$ and $i + min(t / x, n)$. So each of first $max(0, n - t / x)$ participants will get $t / x$ dissatisfaction, and each next participant will get 1 dissatisfaction less, than previous. So the total answer is $max(0, n - t / x) \cdot t / x + min(n - 1, t / x - 1) \cdot min(n, t / x) / 2$.
[ "combinatorics", "geometry", "greedy", "math" ]
1,000
null
1539
B
Love Song
Petya once wrote a sad love song and shared it to Vasya. The song is a string consisting of lowercase English letters. Vasya made up $q$ questions about this song. Each question is about a subsegment of the song starting from the $l$-th letter to the $r$-th letter. Vasya considers a substring made up from characters on this segment and repeats each letter in the subsegment $k$ times, where $k$ is the index of the corresponding letter in the alphabet. For example, if the question is about the substring "abbcb", then Vasya repeats letter 'a' once, each of the letters 'b' twice, letter 'c" three times, so that the resulting string is "abbbbcccbb", its length is $10$. Vasya is interested about the length of the resulting string. Help Petya find the length of each string obtained by Vasya.
One can notice that letter with number $x$ will add exactly $x$ to the answer. So, all we have to do is calculate the sum of numbers of letters in our substring. This can be done using prefix sums.
[ "dp", "implementation", "strings" ]
800
null
1539
C
Stable Groups
There are $n$ students numerated from $1$ to $n$. The level of the $i$-th student is $a_i$. You need to split the students into stable groups. A group of students is called stable, if in the sorted array of their levels no two neighboring elements differ by more than $x$. For example, if $x = 4$, then the group with levels $[1, 10, 8, 4, 4]$ is stable (because $4 - 1 \le x$, $4 - 4 \le x$, $8 - 4 \le x$, $10 - 8 \le x$), while the group with levels $[2, 10, 10, 7]$ is not stable ($7 - 2 = 5 > x$). Apart from the $n$ given students, teachers can invite at most $k$ additional students with \textbf{arbitrary} levels (at teachers' choice). Find the minimum number of stable groups teachers can form from all students (including the newly invited). For example, if there are two students with levels $1$ and $5$; $x = 2$; and $k \ge 1$, then you can invite a new student with level $3$ and put all the students in one stable group.
Firstly, we will find the amount of groups needed if we don't add any new students. Let's consider the students in the increasing order of their knowledge level. Students are greedily determined to the same group if the difference of their knowledge levels is not greater than $x$. Else we create another group. After that all students will be split into continuous non-intersecting segments - stable groups. Merging two segments with knowledge difference $d$ may be done by adding $\lceil\frac{d}{x}\rceil - 1$ new students. Each such merge decreases the answer by $1$ so we should maximize the amount of merges. To do that we should just consider the merges in increasing order of their costs.
[ "greedy", "sortings" ]
1,200
null
1539
D
PriceFixed
Lena is the most economical girl in Moscow. So, when her dad asks her to buy some food for a trip to the country, she goes to the best store  — "PriceFixed". Here are some rules of that store: - The store has an infinite number of items of every product. - All products have the same price: $2$ rubles per item. - For every product $i$ there is a discount for experienced buyers: if you buy $b_i$ items of products (\textbf{of any type}, not necessarily type $i$), then for all future purchases of the $i$-th product there is a $50\%$ discount (so you can buy an item of the $i$-th product for $1$ ruble!). Lena needs to buy $n$ products: she must purchase at least $a_i$ items of the $i$-th product. Help Lena to calculate the minimum amount of money she needs to spend if she optimally chooses the order of purchasing. Note that if she wants, she can buy more items of some product than needed.
Let $m$ be the sum of all $a_i$. Important greedy observations: If there is an item which costs 1, then we will not make the answer worse by buying this item. If all prices are 2, then we will not make the answer worse by buying the item with max $b_i$. Therefore we can sort all items by $b_i$ and on each iteration we will only need to consider two items: with max $b_i$ and with min $b_i$ among all not yet bought items. Another important observation: We already know how many items with price 2 we should buy to be able to buy something with a discount. This means that we can buy multiple items with full price together. Similarly, we can buy multiple items with a discount at once. This solution can be implemented using a two pointers technique which allows finding the answer in $O(n \cdot \log{n})$.
[ "binary search", "greedy", "implementation", "sortings", "two pointers" ]
1,600
null
1539
E
Game with Cards
The Alice's computer is broken, so she can't play her favorite card game now. To help Alice, Bob wants to answer $n$ her questions. Initially, Bob holds one card with number $0$ in the left hand and one in the right hand. In the $i$-th question, Alice asks Bob to replace a card in the left or right hand with a card with number $k_i$ (Bob chooses which of two cards he changes, Bob must replace exactly one card). After this action, Alice wants the numbers on the left and right cards to belong to given segments (segments for left and right cards can be different). Formally, let the number on the left card be $x$, and on the right card be $y$. Then after the $i$-th swap the following conditions must be satisfied: $a_{l, i} \le x \le b_{l, i}$, and $a_{r, i} \le y \le b_{r,i}$. Please determine if Bob can answer all requests. If it is possible, find a way to do it.
Let's use dynamic programming to solve the problem. $dp_L[i]$ is equal to 1 if we can correctly answer queries on the suffix in such a way that the $i$-th card is taken in the left hand and the $i+1$-th card is taken in the right hand. $dp_R[i]$ is equal to 1 if we can correctly answer queries on the suffix in such a way that the $i$-th card is taken in the right hand and the $i+1$-th card is taken in the left hand. Let's consider transitions to count $dp_L$. Let's suppose we have $j$ such that $dp_R[j] = 1$. Then we can tell that $dp_L[i] = 1$, if these 2 conditions hold: We can take all cards with indexes $[i + 1, j]$ in the right hand. Card in query $i$ fits constraints on value written on card in left hand in queries with indexes $[i, j]$.
[ "binary search", "constructive algorithms", "data structures", "dp", "greedy", "implementation" ]
2,500
null
1539
F
Strange Array
Vasya has an array of $n$ integers $a_1, a_2, \ldots, a_n$. Vasya thinks that all numbers in his array are strange for some reason. To calculate how strange the $i$-th number is, Vasya created the following algorithm. He chooses a subsegment $a_l, a_{l+1}, \ldots, a_r$, such that $1 \le l \le i \le r \le n$, sort its elements in increasing order in his head (he can arrange equal elements arbitrary). After that he finds the center of the segment. The center of a segment is the element at position $(l + r) / 2$, if the length of the segment is odd, and at position $(l + r + 1) / 2$ otherwise. Now Vasya finds the element that was at position $i$ before the sorting, and calculates the distance between its current position and the center of the subsegment (the distance between the elements with indices $j$ and $k$ is $|j - k|$). The strangeness of the number at position $i$ is the maximum distance among all suitable choices of $l$ and $r$. Vasya wants to calculate the strangeness of each number in his array. Help him to do it.
Note that the distance from the given element to the median element (the center of a sorted segment) can be defined in terms of numbers of elements that are less, equal or bigger than the given element. Let $cnt_L$ be the number of elements that are less, $cnt_M$ - equal (excluding the given) and $cnt_R$ - bigger than the given element. Then the distance may be calculated in the following way: If $a_i$ is bigger than the median element: $ans = \lfloor \frac{cnt_L + cnt_M - cnt_R}{2}\rfloor$ otherwise $ans = \lfloor \frac{cnt_R + cnt_M - cnt_L + 1}{2}\rfloor$ To solve the problem you firstly need to assume that the given element is greater than the median element, then consider the other case and take the maximum of two answers. Hereinafter only the second case is considered (in which the element is not greater than the median one), the first case can be done analogically. Since we need to maximize $cnt_R + cnt_M - cnt_L$, let's do it separately for the elements to the left and to the right of ours. Let's sort the indices so that the corresponding elements go in increasing order. Let array $D = [1, 1 \ldots, 1]$ (its size is $n$) and $P = [1, 2, \ldots n]$. We will need operations "+= on the segment" and "min/max on the segment", so let's build a segment tree for $P$. We will iterate through the indices in the received order and when considering the index $i$ we'll change $D$ and $P$ so that they correspond to the following conditions: For each $1 \le j \le n$ if $a_j < a_i$ then $D_j = -1$, else $D_j = 1$ Array $P$ is a prefix sum array for $D$ (changes to $P$ are made via a segment tree) In order to find $\max(cnt_R + cnt_M - cnt_L)$ among elements to the left of $i$ we need to find $\min\limits_{j=1}^{i} P_j$. In order to find $\max(cnt_R + cnt_M - cnt_L)$ among elements to the right of i will find $\max \limits_{j=i}^{n} P_j$. We will find these values using the segment tree for $P$ and consider the next index. 
Note that for all the changes we will need only $n$ actions, because in array $D$ each element is firstly equal to 1, and then once becomes -1 and never changes again. The described solution's time complexity is $O(n \cdot \log(n))$
[ "data structures", "greedy", "sortings" ]
2,600
null
1540
A
Great Graphs
Farmer John has a farm that consists of $n$ pastures connected by one-directional roads. Each road has a weight, representing the time it takes to go from the start to the end of the road. The roads could have negative weight, where the cows go so fast that they go back in time! However, Farmer John guarantees that it is impossible for the cows to get stuck in a time loop, where they can infinitely go back in time by traveling across a sequence of roads. Also, each pair of pastures is connected by at most one road in each direction. Unfortunately, Farmer John lost the map of the farm. All he remembers is an array $d$, where $d_i$ is the smallest amount of time it took the cows to reach the $i$-th pasture from pasture $1$ using a sequence of roads. The cost of his farm is the sum of the weights of each of the roads, and Farmer John needs to know the \textbf{minimal} cost of a farm that is consistent with his memory.
Note that if there are two nodes $a$ and $b$ and you want to add an edge between them, the value of the edge must be $\geq d_b - d_a$. Otherwise, the cows could take a path to $b$ that goes through $d_a$ that's strictly less than $d_b$. With this in mind, let's add all edges $(a, b)$ with weight $d_b - d_a$ if and only if $d_b \leq d_a$. All of these numbers are not positive, which means they can't make our answer worse. They also don't change the shortest paths, from our first observation. Now, let's call the node with the max $d_i$ node $x$. You can add a single edge from node $1$ to node $x$ with cost $d_x$, and now the graph is good. This is because node $x$ is already connected to all other nodes, which means there is always a shortest path to some node $a$ with the right length by going from $1 \rightarrow x \rightarrow a$. However, naively looping through all pairs is too slow. Instead, you can sort $d$ and calculate the contribution of each node to the answer. The complexity is $O(n log n)$.
[ "constructive algorithms", "graphs", "greedy", "shortest paths", "sortings" ]
1,400
null
1540
B
Tree Array
You are given a tree consisting of $n$ nodes. You generate an array from the tree by marking nodes one by one. Initially, when no nodes are marked, a node is equiprobably chosen and marked from the entire tree. After that, until all nodes are marked, a node is equiprobably chosen and marked from the set of unmarked nodes with at least one edge to a marked node. It can be shown that the process marks all nodes in the tree. The final array $a$ is the list of the nodes' labels in order of the time each node was marked. Find the expected number of inversions in the array that is generated by the tree and the aforementioned process. The number of inversions in an array $a$ is the number of pairs of indices $(i, j)$ such that $i < j$ and $a_i > a_j$. For example, the array $[4, 1, 3, 2]$ contains $4$ inversions: $(1, 2)$, $(1, 3)$, $(1, 4)$, $(3, 4)$.
Parsing through the problem statement, the process can be seen as choosing a starting node and "expanding" the subtree of marked nodes to nodes adjacent to the marked component. Fixing a given root $r$, the expected value of the entire process is obviously the sum of the expected values for a fixed root divided by $n$. Find the contribution of the inversion of two nodes $(a, b)$ where $a<b$. The expected contribution for any pair $(a, b)$ is equal to the probability that $b$ appears before $a$ with a given root. Set $l = lca(a, b)$. Note that, until reaching $l$, every possible process still has the same probability of reaching $b$ before $a$ as it did when the first node was chosen. Therefore, we can assume that the process has reached $l$ and calculate the probability from there. Once $l$ is reached, we now note that the probability that the process "gets closer" to $b$ is always equal to the probability of getting closer to $a$. The problem can be rephrased as having two stacks of size $dist(l, a)$ and $dist(l, b)$ with an arbitrary $p$ to remove a node from one of the two stack (and $1-2p$ to nothing) and finding the probability that $dist(l, b)$ reaches zero before $dist(l, a)$. However, it turns out that the actual probability $p$ does not matter. We propose a function $F[x][y]$ that defines the probability that a stack of size $y$ becomes $0$ before a stack of size $x$. In fact a function exists and it is defined as $F[x][y] = \frac{F[x-1][y]+F[x][y-1]}{2}$. Intuitively, this is because the probability of decreasing $x$ or decreasing $y$ is always the same, so the probability of transitioning the state we end up transitioning to is always the same, regardless of $p$. So, the solution is clear. Iterate over and root at all nodes. Then at the given root, iterate over all pairs of node $a < b$ and add $F[dist(l, a)][dist(l, b)]$ to the answer. Finally, divide by $n$. In total, the solution works in $O(N^3 \log N)$ or $O(N^3)$.
[ "brute force", "combinatorics", "dp", "graphs", "math", "probabilities", "trees" ]
2,300
null
1540
C2
Converging Array (Hard Version)
\textbf{This is the hard version of the problem. The only difference is that in this version $1 \le q \le 10^5$. You can make hacks only if both versions of the problem are solved.} There is a process that takes place on arrays $a$ and $b$ of length $n$ and length $n-1$ respectively. The process is an infinite sequence of operations. Each operation is as follows: - First, choose a random integer $i$ ($1 \le i \le n-1$). - Then, simultaneously set $a_i = \min\left(a_i, \frac{a_i+a_{i+1}-b_i}{2}\right)$ and $a_{i+1} = \max\left(a_{i+1}, \frac{a_i+a_{i+1}+b_i}{2}\right)$ without any rounding (so values may become non-integer). See notes for an example of an operation.It can be proven that array $a$ converges, i. e. for each $i$ there exists a limit $a_i$ converges to. Let function $F(a, b)$ return the value $a_1$ converges to after a process on $a$ and $b$. You are given array $b$, but not array $a$. However, you are given a third array $c$. Array $a$ is good if it contains only \textbf{integers} and satisfies $0 \leq a_i \leq c_i$ for $1 \leq i \leq n$. Your task is to count the number of good arrays $a$ where $F(a, b) \geq x$ for $q$ values of $x$. Since the number of arrays can be very large, print it modulo $10^9+7$.
First, reduce the operations into something more manageable. It turns out operation $i$ sets $a_{i+1}-a_i=\max(b_i, a_{i+1}-a_i)$ while keeping $a_{i+1}+a_i$ constant. Visually, this is simultaneously moving $a_i$ up and $a_{i+1}$ down until $a_{i+1}-a_i \geq b_i$. Define $f$ to be the final converged array. Let's make some observations. If $a_{i+1}-a_i = b_i$ is ever satisfied throughout the process (if an operation ever moves anything), $f_{i+1}-f_i = b_i$. Equivalently, if $f_{i+1}-f_i > b_i$ then no operation $i$ could have ever been conducted. If no operation $i$ has been conducted, then $[1, i]$ is independent of $[i+1, n]$. If $i$ is the first operation that has never been conducted, then $\sum_{j=1}^i a_j = \sum_{j=1}^i f_j$ because no sum could have been exchanged between $a_i$ and $a_{i+1}$. Let's assume that we know that $i$ is the first operation that hasn't been conducted. We can then restore $f_1$ because we know that $f_1 + f_1+b_1+f_1+b_1+b_2 \ldots = a_1+a_2+\ldots+a_i$. To keep the tutorial clean, we refer to the prefix of prefix summation of $b_i$ as $bp_i$ and the prefix summation of $a_i$ as $ap_i$. Namely, we can solve for $f_1 = \frac{ap_i - bp_i}{i}$ given that $i$ is the first operation never conducted. It turns out that $f_1 = \min(\frac{ap_i - bp_i}{i})$ over all $i$. This can be shown to be a lower bound on $f_1$ because the answer is obviously in this set as there must be some $i$ that is the first operation never conducted. This can also be shown to be an upper bound on the answer by playing the game on each prefix $i$. At each prefix $i$, the term $\frac{ap_i - bp_i}{i}$ is an upper bound because, if it's not equal to that term, there must be some $f_{i+1}-f_i > b_i$ so $f_1 < \frac{ap_i - bp_i}{i}$ because $f_1+f_2+\ldots+f_i$ remains the same. Returning to the actual problem, we need to count arrays $\min(\frac{ap_i-bp_i}{i}) \geq x$. In other words, $ap_i \geq i \cdot x + bp_i$ must hold for all $i$. 
Let's do dynamic programming on $ap_i$. Call $dp[i][ap_i]$ the number of prefixes of length $i$ with current prefix sum of $ap_i$. We can transition to $i+1$ from $i$ using prefix sums on each valid $ap_i$. Define $M = max(a_i)$. The current complexity is $O(Q M N^2)$ The final step is noticing that there are only $O(M)$ valid integer positions that end up being important for $f_1$. Intuitively, this is because in nearly all cases every operation $i$ ends up being used. To rigorously prove, let's find an upperbound on relevant $x$. If $M\cdot n < x\cdot n + bp_i$ then there are $0$ valid arrays. Because $x\cdot n + bp_i$ is concave and negative on the decreasing portion (i.e. the function goes down immediately into negative if it ever becomes negative, otherwise strictly increases), we can draw the inequality $0 \geq x\cdot n + bp_i$, otherwise every array ends up being good. Reducing the inequalities, we can realize that there is exactly $M$ different possible solutions. So, we can precalculate in $O(M^2N^2)$ and answer in $O(1)$.
[ "dp", "math" ]
2,900
null
1540
D
Inverse Inversions
You were playing with permutation $p$ of length $n$, but you lost it in Blair, Alabama! Luckily, you remember some information about the permutation. More specifically, you remember an array $b$ of length $n$, where $b_i$ is the number of indices $j$ such that $j < i$ and $p_j > p_i$. You have the array $b$, and you want to find the permutation $p$. However, your memory isn't perfect, and you constantly change the values of $b$ as you learn more. For the next $q$ seconds, one of the following things happen: - $1$ $i$ $x$ — you realize that $b_i$ is equal to $x$; - $2$ $i$ — you need to find the value of $p_i$. If there's more than one answer, print any. It can be proven that there's always at least one possible answer under the constraints of the problem. Answer the queries, so you can remember the array!
We'll assume the arrays and the final permutation are 0-indexed from this point forward (you can shift values accordingly at the end). Let's start with calculating the final array, without any updates. Let $c_i$ be the number of indices $j$ such that $p_j < p_i$ and $i < j$. It is easy to see that $c_i = i - b_i$. Now imagine sweeping from left to right, maintaining the array $a$. Let's say you're currently at index $i$, and you have a list of all indices $\leq i$, where the location of some index $j$ is the value of $a_j$. You know that $i$ must be the $c_i$-th out of those (after inserting it into the list) as the $c_i$ smallest values must be before it. This means that you can insert index $i$ at the $c_i-th$ position in the list, and you need to find the final location of index $i$ in each query. Now, let's support $O(1)$ updates and $O(n)$ queries. The first thing to note is that you don't need the entire array $a$ in each query, you just need to query some specific element. Assume you're querying the $i$-th element of the array. Let's repeat the algorithm mentioned above except instead of storing the lists, you only store the location of the $i$-th element for each query. Now, you keep some variable $loc$ which stores the location of the $i$-th element in the list. It is initialized to $c_i$, as it is first inserted to the $c_i$-th position. Now you sweep through all indices $j$ where $j > i$, and check if $loc$ is changed by the insert. This is only true if $loc \geq a[j]$. This allows for $O(1)$ updates but $O(n)$ queries, which is still too slow. To speed this up, let's use sqrt-decomp. Divide the array into multiple blocks. For each block, let's store an array $next$ where $next_{loc}$ represents the final value of the variable $loc$ if it passes through the block. If we have this array, you can loop over all of the blocks and "jump" over them using the next array, querying in $O(n/B)$ time, where $B$ is the size of the block. 
But how do you calculate and update the $next$ array? Initially, you can calculate the next array in $O(n \cdot B)$, by going to each index and naively tracking its position. Updating it is harder. One observation to make is that for each index inside a block, there is some value $x$ such that if $loc \geq x$ it will be changed by the index, otherwise it will not. You can find all of those values for a single block using a binary indexed tree: sweep from left to right, query for the smallest update $loc$ using a lower bound on the BIT, and increment a suffix. Then, you can increment all of those suffixes in the next array using another BIT, which would lead to $O(n \sqrt{n} + q \sqrt{n} \log{n})$, as queries are $O((n/B)\log{n})$ and updates are $O(B \log{n})$, which will probably be too slow. To make this faster, note that the suffix that each element is responsible for changes by at most one for all non-updated elements. This means that you can update the next array in $O(1)$ for these elements. However, the updated element's suffix might change by $O(n)$. To account for this, you can use another square root decomposition on the next array, which allows for $O(\sqrt{n})$ range updates, $O(1)$ point updates, and $O(1)$ point queries. This means that updates will remain $O(B \log{n})$, but queries will become $O(1)$, so the final complexity is $O(q \sqrt{n \log{n}})$ with the right blocksize, which is definitely fast enough (the model solution runs in $2$ seconds). If you know of any faster solutions ($O(n\sqrt{n})$ or even $O(n \cdot \log^k{n})$, let us know down in the comments).
[ "binary search", "brute force", "data structures" ]
3,200
null
1540
E
Tasty Dishes
\textbf{Note that the memory limit is unusual.} There are $n$ chefs numbered $1, 2, \ldots, n$ that must prepare dishes for a king. Chef $i$ has skill $i$ and initially has a dish of tastiness $a_i$ where $|a_i| \leq i$. Each chef has a list of other chefs that he is allowed to copy from. To stop chefs from learning bad habits, the king makes sure that chef $i$ can only copy from chefs of larger skill. There are a sequence of days that pass during which the chefs can work on their dish. During each day, there are two stages during which a chef can change the tastiness of their dish. - At the beginning of each day, each chef can choose to work (or not work) on their own dish, thereby multiplying the tastiness of their dish of their skill ($a_i := i \cdot a_i$) (or doing nothing). - After all chefs (who wanted) worked on their own dishes, each start observing the other chefs. In particular, for each chef $j$ on chef $i$'s list, chef $i$ can choose to copy (or not copy) $j$'s dish, thereby adding the tastiness of the $j$'s dish to $i$'s dish ($a_i := a_i + a_j$) (or doing nothing). It can be assumed that all copying occurs simultaneously. Namely, if chef $i$ chooses to copy from chef $j$ he will copy the tastiness of chef $j$'s dish at the end of stage $1$. All chefs work to maximize the tastiness of their own dish in order to please the king. Finally, you are given $q$ queries. Each query is one of two types. - $1$ $k$ $l$ $r$ — find the sum of tastiness $a_l, a_{l+1}, \ldots, a_{r}$ after the $k$-th day. Because this value can be large, find it modulo $10^9 + 7$. - $2$ $i$ $x$ — the king adds $x$ tastiness to the $i$-th chef's dish before the $1$-st day begins ($a_i := a_i + x$). Note that, because the king wants to see tastier dishes, he only adds positive tastiness ($x > 0$). Note that queries of type $1$ are independent of each all other queries. 
Specifically, each query of type $1$ is a scenario and does not change the initial tastiness $a_i$ of any dish for future queries. Note that queries of type $2$ are cumulative and only change the initial tastiness $a_i$ of a dish. See notes for an example of queries.
All operations are conducted under a modulo; it can be proven that each operation we conduct is valid within the modulo. Key Idea 1 It's optimal to perform each operation if the number being added/multiplied is strictly positive. Specifically, it's optimal to do $a_i := i\cdot a_i$ and $a_i := a_i+a_j$ iff $a_i > 0$ and $a_j > 0$ respectively. Key Idea 2 A chef's dish $i$ becomes positive after the $d$'th day where $d$ is the closest distance of chef $i$ from a positive element. We call this value $d_i$ for the $i$'th chef. Initial observation: there are only $O(N^2)$ pairs of $(i, d_i)$ where $d_i$ is not infinite (never reaches positive value and thus never changes). Key Idea 3 We refer to a vector of length $n$ with $n-1$ zeros and a single $1$ at the $i$'th index as $\vec{e}_i$. If we assume that all chefs are currently positive then every chef takes every opportunity to copy and the transition matrix $T$ is well defined and obvious. We can represent the final array at time $k$ as $a = T^k \sum_{d_i \leq k} T^{-d_i} \vec{e}_i a_i + \sum_{d_i>k} \vec{e}_i a_i$. This immediately offers a $O(QN^2+N^4)$ potential solution with very good constant. Because $T$ is triangular, its inverse applied to a vector $\vec{v}$ can be calculated in $O(N^2)$. So, we can precalculate $T^{-k} \vec{e}_i$ for all valid numbers in $O(N^2)$. In fact, this ends up being $N^4/12$ total operations if optimized. We can then answer each query in $O(N^2)$ by simply iterating over each chef and finding its contribution to the answer. Multiplying a matrix $O(N^2)$ will be explained later. With some special hacks, it may even be possible to get this to pass. Key Idea 4 An arbitrary matrix $T$ has eigenvalues $\lambda_i$ and their paired eigenvectors $\vec{v}_i$. Ignoring how we find eigenvectors and eigenvalues, the point of the two is that they are pairs such that $T \vec{v}_i = \vec{v}_i \lambda_i$. 
Notably, given an eigenvector and its respective eigenvalue, we can calculate $T^k \cdot \vec{v}_i$ in $O(N \log k)$. In our case, the transition matrix $T$ is a triangular matrix. A basic result in linear algebra is that the eigenvalues of a triangular matrix are exactly the numbers on its diagonal. In our case, this means that $\lambda_i = i$ for $1 \le i \le n$. Finding the eigenvectors is left as an exercise to the reader. We henceforth denote the eigenvector paired with the $i$'th eigenvalue as $\vec{v}_i$. Key Idea 5 Decompose $\vec{e}_i$ into a linear combination of eigenvectors. This can be precalculated in $O(N^3)$. Let's denote another vector $\vec{c}_i$ as this linear combination i.e. $\vec{c}_i$ satisfies $\vec{e}_i = \sum_j \vec{v}_j \vec{c}_{i, j}$. In fact, this is almost all we need. Let's return to $a = T^k \sum_{d_i \leq k} T^{-d_i} \vec{e}_i a_i$ (the second part can be trivially calculated). We can calculate almost "separately" for each eigenvector $j$. In fact, the contribution of the $j$'th eigenvector from the $i$'th chef after the $k$'th day is $\lambda_j^k c_{i, j} a_i \lambda_j^{-d_i}$. Let's store $c_{i,j}a_i \lambda_j^{-d_i}$ in a segment tree/BIT on $d_i$. We can extract in $O(N \log N)$ total or $O(\log N)$ for a single eigenvector then find the total contribution by multiplying by $\lambda_j^k$ and using prefix sums on the eigenvector to extract the relevant range $[l, r]$. We can also process $O(N \log N)$ per update if $d_i$ stays constant trivially. This is more than enough to pass, but a further optimization is that the array $d$ only changes at most $O(N)$ times (whenever a new element becomes positive). So, we can simply rebuild our segment tree/BIT in $O(N)$ to reach $O(N^3)$ complexity. Final complexity: $O(N^3 + QN\log N)$. It's also worth noting that several nonoptimal solutions can pass. We attempted to cut off any solutions that explicitly use matrix multiplication. 
However, solutions with complexity such as $O(QN^2)$ can pass in 64 bit C++ if only additions are used. The only way we found to do only additions was by making the same eigenvector reduction, so we were not too worried. It seems impossible to fail such solutions while maintaining a reasonable TL.
[ "math", "matrices" ]
3,500
null
1541
A
Pretty Permutations
There are $n$ cats in a line, labeled from $1$ to $n$, with the $i$-th cat at position $i$. They are bored of gyrating in the same spot all day, so they want to reorder themselves such that no cat is in the same place as before. They are also lazy, so they want to minimize the total distance they move. Help them decide what cat should be at each location after the reordering. For example, if there are $3$ cats, this is a valid reordering: $[3, 1, 2]$. No cat is in its original position. The total distance the cats move is $1 + 1 + 2 = 4$ as cat $1$ moves one place to the right, cat $2$ moves one place to the right, and cat $3$ moves two places to the left.
There are two cases: if $n$ is even, print: $[2, 1, 4, 3, 6, 5 \ldots n, n-1]$ Formally, you swap every other pair of adjacent elements. This is optimal because the total distance is $n$, which has to be minimal since the distance of one cat must be $\geq 1$. if $n$ is odd, first print $[3, 1, 2]$ then solve the even case for the remaining elements. This is optimal because the distance is $n+1$, which has to be minimal since a distance of $n$ is not achievable.
[ "constructive algorithms", "greedy", "implementation" ]
800
null
1541
B
Pleasant Pairs
You are given an array $a_1, a_2, \dots, a_n$ consisting of $n$ \textbf{distinct} integers. Count the number of pairs of indices $(i, j)$ such that $i < j$ and $a_i \cdot a_j = i + j$.
Loop over all values of $a_i$ and $a_j$. Because $i + j \leq 2 \cdot n$, we only care about pairs $(a_i, a_j)$ if $a_i \cdot a_j \leq 2 \cdot n$. The number of such pairs is $O(n \log n)$, so you can brute force all pairs. The reason the total number of pairs is $O(n \log n)$ is because if the first element of the pair is $x$, there are only $\frac{2 \cdot n}{x}$ possible values of $y$. $\frac{2 \cdot n}{1} + \frac{2 \cdot n}{2} + \frac{2 \cdot n}{3} + \ldots \frac{2 \cdot n}{n} = 2 \cdot n (\frac{1}{1} + \frac{1}{2} + \frac{1}{3} \ldots \frac{1}{n}) = O(n \log n)$ by the harmonic series. Thus the solution runs in $O(n \log n)$ time total.
[ "brute force", "implementation", "math", "number theory" ]
1,200
null
1542
A
Odd Set
You are given a multiset (i. e. a set that can contain multiple equal integers) containing $2n$ integers. Determine if you can split it into exactly $n$ pairs (i. e. each element should be in exactly one pair) so that the sum of the two elements in each pair is \textbf{odd} (i. e. when divided by $2$, the remainder is $1$).
The answer is 'yes' if and only if there are exactly $n$ odd numbers.
[ "math" ]
800
#include <iostream>

// Codeforces 1542A "Odd Set": a multiset of 2n integers can be split into
// n pairs each with an odd sum iff exactly n of the integers are odd
// (equivalently, exactly n are even).
int main() {
    int tests;
    std::cin >> tests;
    while (tests--) {
        int n;
        std::cin >> n;
        int odd = 0;  // count of odd elements among the 2n inputs
        for (int i = 0; i < 2 * n; ++i) {
            int value;
            std::cin >> value;
            odd += value & 1;
        }
        std::cout << (odd == n ? "Yes" : "No") << '\n';
    }
    return 0;
}
1542
B
Plus and Multiply
There is an infinite set generated as follows: - $1$ is in this set. - If $x$ is in this set, $x \cdot a$ and $x+b$ both are in this set. For example, when $a=3$ and $b=6$, the five smallest elements of the set are: - $1$, - $3$ ($1$ is in this set, so $1\cdot a=3$ is in this set), - $7$ ($1$ is in this set, so $1+b=7$ is in this set), - $9$ ($3$ is in this set, so $3\cdot a=9$ is in this set), - $13$ ($7$ is in this set, so $7+b=13$ is in this set). Given positive integers $a$, $b$, $n$, determine if $n$ is in this set.
First check specially if $a=1$ (then the set is $\{1, 1+b, 1+2b, \ldots\}$). Let's consider when $n$ is in $S$. The answer is when the smallest number $m$ in $S$ such that $n\ \mathrm{mod}\ b=m\ \mathrm{mod}\ b$ is not greater than $n$. It's easy to see that a new case of $x\ \mathrm{mod}\ b$ can only appear when you use $\times a$ to generate a new element. So the smallest number $m$ in $S$ such that $m\ \mathrm{mod}\ b=k$ for a given residue $k$ must be a power of $a$. Find all powers of $a$ that are not greater than $n$. If there is one $m=a^k$ such that $n\ \mathrm{mod}\ b=m\ \mathrm{mod}\ b$, the answer is yes. Otherwise it's no. Time complexity is $O(\log n)$.
[ "constructive algorithms", "math", "number theory" ]
1,500
def in_set(n, a, b):
    """Return True iff n is in the set generated from 1 by x -> x*a and x -> x+b.

    n is reachable iff some power of a not exceeding n has the same
    residue as n modulo b (adding b never changes the residue).
    """
    if a == 1:
        # All powers of a equal 1, so the set is {1, 1+b, 1+2b, ...}.
        return (n - 1) % b == 0
    power = 1
    while power <= n:
        if power % b == n % b:
            return True
        power *= a
    return False


for _ in range(int(input())):
    n, a, b = map(int, input().split())
    print("Yes" if in_set(n, a, b) else "No")
1542
C
Strange Function
Let $f(i)$ denote the minimum positive integer $x$ such that $x$ is \textbf{not} a divisor of $i$. Compute $\sum_{i=1}^n f(i)$ modulo $10^9+7$. In other words, compute $f(1)+f(2)+\dots+f(n)$ modulo $10^9+7$.
Enumerate the value of $f(i)$. Since $f(n)=i$ means $lcm(1,2,...,i-1)\le n$, $f(n)$ will not be too big (less than $100$). The number of $k$s such that $f(k)=i$ is $\lfloor n/lcm(1,2,...,i-1)\rfloor -\lfloor n/lcm(1,2,...,i)\rfloor$. ($k$ should be divisible by $1\sim i-1$ but not $i$) So the answer is $\sum_{i>1} i(\lfloor n/lcm(1,2,...,i-1)\rfloor -\lfloor n/lcm(1,2,...,i)\rfloor )$. We can also write the answer in another form, which is equivalent to the previous form: $\sum_{i\ge 1} \lfloor n/lcm(1,2,...,i)\rfloor +n$
[ "math", "number theory" ]
1,600
#include <cstdio>
#include <numeric>

// f(i) = smallest positive integer that does NOT divide i.
// Sum_{i=1..n} f(i) = n + sum_{j>=1} floor(n / lcm(1..j))  (mod 1e9+7),
// and lcm(1..j) exceeds n after ~45 terms, so the loop is tiny.
//
// Fixes vs. the original: removed the `register` keyword (ill-formed in
// C++17) and the `#define int long long` macro; uses std::gcd from <numeric>
// instead of a hand-rolled recursion. I/O format is unchanged.

static const long long MOD = 1e9 + 7;

// lcm(x, y); dividing by gcd first avoids intermediate overflow.
static long long lcm_ll(long long x, long long y) {
    return x / std::gcd(x, y) * y;
}

int main() {
    long long t;
    scanf("%lld", &t);
    while (t--) {
        long long n;
        scanf("%lld", &n);
        long long G = 1, ans = 0;
        // Accumulate floor(n / lcm(1..i)) while the lcm is still <= n.
        for (long long i = 1; G <= n; ++i) {
            G = lcm_ll(G, i);
            if (G > n) break;
            ans += n / G;  // at most ~45 terms, each <= n: no overflow
        }
        // Same output format as the original: answers separated by spaces.
        printf("%lld ", (ans + n) % MOD);
    }
}
1542
D
Priority Queue
You are given a sequence $A$, where its elements are either in the form + x or -, where $x$ is an integer. For such a sequence $S$ where its elements are either in the form + x or -, define $f(S)$ as follows: - iterate through $S$'s elements from the first one to the last one, and maintain a multiset $T$ as you iterate through it. - for each element, if it's in the form + x, add $x$ to $T$; otherwise, erase the smallest element from $T$ (if $T$ is empty, do nothing). - after iterating through all $S$'s elements, compute the sum of all elements in $T$. $f(S)$ is defined as the sum. The sequence $b$ is a subsequence of the sequence $a$ if $b$ can be derived from $a$ by removing zero or more elements without changing the order of the remaining elements. For all $A$'s subsequences $B$, compute the sum of $f(B)$, modulo $998\,244\,353$.
For each $x$, count how many $B$s make the final set contain $x$. Let's say we have picked the $x$ in the $I$-th operation, call it $X$. Then, the subsequence we choose must satisfy the following conditions: It must contain the $I$-th operation (otherwise $X$ won't be added). Let $s$ denote the number of numbers less than $X$ in the current $T$. Whenever we meet a - element in $S$ after the $I$-th operation, $s$ should be greater than $0$. With those conditions, we can come up with the following dp. Let $f(i,j)$ denote the number of subsequences of $a[1...i]$, that if we maintain $T$ with the subsequence, $s$ will become $j$. Then we have the following transitions: $f(i-1,j)\to f(i,j)$ (when we don't include the $i$-th element in $S$, here $i\ne I$) $f(i-1,j)\to f(i,\max(j-1,0))$ (here, $i<I$, and the $i$-th element of $A$ is -, so the number of numbers in $T$ less than $X$ decreases by one. If there is no such number, $s$ remains $0$) $f(i-1,j)\to f(i,j-1)$ (here, $i>I$, and the $i$-th element of $A$ is -, so the number of numbers in $T$ less than $X$ decreases by one.) $f(i-1,j)\to f(i,j)$ (here, the $i$-th element of $A$ is + and its $x$ is greater than $X$) $f(i-1,j)\to f(i,j+1)$ (here, the $i$-th element of $A$ is + and its $x$ is less than $X$ or [equal to $X$ but $i>I$] ) [this is to deal with same elements] Then we increase $ans$ by $X\times \sum_{i\ge 0} f(n,i)$. The time complexity is $O(n^3)$.
[ "combinatorics", "dp", "implementation", "math", "ternary search" ]
2,200
MOD = 998244353

n = int(input())
# plus_val[i] > 0 when the i-th operation is "+ x"; 0 marks a "-" operation.
plus_val = [0] * (n + 1)
for i in range(1, n + 1):
    parts = input().split()
    if parts[0] == "+":
        plus_val[i] = int(parts[1])

answer = 0
# Fix the operation t whose value X = plus_val[t] should survive in T,
# and count the subsequences (necessarily containing t) in which X is
# never erased by a '-' operation.
for t in range(1, n + 1):
    if plus_val[t] == 0:
        continue
    X = plus_val[t]
    # f[i][j]: number of subsequences of the first i operations after which
    # exactly j elements smaller than X sit in T (they shield X from '-').
    f = [[0] * (n + 2) for _ in range(n + 2)]
    f[0][0] = 1
    for i in range(1, n + 1):
        for j in range(i + 1):
            if plus_val[i] == 0:
                # '-' operation: before t it may erase anything (or nothing
                # when T is empty); after t it must erase a shield (j > 0).
                if i <= t or j > 0:
                    f[i][max(j - 1, 0)] = (f[i][max(j - 1, 0)] + f[i - 1][j]) % MOD
            else:
                # '+' operation: values below X (ties broken by index) shield X.
                if plus_val[i] < X or (plus_val[i] == X and i < t):
                    f[i][j + 1] = (f[i][j + 1] + f[i - 1][j]) % MOD
                else:
                    f[i][j] = (f[i][j] + f[i - 1][j]) % MOD
            # Skipping operation i is allowed everywhere except at i == t.
            if i != t:
                f[i][j] = (f[i][j] + f[i - 1][j]) % MOD
    for j in range(n + 1):
        answer = (answer + f[n][j] * X) % MOD

print(answer)
1542
E1
Abnormal Permutation Pairs (easy version)
\textbf{This is the easy version of the problem. The only difference between the easy version and the hard version is the constraints on $n$. You can only make hacks if both versions are solved.} A permutation of $1, 2, \ldots, n$ is a sequence of $n$ integers, where each integer from $1$ to $n$ appears exactly once. For example, $[2,3,1,4]$ is a permutation of $1, 2, 3, 4$, but $[1,4,2,2]$ isn't because $2$ appears twice in it. Recall that the number of inversions in a permutation $a_1, a_2, \ldots, a_n$ is the number of pairs of indices $(i, j)$ such that $i < j$ and $a_i > a_j$. Let $p$ and $q$ be two permutations of $1, 2, \ldots, n$. Find the number of permutation pairs $(p,q)$ that satisfy the following conditions: - $p$ is lexicographically smaller than $q$. - the number of inversions in $p$ is greater than the number of inversions in $q$. Print the number of such pairs modulo $mod$. Note that $mod$ may not be a prime.
Let's first calculate the number of permutation pairs $(p,q)$ (with length $i$) such that $p_1<q_1$ but $inv(p)>inv(q)$ ($inv(p)$ is the number of inversions in $p$). Call it $t_i$. Let's enumerate $p_1=j$ and $q_1=k$, then $inv(p[2...i])-inv(q[2...i])>k-j$. ($inv(p)=inv(p[2...i])+j-1,inv(q)=inv(q[2...i])+k-1$, with $inv(p)>inv(q)$ we get the following.) Precalculate $f(i,j)$: the number of permutations $p$ of length $i$ such that $inv(p)=j$. Let $s(i,j)$ be $\sum_{k\le j}f(i,k)$, then: $t_i=\sum_{1\le j\le i}\sum_{j<k\le i} \sum_{w} f(i-1,w)s(i-1,w-(k-j)-1)$. $f$ and $s$ can be calculated in $O(n^4)$ or $O(n^3)$ in the following way: if you insert $i$ into a permutation of length $i-1$ after the $i-1-p$-th element $(0\le p\le i-1)$, it will bring $p$ inversions into the permutation. So $f(i,j)=\sum_{j-i+1\le k\le j} f(i-1,k)$. After calculating $t$, calculating the answer is easy. Let $ans_i$ be the answer for $n=i$, then $ans_i=i\times ans_{i-1}+t_i$. Consider if $p_1=q_1$. If so, there are $i$ choices of $p_1$, and $ans_{i-1}$ choices of the following $i-1$ numbers. Otherwise, there are $t_i$ choices. Total complexity is $O(n^5)$, but it can be optimized to $O(n^4)$ if you consider the difference between $j-k$ only, and can be optimized to $O(n^3\log n)$ using FFT with arbitrary mod (which we hope can't pass E2!).
[ "combinatorics", "dp", "fft", "math" ]
2,400
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
// f[i][j] = number of permutations of length i with exactly j inversions
// s[i][j] = prefix sum f[i][0] + ... + f[i][j]
// ans[i]  = answer for n = i (pairs p < q lexicographically, inv(p) > inv(q))
// Max inversion count for n <= 50 is 50*49/2 = 1225 < 2005.
int n,mod,f[55][2005]={1},s[55][2005]={1},ans[55];
int main(){
    cin>>n>>mod;
    // Base row: the empty permutation has 0 inversions, so each prefix sum is 1.
    for(int i=1;i<=n*(n-1)/2;i++)s[0][i]=1;
    // Inserting element i can add 0..i-1 inversions, so
    // f[i][j] = sum of f[i-1][j-i+1 .. j], evaluated via prefix sums.
    for(int i=1;i<=n;i++){
        for(int j=0;j<=n*(n-1)/2;j++){
            if(j-i>=0)f[i][j]=(s[i-1][j]-s[i-1][j-i]+mod)%mod;
            else f[i][j]=s[i-1][j];
            s[i][j]=((j?s[i][j-1]:0)+f[i][j])%mod;
        }
    }
    // t_i: pairs with p_1 = j < k = q_1. Then we need
    // inv(p[2..i]) - inv(q[2..i]) > k - j; enumerate inv(p[2..i]) = o.
    for(int i=1;i<=n;i++){
        for(int j=1;j<=i;j++){
            for(int k=j+1;k<=i;k++){
                for(int o=0;o<=(i-1)*(i-2)/2;o++){
                    // q's tail may carry at most o - (k-j) - 1 inversions.
                    int t=o-(k-j)-1;
                    if(t<0)continue;
                    ans[i]=(ans[i]+1ll*f[i-1][o]*s[i-1][t]%mod)%mod;
                }
            }
        }
    }
    // Recurrence ans_i = i * ans_{i-1} + t_i (the t_i part is already stored).
    for(int i=2;i<=n;i++)ans[i]=(ans[i]+1ll*i*ans[i-1])%mod;
    cout<<ans[n];
}
1542
E2
Abnormal Permutation Pairs (hard version)
\textbf{This is the hard version of the problem. The only difference between the easy version and the hard version is the constraints on $n$. You can only make hacks if both versions are solved.} A permutation of $1, 2, \ldots, n$ is a sequence of $n$ integers, where each integer from $1$ to $n$ appears exactly once. For example, $[2,3,1,4]$ is a permutation of $1, 2, 3, 4$, but $[1,4,2,2]$ isn't because $2$ appears twice in it. Recall that the number of inversions in a permutation $a_1, a_2, \ldots, a_n$ is the number of pairs of indices $(i, j)$ such that $i < j$ and $a_i > a_j$. Let $p$ and $q$ be two permutations of $1, 2, \ldots, n$. Find the number of permutation pairs $(p,q)$ that satisfy the following conditions: - $p$ is lexicographically smaller than $q$. - the number of inversions in $p$ is greater than the number of inversions in $q$. Print the number of such pairs modulo $mod$. Note that $mod$ may not be a prime.
We recommend you to read E1's editorial first. Let's directly count the number of permutation pairs $(p,q)$ of length $n$ with $inv(p)-inv(q)=k$, instead of counting it indirectly from "the number of permutation $p$s of length $i$ such that $inv(p)=j$.". Call this number $f(n,k)$. We have an $n^4$ transition: $f(n,k)=\sum_{|i|<n} f(n-1,k-i)\times (n-|i|)$ (Consider where to insert $n$ in the first and second permutation. If the two places are indices $(I,J)$, then the delta of $inv(p)$ is $n-I$, the other is $n-J$, so the delta of difference is $J-I$. In $[1,n]$ there are $n-|J-I|$ pairs of integers with difference $J-I$.) Let's speed it up. When $f(n,k)$ moves to $f(n,k+1)$, it looks like this: ($n=4$, as an example) $f(n-1,k-3)\times 1+f(n-1,k-2)\times 2+f(n-1,k-1)\times 3+f(n-1,k)\times 4+f(n-1,k+1)\times 3+f(n-1,k+2)\times 2+f(n-1,k+3)\times 1$ $f(n-1,k-2)\times 1+f(n-1,k-1)\times 2+f(n-1,k)\times 3+f(n-1,k+1)\times 4+f(n-1,k+2)\times 3+f(n-1,k+3)\times 2+f(n-1,k+4)\times 1$ So with prefix sums $s$ ($s(i,j)=\sum_{k\le j} f(i,k)$) we can write $f(n,k+1)=f(n,k)-(s(n-1,k)-s(n-1,k-n))+(s(n-1,k+n)-s(n-1,k))$. Note that the second indice of the array might be negative, so we should shift it by $130000$. The memory complexity is $O(n^3)$, so we should only keep two layers of transition to optimize it to $O(n^2)$ (If implemented well, $O(n^3)$ memory solutions can also pass.)
[ "combinatorics", "dp", "fft", "math" ]
2,700
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
// B shifts the (possibly negative) inversion-count difference k into a
// non-negative array index: k is stored at index k + B.
const int B=130000;
// w[parity][k+B] = f(i, k): number of permutation pairs (p, q) of length i
//                  with inv(p) - inv(q) = k.
// s[parity][.]   = prefix sums of w for the same layer.
// Rolling arrays over i keep memory at O(n^2)-ish instead of O(n^3).
int n,mod,w[2][2*B+5],s[2][2*B+5],ans[505];
int main(){
    cin>>n>>mod;
    // Length 1: the only pair has difference 0.
    w[0][B]=s[0][B]=1;
    for(int i=B;i<=2*B;i++)s[0][i]=1;
    for(int i=1;i<=n;i++){
        // curs carries f(i, k) while k sweeps left to right; I/J select the
        // current/previous layer of the rolling arrays.
        int curs=1,I=i&1,J=I^1;
        memset(w[I],0,sizeof(w[I])),memset(s[I],0,sizeof(s[I]));
        // |k| <= i*(i-1)/2; advance f(i,k) -> f(i,k+1) with the editorial's
        // sliding-window identity built from previous-layer prefix sums.
        for(int u=i*(i-1)/2,j=-u+B;j<=u+B;j++){
            w[I][j]=curs;
            curs=(0ll+curs-s[J][j]+s[J][j-i]+s[J][j+i]-s[J][j]+2ll*mod)%mod;
        }
        // Rebuild prefix sums for the freshly computed layer.
        for(int j=B-i*(i-1)/2,v=(i+2)*(i+1)/2+B;j<=v;j++)s[I][j]=(s[I][j-1]+w[I][j])%mod;
        // t_i part: first elements differ by j (i-j ordered choices), and the
        // tails (length i-1, previous layer J) must differ by more than j.
        for(int j=1;j<i;j++)ans[i]=(ans[i]+1ll*(s[J][(i+1)*i/2+B]-s[J][j+B]+mod)%mod*(i-j))%mod;
    }
    // Combine: ans_i = i * ans_{i-1} (p_1 = q_1) + t_i (already in ans[i]).
    for(int i=2;i<=n;i++)ans[i]=(ans[i]+1ll*i*ans[i-1])%mod;
    cout<<ans[n];
}
1543
A
Exciting Bets
Welcome to Rockport City! It is time for your first ever race in the game against Ronnie. To make the race interesting, you have bet $a$ dollars and Ronnie has bet $b$ dollars. But the fans seem to be disappointed. The excitement of the fans is given by $gcd(a,b)$, where $gcd(x, y)$ denotes the greatest common divisor (GCD) of integers $x$ and $y$. To make the race more exciting, you can perform two types of operations: - Increase both $a$ and $b$ by $1$. - Decrease both $a$ and $b$ by $1$. This operation can only be performed if both $a$ and $b$ are greater than $0$. In one move, you can perform any one of these operations. You can perform arbitrary (possibly zero) number of moves. Determine the maximum excitement the fans can get and the minimum number of moves required to achieve it. Note that $gcd(x,0)=x$ for any $x \ge 0$.
$GCD(a,b)=GCD(a-b,b)$ if $a>b$ $a-b$ does not change by applying any operation. However, $b$ can be changed arbitrarily. If $a=b$, the fans can get an infinite amount of excitement, and we can achieve this by applying the first operation infinite times. Otherwise, the maximum excitement the fans can get is $g=\lvert a-b\rvert$ and the minimum number of moves required to achieve it is $min(a\bmod g,g-a\bmod g)$. Without loss of generality, assume $a>b$ otherwise we can swap $a$ and $b$. We know that $GCD(a,b) = GCD(a-b,b)$. Notice that no matter how many times we apply any operation, the value of $a-b$ does not change. We can arbitrarily change the value of $b$ to a multiple of $a-b$ by applying the operations. In this way, we can achieve a $GCD$ equal to $a-b$. Now, since $GCD(x,y)\leq min(x,y)$ for any positive $x$ and $y$, $GCD(a-b,b)$ can never exceed $a-b$. So, we cannot achieve a higher GCD by any means. To achieve the required $GCD$, we need to make $b$ a multiple of $g=a-b$ using as few operations as possible. There are two ways to do so $-$ decrease $b$ to the largest multiple of $g$ less than or equal to $b$ or increase $b$ to the smallest multiple of $g$ greater than $b$. The number of operations required to do so are $b\bmod g$ and $g-b\bmod g$ respectively. We will obviously choose the minimum of the two. Also notice that $a\bmod g=b\bmod g$ since $a=b+g$. So, it doesn't matter if we use either $a$ or $b$ to determine the minimum number of operations. $\mathcal{O}(1)$ per test case.
[ "greedy", "math", "number theory" ]
900
#include <bits/stdc++.h>
using namespace std;

// Max excitement is g = |a - b|: the difference is invariant under both
// operations and gcd(a, b) always divides it. Reaching it costs the distance
// from a to the nearest multiple of g. a == b allows an unbounded gcd,
// reported as "0 0" per the required output convention.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int tests;
    cin >> tests;
    for (; tests > 0; --tests) {
        long long a, b;
        cin >> a >> b;
        if (a == b) {
            cout << 0 << " " << 0 << '\n';
            continue;
        }
        const long long target = llabs(a - b);
        const long long rem = a % target;
        // Either walk down to the multiple below, or up to the one above.
        cout << target << " " << min(rem, target - rem) << '\n';
    }
}
1543
B
Customising the Track
Highway 201 is the most busy street in Rockport. Traffic cars cause a lot of hindrances to races, especially when there are a lot of them. The track which passes through this highway can be divided into $n$ sub-tracks. You are given an array $a$ where $a_i$ represents the number of traffic cars in the $i$-th sub-track. You define the inconvenience of the track as $\sum\limits_{i=1}^{n} \sum\limits_{j=i+1}^{n} \lvert a_i-a_j\rvert$, where $|x|$ is the absolute value of $x$. You can perform the following operation any (possibly zero) number of times: choose a traffic car and move it from its current sub-track to any other sub-track. Find the minimum inconvenience you can achieve.
In the optimal arrangement, the number of cars will be distributed as evenly as possible. In the optimal arrangement, the number of traffic cars will be distributed as evenly as possible, i.e., $\lvert a_i-a_j\rvert\leq 1$ for each valid $(i,j)$. Let's sort the array in non-decreasing order. Let $a_1=p$, $a_n=q$, $p\leq q-2$, $x$ elements of the array be equal to $p$, $y$ elements of the array be equal to $q$, $\displaystyle\sum\limits_{i=x+1}^{n-y}\sum\limits_{j=i+1}^{n-y} a_j-a_i=r$ and $\displaystyle\sum\limits_{i=x+1}^{n-y} a_i=s$. The inconvenience of the track will be equal to $S_1=r+x\cdot[s-(n-x-y)\cdot p]+y\cdot[(n-x-y)\cdot q-s]+[xy\cdot(q-p)]$ Suppose we increase $a_1$ by $1$ and decrease $a_n$ by $1$. Then, the number of occurrences of $p$ and $q$ will reduce by $1$, two new elements $p+1$ and $q-1$ will be formed, and $r$ and $s$ will remain unchanged. In such a case, the new inconvenience of the track will be $S_2=r+(x-1)\cdot[s-(n-x-y)\cdot p]$ $+(y-1)\cdot[(n-x-y)\cdot q-s]$ $+[(x-1)\cdot(y-1)\cdot(q-p)]$ $+[s-(n-x-y)\cdot(p+1)]$ $+[(n-x-y)\cdot(q-1)-s]$ $+[(y-1)\cdot(q-p-1)]$ $+[(x-1)\cdot(q-p-1)]$ $+(x-1)+(y-1)+(q-p-2)$ Change in inconvenience, $\Delta = S_1-S_2 = [s-(n-x-y)\cdot p]+[(n-x-y)\cdot q-s]+[(x+y-1)\cdot(q-p)]$ $-[s-(n-x-y)\cdot(p+1)]$ $-[(n-x-y)\cdot (q-1)-s]$ $-[(y-1)\cdot(q-p-1)]$ $-[(x-1)\cdot(q-p-1)]$ $-(x-1)-(y-1)-(q-p-2)$ $=2\cdot(n-x-y)+(x+y-1)\cdot(q-p)-(x+y-2)\cdot(q-p-1)-(x-1)-(y-1)-(q-p-2)$ $=2\cdot(n-x-y)+(q-p)+(x+y-1)-1-(x-1)-(y-1)-(q-p-2)$ $=2\cdot(n-x-y+1) > 0$ as $x+y\leq n$. So, it follows that if $p\leq q-2$. it is always beneficial to move a traffic car from the sub-track with the highest number of traffic cars to the sub-track with the lowest number of traffic cars. If $p=q-1$, applying this operation won't change the inconvenience as this will only swap the last and the first element of the array leaving everything else unchanged. 
If $p=q$, all the elements of the array are already equal, meaning that we have $0$ inconvenience which is the minimum possible. So, there is no point in applying any operation. Now that we have constructed the optimal arrangement of traffic cars, let's find out the value of minimum inconvenience of this optimal arrangement. Finding it naively in $\mathcal{O}(n^2)$ will time out. Notice that in the optimal arrangement we will have some (say $x$) elements equal to some number $p+1$ and the other $(n-x)$ elements equal to $p$. Let the sum of all elements in $a$ be $sum$. Then, $x=sum\bmod n$ and $p=\bigg\lfloor\dfrac{sum}{n}\bigg\rfloor$. For each pair $(p,p+1)$, we will get an absolute difference of $1$ and for all other pairs, we will get an absolute difference of $0$. Number of such pairs with difference $1$ is equal to $x\cdot(n-x)$. So, the minimum inconvenience we can achieve is equal to $x\cdot(n-x)$. That's all we need to find out! $\mathcal{O}(n)$
[ "combinatorics", "greedy", "math" ]
900
#include <bits/stdc++.h>
using namespace std;

// The optimal arrangement spreads cars as evenly as possible: with
// k = sum % n sub-tracks holding one extra car, only the k*(n-k) pairs
// that differ by exactly 1 contribute to the inconvenience.
//
// Fix vs. the original: the values were read into a variable-length array
// `int a[n]` (a compiler extension, not standard C++) even though only
// their sum is ever used; the array is removed entirely.
int main() {
    ios_base::sync_with_stdio(false);
    cin.tie(NULL);
    int t;
    cin >> t;
    while (t--) {
        int n;
        cin >> n;
        long long s = 0;  // total number of cars across all sub-tracks
        for (int i = 0; i < n; i++) {
            long long cars;
            cin >> cars;
            s += cars;
        }
        long long k = s % n;  // sub-tracks that receive one extra car
        cout << k * (n - k) << '\n';
    }
}
1543
C
Need for Pink Slips
After defeating a Blacklist Rival, you get a chance to draw $1$ reward slip out of $x$ hidden valid slips. Initially, $x=3$ and these hidden valid slips are Cash Slip, Impound Strike Release Marker and Pink Slip of Rival's Car. Initially, the probability of drawing these in a random guess are $c$, $m$, and $p$, respectively. There is also a volatility factor $v$. You can play any number of Rival Races as long as you don't draw a Pink Slip. Assume that you win each race and get a chance to draw a reward slip. In each draw, you draw one of the $x$ valid items with their respective probabilities. Suppose you draw a particular item and its probability of drawing before the draw was $a$. Then, - If the item was a Pink Slip, the quest is over, and you will not play any more races. - Otherwise, - If $a\leq v$, the probability of the item drawn becomes $0$ and the item is no longer a valid item for all the further draws, reducing $x$ by $1$. Moreover, the reduced probability $a$ is distributed equally among the other remaining valid items. - If $a > v$, the probability of the item drawn reduces by $v$ and the reduced probability is distributed equally among the other valid items. For example, - If $(c,m,p)=(0.2,0.1,0.7)$ and $v=0.1$, after drawing Cash, the new probabilities will be $(0.1,0.15,0.75)$. - If $(c,m,p)=(0.1,0.2,0.7)$ and $v=0.2$, after drawing Cash, the new probabilities will be $(Invalid,0.25,0.75)$. - If $(c,m,p)=(0.2,Invalid,0.8)$ and $v=0.1$, after drawing Cash, the new probabilities will be $(0.1,Invalid,0.9)$. - If $(c,m,p)=(0.1,Invalid,0.9)$ and $v=0.2$, after drawing Cash, the new probabilities will be $(Invalid,Invalid,1.0)$. You need the cars of Rivals. So, you need to find the expected number of races that you must play in order to draw a pink slip.
Did you notice that $v\geq 0.1$? The probability of drawing a pink slip can never decrease. What would be the complexity of a bruteforce solution? Make sure to account for precision errors while comparing floating point numbers. Bruteforce over all the possible drawing sequences until we are sure to get a pink slip, i.e., until the probability of drawing a pink slip becomes $1$. Whenever we draw a reward other than a pink slip, If $a\leq v$, one of the rewards becomes invalid, reducing $x$ by $1$ and this can happen at most $2$ times during the whole process. Else, the probability of drawing a pink slip increases by $\dfrac{v}{2}$. Notice that the probability of drawing a pink slip can never decrease. Now, since $v\geq 0.1$, each time we make a draw of the second type, the probability of drawing a pink slip increases by at least $0.05$. It will reach $1$ after just $20$ such draws. So, there will be at most $d=22$ draws before we are sure to get a pink slip. Simulating the whole process will take $\mathcal{O}(2^d)$ time which is sufficient in our case. What's left is just implementing the bruteforce solution taking care of precision errors while dealing with floating point numbers, especially while comparing $a$ with $v$ as this can completely change things up, keeping an item valid when it should become invalid. It follows that an error approximation of 1e-6 or smaller is sufficient while comparing any two values because all the numbers in the input have at most $4$ decimal places. Another alternative is to convert floating point numbers given in the input to integers using a scaling factor of $10^6$. $\mathcal{O}\big(2^{\frac{2}{v}}\big)$
[ "bitmasks", "brute force", "dfs and similar", "implementation", "math", "probabilities" ]
1,900
#include <bits/stdc++.h>
using namespace std;
const long double eps = 1e-9;   // NOTE: declared but never used in this solution
// All probabilities are handled as integers scaled by 1e6, which sidesteps
// floating-point comparison errors (the inputs have at most 4 decimals);
// they are divided back by `scale` only when multiplied into the expectation.
const long double scale = 1e+6;
// Expected number of races given the scaled probabilities c (cash),
// m (marker), p (pink slip) and the scaled volatility v.
// A probability of 0 marks an item that is no longer valid.
long double expectedRaces(int c,int m,int p,int v) {
    // The current race is always played; with probability p/scale we draw
    // the pink slip and stop, otherwise we recurse after redistribution.
    long double ans = p/scale;
    if(c>0) {
        if(c>v) {
            // Cash stays valid: it loses v, split among the remaining items.
            if(m>0) ans += (c/scale)*(1+expectedRaces(c-v,m+v/2,p+v/2,v));
            else ans += (c/scale)*(1+expectedRaces(c-v,0,p+v,v));
        }
        else {
            // Cash becomes invalid: its whole probability c is redistributed.
            if(m>0) ans += (c/scale)*(1+expectedRaces(0,m+c/2,p+c/2,v));
            else ans += (c/scale)*(1+expectedRaces(0,0,p+c,v));
        }
    }
    if(m>0) {
        if(m>v) {
            // Marker stays valid.
            if(c>0) ans += (m/scale)*(1+expectedRaces(c+v/2,m-v,p+v/2,v));
            else ans += (m/scale)*(1+expectedRaces(0,m-v,p+v,v));
        }
        else {
            // Marker becomes invalid.
            if(c>0) ans += (m/scale)*(1+expectedRaces(c+m/2,0,p+m/2,v));
            else ans += (m/scale)*(1+expectedRaces(0,0,p+m,v));
        }
    }
    return ans;
}
int main() {
    int t;
    cin >> t;
    while(t--) {
        long double cd,md,pd,vd;
        cin >> cd >> md >> pd >> vd;
        // round() guards against representation error when converting the
        // 4-decimal-place inputs to scaled integers.
        int c = round(cd*scale);
        int m = round(md*scale);
        int p = round(pd*scale);
        int v = round(vd*scale);
        long double ans = expectedRaces(c,m,p,v);
        cout << setprecision(12) << fixed << ans << '\n';
    }
}
1543
D1
RPD and Rap Sheet (Easy Version)
\textbf{This is the easy version of the problem. The only difference is that here $k=2$. You can make hacks only if both the versions of the problem are solved.} This is an interactive problem. Every decimal number has a base $k$ equivalent. The individual digits of a base $k$ number are called $k$-its. Let's define the $k$-itwise XOR of two $k$-its $a$ and $b$ as $(a + b)\bmod k$. The $k$-itwise XOR of two base $k$ numbers is equal to the new number formed by taking the $k$-itwise XOR of their corresponding $k$-its. The $k$-itwise XOR of two decimal numbers $a$ and $b$ is denoted by $a\oplus_{k} b$ and is equal to the decimal representation of the $k$-itwise XOR of the base $k$ representations of $a$ and $b$. All further numbers used in the statement below are in decimal unless specified. When $k = 2$ (it is always true in this version), the $k$-itwise XOR is the same as the bitwise XOR. You have hacked the criminal database of Rockport Police Department (RPD), also known as the Rap Sheet. But in order to access it, you require a password. You don't know it, but you are quite sure that it lies between $0$ and $n-1$ inclusive. So, you have decided to guess it. Luckily, you can try at most $n$ times without being blocked by the system. But the system is adaptive. Each time you make an incorrect guess, it changes the password. Specifically, if the password before the guess was $x$, and you guess a different number $y$, then the system changes the password to a number $z$ such that $x\oplus_{k} z=y$. Guess the password and break into the system.
In this version, $x\oplus z=y$ or in other words, $z=x\oplus y$ where $\oplus$ is the Bitwise XOR operator. The number of queries allowed is equal to the number of possible initial passwords. The grader provides us no information other than whether our guess was correct or not. So, we need to find a way to ask queries such that the $x$-th query will give the correct answer if the original password was $(x-1)$. Try to ask queries in such a way that the $i$-th query reverses the effect of the $(i-1)$-th query and simultaneously checks if the initial password was $(i-1)$. Try to use the self-inverse property of Bitwise XOR, i.e., $a\oplus a=0$. In this version of the problem, $k=2$. So, the $k$-itwise XOR is the same as Bitwise XOR. In case of incorrect guess, the system changes password to $z$ such that $x\oplus z=y$. Taking XOR with $x$ on both sides, $x\oplus x\oplus z=x\oplus y\implies z=x\oplus y$ because we know that $x\oplus x = 0$. Since the original password is less than $n$ and we have $n$ queries, we need to find a way to make queries such that if the original password was $(x-1)$, then the $x$-th query will be equal to the current password. There are many different approaches. I will describe two of them. Let $q_i$ denote the $i$-th query. Then, $q_1=0$. $q_i=(i-1)\oplus (i-2)$ for $2\leq i\leq n$. Let's see why this works. Claim - If the original password was $x$, after $i$ queries, the current password will be $x\oplus (i-1)$. Let's prove this by induction. Base Condition - After $1$-st query, the password becomes $x\oplus 0=x\oplus(1-1)$. Induction Hypothesis - Let the password after $i$-th query be $x\oplus (i-1)$. Inductive step - The $(i+1)$-th query will be $i\oplus(i-1)$. If this is not equal to the current password, the password will change to $(x\oplus(i-1))\oplus(i\oplus(i-1))$ $=x\oplus(i-1)\oplus i\oplus(i-1)$ $=x\oplus i$ $=x\oplus((i+1)-1)$. Hence, proved by induction. 
Now notice that after $x$ queries, the password will become $x\oplus(x-1)$. And our $(x+1)$-th query will be $x\oplus(x-1)$ which is the same as the current password. So, the problem will be solved after $(x+1)$ queries. Since $0\leq x < n$, the problem will be solved in at most $n$ queries. Again, let $q_i$ denote the $i$-th query. Then, $q_i=(i-1)\oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{i-2} \oplus q_{i-1}$ Let's see why this works. Claim - If the original password was $x$, after $i$ queries, the current password will be $x \oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{i-1} \oplus q_{i}$. Let's prove this by induction. Base Condition - The first query is $0$. After $1$-st query, the password becomes $x\oplus 0=x\oplus q_1$. Induction Hypothesis - Let the password after $i$-th query be $x \oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{i-1} \oplus q_{i}$. Inductive step - The $(i+1)$-th query will be $q_{i+1}$. So, the password after $(i+1)$-th query will be $(x \oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{i-1} \oplus q_{i})\oplus q_{i+1}$ $= x \oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{i-1} \oplus q_{i}\oplus q_{i+1}$. Hence, proved by induction. Now notice that after $x$ queries, the password will become $x \oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{x-1} \oplus q_{x}$. And our $(x+1)$-th query will be $x \oplus q_1 \oplus q_2 \oplus \ldots \oplus q_{x-1} \oplus q_{x}$ which is the same as the current password. So, the problem will be solved after $(x+1)$ queries. Since $0\leq x < n$, the problem will be solved in at most $n$ queries. But we are not done yet. We can't afford to calculate the value of each query naively in $\mathcal{O}(n)$ because this will time out. To handle this, we need to maintain a prefix XOR whose value will be $p=q_1\oplus q_2\oplus \ldots \oplus q_{i-1} \oplus q_i$ after $i$ queries. For the $(i+1)$-th query, find $q_{i+1}=p\oplus i$ and update $p=p\oplus q_{i+1}$. 
$\mathcal{O}(n)$ or $\mathcal{O}(n\cdot \log_{2} n)$ depending upon the implementation.
[ "bitmasks", "constructive algorithms", "interactive", "math" ]
1,700
#include <bits/stdc++.h>
using namespace std;

// Interactive (k = 2): the i-th guess is q_i = (i-1) XOR prefix, where
// prefix is the XOR of all previous guesses. If the initial password was
// i-1, the i-th guess matches the current (adaptively changed) password.
int main() {
    int t = 1;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;  // k is always 2 in this version but is still read
        int prefix = 0;
        for (int guess = 0; guess < n; guess++) {
            const int query = prefix ^ guess;
            cout << query << endl;  // endl flushes — required by the interactor
            prefix ^= query;
            int verdict;
            cin >> verdict;
            if (verdict == 1) break;  // password found, next test case
        }
    }
    return 0;
}
1543
D2
RPD and Rap Sheet (Hard Version)
\textbf{This is the hard version of the problem. The only difference is that here $2\leq k\leq 100$. You can make hacks only if both the versions of the problem are solved.} \textbf{This is an interactive problem!} Every decimal number has a base $k$ equivalent. The individual digits of a base $k$ number are called $k$-its. Let's define the $k$-itwise XOR of two $k$-its $a$ and $b$ as $(a + b)\bmod k$. The $k$-itwise XOR of two base $k$ numbers is equal to the new number formed by taking the $k$-itwise XOR of their corresponding $k$-its. The $k$-itwise XOR of two decimal numbers $a$ and $b$ is denoted by $a\oplus_{k} b$ and is equal to the decimal representation of the $k$-itwise XOR of the base $k$ representations of $a$ and $b$. All further numbers used in the statement below are in decimal unless specified. You have hacked the criminal database of Rockport Police Department (RPD), also known as the Rap Sheet. But in order to access it, you require a password. You don't know it, but you are quite sure that it lies between $0$ and $n-1$ inclusive. So, you have decided to guess it. Luckily, you can try at most $n$ times without being blocked by the system. But the system is adaptive. Each time you make an incorrect guess, it changes the password. Specifically, if the password before the guess was $x$, and you guess a different number $y$, then the system changes the password to a number $z$ such that $x\oplus_{k} z=y$. Guess the password and break into the system.
The generalised $k$-itwise XOR does not satisfy the Self-Inverse property. So, the solution for the Easy Version won't work here. Any property which is satisfied by $k$-its will also be satisfied by base $k$ numbers since a base $k$ number is nothing but a concatenation of $k$-its. So, try to prove properties for $k$-its as they are easier to work with. Let $x_j$ denote the $j$-th $k$-it of $x$. Then, $x_j\oplus_k z_j=y_j$ $\forall$ valid $j$. Simplifying this, $x_j\oplus_k z_j=y_j$ $\implies$ $(x_j+z_j)\bmod k=y_j$ $\implies$ $z_j=(y_j-x_j)\bmod k$ $\implies$ $z_j=(y_j\ominus_k x_j)$ where $a\ominus_k b$ operation is defined as $a\ominus_k b=(a-b)\bmod k$. See Hints 2, 3 and 4 of the Easy Version. Try to generalise the solutions for easy version by exploring properties of $\oplus_k$ and $\ominus_k$ operators. Note - It is strongly recommended to read the proofs also to completely understand why the solutions work. The solutions described for the easy version won't work here because the general $k$-itwise operation does not satisfy self-inverse property, i.e., $a\oplus_k a\neq 0$. In this whole solution, we will work in base $k$ only and we will convert the numbers to decimal only for I/O purpose. Notice that any property which is satisfied by $k$-its will also be satisfied by base $k$ numbers since a base $k$ number is nothing but a concatenation of $k$-its. When we make an incorrect guess, the system changes the password to $z$ such that $x\oplus_k z=y$. Let's denote the $j$-th $k$-it of $x$ by $x_j$. Expanding this according to the definition of $k$-itwise XOR, for all individual $k$-its $(x_j+z_j)\bmod k=y_j$ $\implies z_j=(y_j-x_j)\bmod k$. So, let's define another $k$-itwise operation $a\ominus_k b$ $= (a-b)\bmod k$. Then, $z=y\ominus_k x$. Now, let's extend the solutions of the Easy Version for this version. Before moving to the solution, let's see some properties of the $\ominus_k$ operation over $k$-its. 
Property 1 - $(a\ominus_k b)\ominus_k(a\ominus_k c)=c\ominus_k b$ $(a\ominus_k b)\ominus_k(a\ominus_k c)$ $= ((a-b)\bmod k - (a-c)\bmod k)\bmod k$ $=(a-b-a+c)\bmod k$ $=(c-b)\bmod k$ $= c\ominus_k b$ Property 2 - $(b\ominus_k a)\ominus_k(c\ominus_k a)=b\ominus_k c$ $(b\ominus_k a)\ominus_k(c\ominus_k a)$ $= ((b-a)\bmod k - (c-a)\bmod k)\bmod k$ $=(b-a-c+a)\bmod k$ $=(b-c)\bmod k$ $= b\ominus_k c$ Solution - Let $q_i$ denote the $i$-th query. Then, $q_1=0$, $q_i=(i-2)\ominus_k (i-1)$ if $i$ is even and $q_i=(i-1)\ominus_k (i-2)$ if $i$ is odd. Let's see why this works. Claim - If the original password was $x$, after $i$ queries, the password becomes $x\ominus_k (i-1)$ if $i$ is even and $(i-1)\ominus_k x$ if $i$ is odd. Let's prove this by induction. Base Case - $q_1=0$. So, after $1$-st query, the password becomes $0\ominus_k x = (1-1)\ominus_k x$. Case 1 - $i$ is even Induction hypothesis - Let the current password after $i$ queries be $x\ominus_k (i-1)$. Inductive step - $(i+1)$ is odd. So, the $(i+1)$-th query is $i\ominus_k (i-1)$. The new password will be $(i\ominus_k (i-1))\ominus_k(x\ominus_k (i-1))$ $= i\ominus_k x$ by Property 2. Case 2 - $i$ is odd Induction hypothesis - Let the current password after $i$ queries be $(i-1)\ominus_k x$. Inductive step - $(i+1)$ is even. So, the $(i+1)$-th query is $(i-1)\ominus_k i$. The new password will be $((i-1)\ominus_k i)\ominus_k((i-1)\ominus_k x)$ $= x\ominus_k i$ by Property 1. Hence, proved by induction. Now notice that after $x$ queries, the password will become $x\ominus_k (x-1)$ if $x$ is even or $(x-1)\ominus_k x$ if $x$ is odd which will be equal to the $(x+1)$-th query. Hence, the problem will be solved after exactly $(x+1)$ queries. Since $0\leq x < n$, the problem will be solved after at most $n$ queries. Again, let's denote the $i$-th query by $q_i$. 
Then, $q_i=q_{i-1}\ominus_k\bigg[q_{i-2}\ominus_k\Big[q_{i-3}\ominus_k\ldots\ominus_k\big[q_2\ominus_k[q_1\ominus_k(i-1)]\big]\ldots\Big]\bigg]$ Let's see why this works. Claim - If the original password was $x$, after $i$ queries, the password will be $q_{i}\ominus_k\bigg[q_{i-1}\ominus_k\Big[q_{i-2}\ominus_k\ldots\ominus_k\big[q_2\ominus_k[q_1\ominus_k x]\big]\ldots\Big]\bigg]$ Let's prove this by induction. Base Case - After the $1$-st query which is $0$, the password will be $0\ominus_k x = q_1\ominus_k x$. Induction hypothesis - Let the password after $i$ queries be $q_{i}\ominus_k\bigg[q_{i-1}\ominus_k\Big[q_{i-2}\ominus_k\ldots\ominus_k\big[q_2\ominus_k[q_1\ominus_k x]\big]\ldots\Big]\bigg]$ Inductive Step - The $(i+1)$-th query is $q_{i+1}$. After $(i+1)$ queries, the password will becomes $q_{i+1}\ominus_k\Bigg[q_{i}\ominus_k\bigg[q_{i-1}\ominus_k\Big[q_{i-2}\ominus_k\ldots\ominus_k\big[q_2\ominus_k[q_1\ominus_k x]\big]\ldots\Big]\bigg]\Bigg]$ Hence, proved by induction. Now notice that after $x$ queries, the password will become $q_{x}\ominus_k\bigg[q_{x-1}\ominus_k\Big[q_{x-2}\ominus_k\ldots\ominus_k\big[q_2\ominus_k[q_1\ominus_k x]\big]\ldots\Big]\bigg]$ which will be equal to the $(x+1)$-th query. Hence, the problem will be solved after exactly $(x+1)$ queries. Since $0\leq x < n$, the problem will be solved after at most $n$ queries. But we are not done yet. This solution is $\mathcal{O}(n^2)$ which will time out. The solution for this isn't as simple as what we did for the Easy version because the $\ominus_k$ operation is neither associative nor commutative. So, it's time to explore some more properties of these operations. 
Property 3 - $a\ominus_k(b\ominus_k c) = (a\ominus_k b)\oplus_k c$ $a\ominus_k(b\ominus_k c)$ $= (a-(b-c)\bmod k)\bmod k$ $= (a-b+c)\bmod k$ $= ((a-b)\bmod k+c)\bmod k$ $= (a\ominus_k b)\oplus_k c$ Property 4 - $a\ominus_k(b\oplus_k c) = (a\ominus_k b)\ominus_k c$ $a\ominus_k(b\oplus_k c)$ $= (a-(b+c)\bmod k)\bmod k$ $= (a-b-c)\bmod k$ $= ((a-b)\bmod k-c)\bmod k$ $= (a\ominus_k b)\ominus_k c$ Now, let's try to simplify our queries. $q_1=0$ $q_2=q_1\ominus_k 1$ $q_3=q_2\ominus_k [q_1\ominus_k 2]=[q_2\ominus_k q_1]\oplus_k 2$ (by Property 3) $q_4=q_3\ominus_k \big[q_2\ominus_k [q_1\ominus_k 3]\big]=q_3\ominus_k \big[[q_2\ominus_k q_1]\oplus_k 3\big]=\big[q_3\ominus_k[q_2\ominus_k q_1]\big]\ominus_k 3$ (by Property 4) $\ldots$ See the pattern? You can generalize the $i$-th query as - $q_i=q_{i-1}\ominus_k\Big[q_{i-2}\ominus_k\big[q_{i-3}\ominus_k\ldots\ominus_k[q_2\ominus_k q_1]\ldots\big]\Big]\oplus_k (i-1)$ if $i$ is odd $q_i=q_{i-1}\ominus_k\Big[q_{i-2}\ominus_k\big[q_{i-3}\ominus_k\ldots\ominus_k[q_2\ominus_k q_1]\ldots\big]\Big]\ominus_k (i-1)$ if $i$ is even So, we will maintain a prefix Negative XOR whose value after $i$ queries will be $p=q_{i}\ominus_k\Big[q_{i-1}\ominus_k\big[q_{i-2}\ominus_k\ldots\ominus_k[q_2\ominus_k q_1]\ldots\big]\Big]$ Then, $q_{i+1}=p\ominus_k i$ if $i$ is odd $q_{i+1}=p\oplus_k i$ if $i$ is even Then update $p=q_{i+1}\ominus_k p$ Both the operations $\ominus_k$ and $\oplus_k$ can be implemented naively by converting the decimal numbers to base $k$, finding the $k$-itwise XOR of the base $k$ numbers and finally converting it back to decimal. The time complexity for each of these operations will be $\mathcal{O}(\log_k n)$. At any stage, the maximum number that we could be dealing with will be non-negative and will not exceed $n\times k$ as the $k$-itwise operations do not add extra $k$-its. This fits well within the limits of $y$ which is $2\cdot 10^7$. The total time complexity of the solution will be $\mathcal{O}(n\log_k n)$. 
This part may be interesting for hackers and those who would like to understand what goes on in the background in the checkers and interactors which determine the correctness of their solutions.
[ "brute force", "constructive algorithms", "interactive", "math" ]
2,200
#include <bits/stdc++.h>
using namespace std;

// k-itwise subtraction: per-digit (x_d - y_d) mod k, with x, y read in base k.
int base_k_sub(int x, int y, int k)
{
    int result = 0, place = 1;
    while (x > 0 || y > 0)
    {
        int digit = ((x % k) - (y % k) + k) % k;
        result += place * digit;
        place *= k;
        x /= k;
        y /= k;
    }
    return result;
}

// k-itwise addition (the "k-itwise XOR"): per-digit (x_d + y_d) mod k, base k.
int base_k_add(int x, int y, int k)
{
    int result = 0, place = 1;
    while (x > 0 || y > 0)
    {
        int digit = ((x % k) + (y % k)) % k;
        result += place * digit;
        place *= k;
        x /= k;
        y /= k;
    }
    return result;
}

int main()
{
    int t = 1;
    cin >> t;
    while (t--)
    {
        int n, k;
        cin >> n >> k;
        // Running "negative prefix XOR" of all queries asked so far
        // (tutorial: p = q_i ⊖ [q_{i-1} ⊖ [... ⊖ [q_2 ⊖ q_1]...]]).
        int prefix = 0;
        for (int i = 0; i < n; i++)
        {
            // i-th guess (0-based): q = p ⊕ i when i is even, q = p ⊖ i when
            // i is odd. For i == 0 both forms reduce to 0, the mandatory
            // first query, and the prefix update is a no-op (0 ⊖ 0 = 0).
            int q = (i % 2 == 0) ? base_k_add(prefix, i, k)
                                 : base_k_sub(prefix, i, k);
            cout << q << endl;
            prefix = base_k_sub(q, prefix, k);
            int verdict;
            cin >> verdict;
            if (verdict == 1) break;  // password guessed, move to next test
        }
    }
    return 0;
}
1543
E
The Final Pursuit
Finally, you have defeated Razor and now, you are the Most Wanted street racer. Sergeant Cross has sent the full police force after you in a deadly pursuit. Fortunately, you have found a hiding spot but you fear that Cross and his force will eventually find you. To increase your chances of survival, you want to tune and repaint your BMW M3 GTR. The car can be imagined as a permuted $n$-dimensional hypercube. A simple $n$-dimensional hypercube is an undirected unweighted graph built recursively as follows: - Take two simple $(n-1)$-dimensional hypercubes one having vertices numbered from $0$ to $2^{n-1}-1$ and the other having vertices numbered from $2^{n-1}$ to $2^{n}-1$. A simple $0$-dimensional Hypercube is just a single vertex. - Add an edge between the vertices $i$ and $i+2^{n-1}$ for each $0\leq i < 2^{n-1}$. A permuted $n$-dimensional hypercube is formed by permuting the vertex numbers of a simple $n$-dimensional hypercube in any arbitrary manner. Examples of a simple and permuted $3$-dimensional hypercubes are given below: Note that a permuted $n$-dimensional hypercube has the following properties: - There are exactly $2^n$ vertices. - There are exactly $n\cdot 2^{n-1}$ edges. - Each vertex is connected to exactly $n$ other vertices. - There are no self-loops or duplicate edges. Let's denote the permutation used to generate the permuted $n$-dimensional hypercube, representing your car, from a simple $n$-dimensional hypercube by $P$. Before messing up the functionalities of the car, you want to find this permutation so that you can restore the car if anything goes wrong. But the job isn't done yet. You have $n$ different colours numbered from $0$ to $n-1$. You want to colour the vertices of this permuted $n$-dimensional hypercube in such a way that for each and every vertex $u$ satisfying $0\leq u < 2^n$ and for each and every colour $c$ satisfying $0\leq c < n$, there is at least one vertex $v$ adjacent to $u$ having a colour $c$. 
In other words, from each and every vertex, it must be possible to reach a vertex of any colour by just moving to an adjacent vertex. Given the permuted $n$-dimensional hypercube, find any valid permutation $P$ and colouring.
In a simple $n$-Dimensional Hypercube, two vertices are connected if and only if they differ by exactly $1$ bit in their binary representation. The $n$-Dimensional Hypercubes are highly symmetric and all vertices are equivalent. If we select a particular vertex, then all directions in which the edges connected to it goes are also equivalent. For any two vertices $a$ and $b$ separated at a distance of exactly $2$, there are exactly $2$ vertices connected to both $a$ and $b$. Any permuted $n$-Dimensional Hypercube is isomorphic to the simple $n$-Dimensional Hypercube. So, its structure is same as the simple $n$-Dimensional Hypercube. You can find the permutation greedily Before moving to the solution, notice a very important property of simple $n$-Dimensional Hypercubes - Two vertices $a$ and $b$ are connected if and only if $a$ and $b$ differ by exactly one bit in their binary representations. The permutation can be found using the following greedy algorithm - First, assign any arbitrary vertex as $p_0$. This is obvious since all vertices are equivalent. Then, in the simple $n$-Dimensional Hypercube, all powers of $2$ must be connected to the vertex $0$. Moreover, these vertices are added only when we are adding another dimension to the cube. Since all directions are also equivalent, it does not matter in which direction we add a new dimension. So, we can assign all the $n$ vertices connected to $p_0$ in the permuted $n$-Dimensional Hypercube as $p_1$, $p_2$, $p_4$, $p_8$, $\ldots$, $p_{2^{n-1}}$ in any arbitrary order. Now, we will find $p_u$ for the remaining vertices in increasing order of $u$. In order to find $p_u$, first find a set $S$ of vertices $v$ such that $v < u$ and $v$ is connected to $u$ in the simple $n$-Dimensional Hypercube. Then find any vertex $w$ connected to all the vertices $p_v$ such that $v\in S$ in the permuted $n$-Dimensional Hypercube and assign $p_u=w$. I claim that we can never make a wrong choice because we will never have a choice! 
There will only be one such vertex $w$ for any $u$. Let's prove it. Consider two vertices $v_1$ and $v_2$ in the set $S$. These vertices will differ by exactly $2$ bits in their binary representation. Let the bits in which they differ be $b_x$ and $b_y$. Then, they will have the form $v_1=\ldots b_x\ldots b_y\ldots$ and $v_2=\ldots b^{'}_{x}\ldots b^{'}_{y}\ldots$ where $\ldots$ represent the same bits. Now, only two vertices $u_1=\ldots b_{x}\ldots b^{'}_{y}\ldots$ and $u_2=\ldots b^{'}_{x}\ldots b_{y}\ldots$ can be connected to both $v_1$ and $v_2$. Since a permuted $n$-Dimensional Hypercube is isomorphic to a simple $n$-Dimensional Hypercube, there will only be two vertices connected to both $p_{v_1}$ and $p_{v_2}$ in the permuted $n$-Dimensional Hypercube also. If we iterate over $u$ in increasing order, then $b_x\neq b_y$, otherwise one of $v_1$ or $v_2$ will be greater than $u$ which is a contradiction. So, the only vertices connected to both $v_1$ and $v_2$ will have the forms $u_1=\ldots 0\ldots 0\ldots$ and $u_2=\ldots 1\ldots 1\ldots$. Now since $u_1<v_1$ and $u_1<v_2$, $p_{u_1}$ has already been calculated and so, one of the vertex connected to both $p_{v_1}$ and $p_{v_2}$ in the permuted $n$-Dimensional Hypercube has already been used. So, we are left with only one choice for such a vertex $w$. Let's call the vertex connected to a given vertex and which is in the opposite constituent smaller hypercube the image of the given vertex. Lemma - if there is an edge $(a, b)$ in the $n$-Dimensional hypercube where vertices $a$ and $b$ lie in different constituent $(n-1)$-Dimensional Hypercubes (in other words, $a$ and $b$ are images of each other), then for all vertices $q$ adjacent to $a$, the image of $q$ is adjacent to $b$. This lemma can be proved by using the fact that two vertex are connected if and only if they differ by exactly $1$ bit. Select any two vertices $a$ and $b$. 
They form a starting point: we treat them as two vertices in opposite constituents (by symmetry, we can prove that any pairs can be treated as such). Now let us perform multisource BFS with $a$ and $b$ as source nodes. Due to the lemma, the nodes which are discovered from $a$ first lie in the component of $a$, and those which are discovered from $b$ first lie in the component of $b$ (it is easy again to prove it using induction on depth of already discovered vertices). So we have separated these two constituent smaller dimension hypercubes. Lets call a recursive function on any one of them: this recursive function returns a permutation which transforms the permutated hypercube to the simple hypercube. Now, we find for each vertex, in the constituent hypercube whose permutation we just found, its image. Then we can find the permutation for the other constituent by just adding $2^{n-1}$ to the corresponding image. Hence we perform the merging process of recursion. The time complexity of this approach is $\mathcal{O}(n\cdot 2^{n})$ Instead of colouring the permuted $n$-Dimensional Hypercube, try to colour the simple $n$-Dimensional Hypercube and map these colours to the permuted one in the end using the permutation found in Part 1. The number of vertices of each colour will be equal. Try to colour a simple $4$-Dimensional Hypercube. This is not a graph problem, rather a constructive problem. The way in which vertices are connected in a simple $n$-Dimensional Hypercube suggests something related to Bitwise XOR. Let's try to colour the simple $n$-Dimensional Hypercube instead of the permuted one. We can map the colours to the permuted one in the end using the permutation found in Part 1. I claim that if $n$ is not a power of $2$, then no colouring exists. A simple explanation is that the graph is symmetric and also the colours. 
So, it is natural that the number of vertices of each colour must be equal meaning $2^n$ must be divisible by $n$ or in other words, $n$ must be a power of $2$ itself. But if symmetry doesn't satisfy you, I have a formal proof too. According to the condition, every vertex must be connected to at least one vertex of every colour or equivalently, exactly one vertex of every colour since there are $n$ colours and $n$ neighbours of each vertex. So, if we consider the set of neighbours of any vertex, every colour will appear exactly once in that set. If we consider the multi-set of neighbours of all vertices, every colour will appear $2^n$ times in that set. But every vertex has been counted $n$ times in this multi-set because a particular vertex is the neighbour of $n$ other vertices. So, if we consider the set of all vertices, every colour will appear $\frac{2^n}{n}$ times in that set. Obviously, this number must be a whole number. So, $2^n$ must be divisible by $n$ or in other words, $n$ must itself be a power of $2$ for a colouring to exist. Otherwise, it doesn't exist. To show that a colouring exists if $n$ is a power of $2$, we will construct a colouring. Construction - This is the most interesting and difficult part of the whole problem. The following construction works - Consider a vertex $u$. Let its binary representation be $b_{n-1}b_{n-2}\ldots b_2 b_1 b_0$. Then the colour of this vertex will be $\bigoplus\limits_{i=0}^{n-1} i\cdot b_i$. Let's show why this works. $\bigoplus\limits_{i=0}^{n-1} i\cdot b_i$ will always lie between $0$ and $n-1$ inclusive because $n$ is a power of $2$. If we are at a vertex $u$ with a colour $c_1$ and we want to reach a vertex of colour $c_2$, we can reach the vertex $v=u\oplus(1\ll (c_1\oplus c_2))$. This is a vertex adjacent to $u$ since it differs from it by exactly $1$ bit and this is the $(c_1\oplus c_2)$-th bit. Notice that the colour of this vertex $v$ will be $c_1\oplus (c_1\oplus c_2) = c_2$. 
$(c_1\oplus c_2)$ will always lie between $0$ and $n-1$ because $n$ is a power of $2$. So, $v$ will always be a valid vertex number. You can see that many of these facts break when $n$ is not a power of $2$. So, this colouring will not work in such cases. Finally, after colouring the simple hypercube, we need to restore the vertex numbers of the permuted hypercube. This can be simply done by replacing all vertices $u$ with $p_u$ using the permutation we found in Part 1. Well, that's enough of theoretical stuff. For those who like visualising things, here is a 4-D Hypercube and its colouring - Finding the permutation takes $\mathcal{O}(n\cdot\log_2 n\cdot 2^n)$ time if implemented using set or $\mathcal{O}(n^2\cdot 2^n)$ time if implemented using vector. Constructing the colouring for the simple $n$-Dimensional Hypercube takes $\mathcal{O}(n\cdot 2^n)$ time. Restoring colours for the permuted $n$-Dimensional Hypercube takes $\mathcal{O}(2^n)$ time. So, the overall time complexity is $\mathcal{O}(n\cdot\log_2 n\cdot 2^n)$ or $\mathcal{O}(n^2\cdot 2^n)$ depending upon implementation.
[ "bitmasks", "constructive algorithms", "divide and conquer", "graphs", "greedy", "math" ]
2,700
#include <iostream>
#include <iomanip>
#include <vector>
#include <cmath>
#include <algorithm>
#include <set>
#include <utility>
#include <queue>
#include <map>
#include <assert.h>
#include <stack>
#include <string>
#include <ctime>
#include <chrono>
#include <random>
using namespace std;

// Maximum number of vertices: 2^16, i.e. hypercube dimension up to 16.
const int MAX=65536;
// power[j] = 2^j; filled in main().
int power[21]={0};
// Adjacency lists of the permuted hypercube.
vector<int> adj[MAX+1];
// Scratch marker: vertex already labelled into the "lower" half during merge.
bool xtra[MAX+1];
// Scratch marker: vertex belongs to the set currently processed by f().
bool in[MAX+1];

// Given the vertex set s of a permuted hypercube (sub)graph, returns the
// permutation p such that p[u] is the vertex of the permuted graph that plays
// the role of vertex u of the simple hypercube.
// Strategy: pick an edge (v1, v2), split the set into the two opposite
// constituent (d-1)-dimensional halves by multisource BFS, recurse on one
// half, and label the other half through the "image" edges (see tutorial).
vector<int> f(vector<int> s)
{
    // Base case: a 1-dimensional cube (single edge) — either orientation works.
    if ((int)s.size()==2) { vector<int> p(2); p[0]=s[0]; p[1]=s[1]; return p; }
    for (auto i: s) in[i]=true;
    // Pick any vertex v1 and any neighbour v2 inside the current set.
    int v1=s[0];
    int v2=-1;
    for (auto i: adj[v1]) { if (in[i]) { v2=i; break; } }
    assert(v2!=-1);
    vector<int> component;
    // Multisource BFS from (v1, v2): vertices discovered first from v1 form
    // one constituent half (collected in `component`); the rest form the
    // other half. Sides propagate along BFS discovery edges.
    {
        queue<int> q;
        q.push(v1);
        q.push(v2);
        map<int, bool> vis;
        vis[v1]=true;
        vis[v2]=true;
        map<int, bool> cmp;          // cmp[u] == true  <=>  u is on v1's side
        cmp[v1]=true;
        component.push_back(v1);
        while (!q.empty())
        {
            auto u=q.front();
            q.pop();
            for (auto i: adj[u])
            {
                if ((!vis[i])&&in[i])
                {
                    vis[i]=true;
                    q.push(i);
                    cmp[i]=cmp[u];   // inherit the side of the discovering vertex
                    if (cmp[i]) component.push_back(i);
                }
            }
        }
    }
    // Recurse on v1's half only: temporarily restrict `in` markers to it.
    for (auto i: s) in[i]=false;
    for (auto i: component) in[i]=true;
    vector<int> p1=f(component);
    for (auto i: s) in[i]=true;
    for (auto i: component) xtra[i]=true;
    vector<int> p((int)s.size());
    int z=component.size();
    assert(2*z==(int)s.size());
    // Lower half: labels 0..z-1 come straight from the recursive call.
    for (int i=0; i<z; i++) p[i]=p1[i];
    // Upper half: label i+z is the unique neighbour of p[i] inside the current
    // set that is not in the already-labelled lower half (its "image").
    for (int i=0; i<z; i++)
    {
        int g=-1;
        // NOTE: the range-for variable shadows the outer i; adj[p[i]] is
        // evaluated with the outer i before the shadowing declaration begins.
        for (auto i: adj[p[i]])
        {
            if (in[i]&&(!xtra[i]))
            {
                g=i;
                break;
            }
        }
        assert(g!=-1);
        p[z+i]=g;
    }
    // Clean up the scratch markers for the caller.
    for (auto i: component) xtra[i]=false;
    for (auto i: s) in[i]=false;
    return p;
}

// Colouring of the SIMPLE hypercube on n (= 2^dim) vertices:
// colour(u) = XOR over the set bit positions z of u (the tutorial formula
// XOR of z*b_z). Only valid when dim is a power of two; the caller checks.
vector<int> colorit(int n)
{
    vector<int> ret(n);
    for (int i=0; i<n; i++)
    {
        int res=0;
        for (int z=0; z<20; z++) res^=z*((power[z]&i)!=0);
        ret[i]=res;
    }
    return ret;
}

// One test case: read the permuted hypercube, print the permutation, then the
// colouring (or -1 when no colouring exists).
void solve()
{
    int n;
    cin>>n;
    for (int i = 0; i < power[n]; i++) adj[i].clear();
    // Read the n * 2^(n-1) edges of the permuted hypercube.
    for (int j = 0; j < n * power[n - 1]; j++)
    {
        int u, v;
        cin >> u >> v;
        adj[u].push_back(v);
        adj[v].push_back(u);
    }
    // Part 1: recover the permutation over the full vertex set.
    vector<int> p;
    {
        vector<int> s;
        for (int i=0; i<power[n]; i++) s.push_back(i);
        p=f(s);
    }
    for (int i=0; i<power[n]; i++) cout<<p[i]<<' ';
    cout<<'\n';
    // Part 2: a colouring exists iff n divides 2^n, i.e. n is a power of two.
    if (power[n]%n)
    {
        cout<<-1<<'\n';
        return;
    }
    vector<int> col=colorit(power[n]);
    // Map colours from the simple hypercube onto the permuted one via p.
    vector<int> out(power[n]);
    for (int i=0; i<power[n]; i++)
    {
        out[p[i]]=col[i];
    }
    for (int i=0; i<power[n]; i++) cout<<out[i]<<' ';
    cout<<'\n';
    return;
}

signed main()
{
    ios::sync_with_stdio(0);
    cin.tie(NULL);
    cout.tie(NULL);
    int t;
    cin >> t;
    power[0]=1;
    for (int j=1; j<=20; j++) power[j]=power[j-1]*2;
    while (t--)
    {
        solve();
    }
    return 0;
}
1545
A
AquaMoon and Strange Sort
AquaMoon has $n$ friends. They stand in a row from left to right, and the $i$-th friend from the left wears a T-shirt with a number $a_i$ written on it. Each friend has a direction (left or right). In the beginning, the direction of each friend is \textbf{right}. AquaMoon can make some operations on friends. On each operation, AquaMoon can choose two \textbf{adjacent} friends and swap their positions. After each operation, the direction of both chosen friends will also be flipped: left to right and vice versa. AquaMoon hopes that after some operations, the numbers written on the T-shirt of $n$ friends in the row, read from left to right, become \textbf{non-decreasing}. Also she wants, that all friends will have a direction of \textbf{right} at the end. Please find if it is possible.
It's easy to see that each number needs to move an even distance. For the same number, count how many of them are in the odd position and even position. Sort the array and count again. The given array named A, the sorted array named B. For every number, if the number of its occurrence in the odd position in A is different from its occurrence in the odd position in B, or the number of its occurrence in the even position in A is different from its occurrence in the even position in B, then the answer is NO. Otherwise the answer is YES.
[ "sortings" ]
1,500
#include <bits/stdc++.h>

// Elements of the current test case.
std::vector<int> a;
// cnt[v][p] = (occurrences of value v at positions of parity p in the input)
//           - (occurrences of value v at parity p in the sorted array).
// Values are bounded by 1e5 per the array size.
int cnt[100001][2];

int main() {
    int n, T, flag;
    scanf("%d", &T);
    while (T--) {
        scanf("%d", &n), a.resize(n), flag = 0;
        // Count each value's occurrences per index parity in the original order.
        for (int i = 0; i < n; ++i) scanf("%d", &a[i]), ++cnt[a[i]][i % 2];
        std::sort(a.begin(), a.end());
        // Subtract the per-parity counts of the sorted order; every swap moves
        // an element by 2, so parities must match for the answer to be YES.
        for (int i = 0; i < n; ++i) --cnt[a[i]][i % 2];
        for (int i = 0; i < n; ++i)
            if (cnt[a[i]][0] != 0 || cnt[a[i]][1] != 0) {
                puts("NO"), flag = 1;
                break;
            }
        if (flag == 0) puts("YES");
        // BUG FIX: reset the counters BEFORE clearing `a`. The original code
        // called a.clear() first, so this loop indexed an empty vector (UB)
        // and left stale nonzero counters that corrupted later test cases.
        // Zeroing cnt[a[i]] for every i covers all touched entries, since the
        // sorted array holds the same multiset of values as the input.
        for (int i = 0; i < n; ++i) cnt[a[i]][0] = cnt[a[i]][1] = 0;
        a.clear();
    }
    return 0;
}
1545
B
AquaMoon and Chess
Cirno gave AquaMoon a chessboard of size $1 \times n$. Its cells are numbered with integers from $1$ to $n$ from left to right. In the beginning, some of the cells are occupied with at most one pawn, and other cells are unoccupied. In each operation, AquaMoon can choose a cell $i$ with a pawn, and do \textbf{either} of the following (if possible): - Move pawn from it to the $(i+2)$-th cell, if $i+2 \leq n$ and the $(i+1)$-th cell is occupied and the $(i+2)$-th cell is unoccupied. - Move pawn from it to the $(i-2)$-th cell, if $i-2 \geq 1$ and the $(i-1)$-th cell is occupied and the $(i-2)$-th cell is unoccupied. You are given an initial state of the chessboard. AquaMoon wants to count the number of states reachable from the initial state with some sequence of operations. But she is not good at programming. Can you help her? As the answer can be large find it modulo $998\,244\,353$.
We enumerate $i$ from $1$ to $n$. If position $i-1$ and $i$ both contain a chess and $i-1$ is not in other groups, then we divide them into one group. We can change the operation a little: Each time we can swap the two consecutive $1$ and the element to their left or right. It's easy to see this operation equals to the initial one. So that means we can take out the groups (two consecutive $1$) and insert them to any position of the chessboard. So let the number of groups be $m$, the number of zeros be $n$, it's easy to find that the answer is $\binom{n+m}{m}$(Since inserting one group to the left of some $1$ or to the right of it are the same).
[ "combinatorics", "math" ]
1,900
#include <bits/stdc++.h>
using namespace std;

const int MAXN = 100010;
const int MOD = 998244353;

char str[MAXN];
// F[i] = i! mod MOD, rF[i] = (i!)^{-1} mod MOD, shared by all test cases.
long long F[MAXN], rF[MAXN];

// Modular inverse of a modulo prime m via the recursive identity
// inv(a) = -(m / a) * inv(m mod a)  (mod m).
long long inv(long long a, long long m) {
    if (a == 1) return 1;
    return inv(m % a, m) * (m - m / a) % m;
}

int main() {
    // Precompute factorials and inverse factorials once.
    F[0] = rF[0] = 1;
    for (int i = 1; i < MAXN; i++) {
        F[i] = F[i - 1] * i % MOD;
        rF[i] = rF[i - 1] * inv(i, MOD) % MOD;
    }
    int T;
    scanf("%d", &T);
    while (T--) {
        int n;
        scanf("%d", &n);
        scanf("%s", str);
        // Scan the board once, classifying cells into:
        //   zeros   - empty cells,
        //   pairs   - blocks of two consecutive pawns (the movable units),
        //   singles - leftover lone pawns (fixed; not used in the formula).
        int pairs = 0, zeros = 0, singles = 0;
        int i = 0;
        while (i < n) {
            if (str[i] == '0') {
                zeros++;
                i++;
            } else if (i + 1 < n && str[i + 1] == '1') {
                pairs++;
                i += 2;
            } else {
                singles++;
                i++;
            }
        }
        // Per the editorial: the answer is the number of ways to interleave
        // the `pairs` blocks with the `zeros` empty cells, C(zeros+pairs, pairs).
        long long ans = F[pairs + zeros] * rF[zeros] % MOD * rF[pairs] % MOD;
        printf("%d\n", (int) ans);
    }
    return 0;
}
1545
C
AquaMoon and Permutations
Cirno has prepared $n$ arrays of length $n$ each. Each array is a permutation of $n$ integers from $1$ to $n$. These arrays are special: for all $1 \leq i \leq n$, if we take the $i$-th element of each array and form another array of length $n$ with these elements, the resultant array is also a permutation of $n$ integers from $1$ to $n$. In the other words, if you put these $n$ arrays under each other to form a matrix with $n$ rows and $n$ columns, this matrix is a Latin square. Afterwards, Cirno added additional $n$ arrays, each array is a permutation of $n$ integers from $1$ to $n$. For all $1 \leq i \leq n$, there exists \textbf{at least one} position $1 \leq k \leq n$, such that for the $i$-th array and the $(n + i)$-th array, the $k$-th element of both arrays is the same. Notice that the arrays indexed from $n + 1$ to $2n$ \textbf{don't have to} form a Latin square. Also, Cirno made sure that for all $2n$ arrays, no two arrays are completely equal, i. e. for all pair of indices $1 \leq i < j \leq 2n$, there exists \textbf{at least one} position $1 \leq k \leq n$, such that the $k$-th elements of the $i$-th and $j$-th array are \textbf{different}. Finally, Cirno arbitrarily changed the order of $2n$ arrays. AquaMoon calls a subset of all $2n$ arrays of size $n$ \textbf{good} if these arrays from a Latin square. AquaMoon wants to know how many good subsets exist. Because this number may be particularly large, find it modulo $998\,244\,353$. Also, she wants to find any good subset. Can you help her?
Among all the arrays not yet chosen, if an array has a number which appears exactly once in its column, then that array must belong to the $n$ original arrays.
[ "2-sat", "brute force", "combinatorics", "constructive algorithms", "graph matchings", "graphs" ]
2,800
#include<bits/stdc++.h>
using namespace std;
const int maxn=500;
const long long mod=998244353;
typedef pair<int,int> pii;
int n,x,y,s,t;
// a[i][j] = j-th element of the i-th input array (1 <= i <= 2n).
// b[j][v] = number of not-yet-discarded arrays whose j-th element equals v.
// f[i]    = state of array i: 0 undecided, 1 chosen into the Latin square,
//           2 discarded (shares a cell with a chosen array).
// BUG FIX: f is indexed by array ids up to 2n, so it must hold maxn*2+5
// entries; it was declared f[maxn+5], overflowing whenever n > 250.
int a[maxn*2+5][maxn+5],b[maxn+5][maxn+5],f[maxn*2+5];
// Work queue of (column, value) cells whose live count has dropped to 1.
vector <pii> v;
// c[j][v] = ids of all arrays whose j-th element equals v.
vector <int> c[maxn+5][maxn+5];
int main()
{
    int T;
    scanf("%d",&T);
    while (T--)
    {
        scanf("%d",&n);
        for (int i=1;i<=n;i++)
            for (int j=1;j<=n;j++)
            {
                b[i][j]=0;
                c[i][j].clear();
            }
        for (int i=1;i<=n*2;i++)
        {
            f[i]=0;
            for (int j=1;j<=n;j++)
            {
                scanf("%d",&a[i][j]);
                b[j][a[i][j]]++;
                c[j][a[i][j]].push_back(i);
            }
        }
        int top=0,tail=0;   // queue head / tail indices into v
        int cnt=0,idx=1;    // arrays chosen so far / scan pointer for free picks
        long long ans=1;    // number of good subsets modulo mod
        v.clear();
        // Seed the queue with cells already unique among all 2n arrays.
        for (int i=1;i<=n;i++)
            for (int j=1;j<=n;j++)
                if (b[i][j]==1)
                {
                    tail++;
                    v.push_back(pii(i,j));
                }
        while (cnt<n)
        {
            if (top<tail)
            {
                // A cell whose value is unique among live arrays forces its
                // array to be one of the original Latin-square rows.
                x=v[top].first;
                y=v[top].second;
                if (b[x][y]!=1)   // stale queue entry — skip it
                {
                    top++;
                    continue;
                }
                for (int i=0;i<c[x][y].size();i++)
                    if (f[c[x][y][i]]==0)
                    {
                        t=c[x][y][i];
                        break;
                    }
            }
            else
            {
                // No forced cell: every live value appears exactly twice, so
                // (per the editorial) either half works. Pick the next
                // undecided array and double the count of good subsets.
                while (f[idx]!=0) idx++;
                t=idx;
                ans=ans*2%mod;
            }
            f[t]=1;
            cnt++;
            // The chosen array claims its cells; zero their counters so no
            // stale "unique" entries survive for these cells.
            for (int i=1;i<=n;i++) b[i][a[t][i]]=0;
            // Discard every undecided array sharing a cell with the chosen
            // one, pushing any cells whose live count thereby drops to 1.
            for (int i=1;i<=n;i++)
                for (int j=0;j<c[i][a[t][i]].size();j++)
                {
                    s=c[i][a[t][i]][j];
                    if (f[s]==0)
                    {
                        f[s]=2;
                        for (int k=1;k<=n;k++)
                        {
                            b[k][a[s][k]]--;
                            if (b[k][a[s][k]]==1)
                            {
                                tail++;
                                v.push_back(pii(k,a[s][k]));
                            }
                        }
                    }
                }
            top++;
        }
        // Output: the count of good subsets, then the n chosen array indices.
        s=0;
        printf("%lld\n",ans);
        for (int i=1;i<=n*2;i++)
            if (f[i]==1)
            {
                s++;
                if (s<n) printf("%d ",i);
                else printf("%d\n",i);
            }
    }
}
1545
D
AquaMoon and Wrong Coordinate
Cirno gives AquaMoon a problem. There are $m$ people numbered from $0$ to $m - 1$. They are standing on a coordinate axis in points with positive integer coordinates. They are facing right (i.e. in the direction of the coordinate increase). At this moment everyone will start running with the constant speed in the direction of coordinate increasing. The initial coordinate of the $i$-th person on the line is $x_i$, and the speed of the $i$-th person is $v_i$. So the coordinate of the $i$-th person at the moment $t$ will be $x_i + t \cdot v_i$. Cirno captured the coordinates of $m$ people in $k$ consecutive integer moments from $0$ to $k - 1$. In every moment, the coordinates of $m$ people were recorded in \textbf{arbitrary order}. To make the problem more funny, Cirno modified one coordinate at the moment $y$ ($0 < y < k-1$) to a \textbf{different} integer. AquaMoon wants to find the moment $y$ and the original coordinate $p$ before the modification. Actually, she is not a programmer at all. So she wasn't able to solve it. Can you help her?
Let's denote for $sum[t]$ the sum of all coordinates at the moment $t$, and for $sum2[t]$ the sum of all squared coordinates at the moment $t$. If there is no error, the sum of the coordinates of all moments will be an arithmetic series, and the difference is $\sum_{i=1}^m v_i$. It's easy to find the moment that contains the modified coordinate. Assuming that the moment that contains the modified coordinate is found, first use three consecutive moments without the modified coordinate. Suppose it is $t$, $t + 1$, $t + 2$. Sum of squared coordinates of moment $t$ is $sum2[t] = \sum_{i=1}^m (x_i + t * v_i)^2$. Sum of squared coordinates of moment $t+1$ is $sum2[t+1] = \sum_{i=1}^m (x_i + (t+1) * v_i)^2$. Sum of squared coordinates of moment $t+2$ is $sum2[t+2] = \sum_{i=1}^n (x_i + (t+2) * v_i)^2$. We could easy to get $sum2[t] + sum2[t+2] - 2 \times sum2[t+1] = 2 \times \sum_{i=1}^m v_i^2$. In this way, we can know the value of $2 \times \sum_{i=1}^m v_i^2$. Then we can enumerate which integer was modified at the moment $y$. We could try to update the integer back to the original coordinate, so that it can meet both $sum[y-1] + sum[y+1] = 2 \times sum[y]$ and $sum2[y-1] + sum2[y+1] - 2 \times sum2[y] = 2 \times \sum_{i=1}^m v_i^2$. It would be easy to get the original coordinate.
[ "constructive algorithms", "interactive", "math" ]
3,000
#include<bits/stdc++.h>
using namespace std;
// n = number of people, m = number of recorded moments — note this is the
// statement's (m, k) with the names swapped (observed from the read loops).
int n,m,i,j,k,ans1,ans2;
// a[i][j] = j-th recorded coordinate at moment i (order within a moment is
// arbitrary). Coordinates are read from input here rather than queried.
long long a[1010][1010];
// c[i] = sum of all coordinates recorded at moment i. x, y, s, t, temp are
// reused scratch variables (x in particular holds two different quantities).
long long c[1010],x,y,s,t,temp;
int main()
{
    scanf("%d%d",&n,&m);
    for (i=0;i<m;i++)
    {
        for (j=1;j<=n;j++)
        {
            scanf("%lld",&a[i][j]);
            c[i]+=a[i][j];
        }
    }
    // Without the modification c[] is an arithmetic progression with common
    // difference sum(v_i). Moments 0 and m-1 are unmodified (0 < y < k-1 in
    // the statement), so the difference is safe to take from the endpoints.
    x=(c[m-1]-c[0])/(m-1);
    // The first moment that breaks the progression is the modified one
    // (ans1); y is the amount that was ADDED to the modified coordinate.
    for (i=1;i<m;i++)
    {
        if ((c[i]-c[0])!=x*i)
        {
            ans1=i;
            y=c[i]-c[0]-x*i;
            break;
        }
    }
    // Recover 2 * sum(v_i^2) from three consecutive clean moments, using the
    // identity sum2[t] + sum2[t+2] - 2*sum2[t+1] = 2*sum(v_i^2).
    // NOTE: x is reused here — it now holds 2*sum(v_i^2), not the slope.
    for (i=1;i<m-1;i++)
    {
        if (i-1!=ans1&&i!=ans1&&i+1!=ans1)
        {
            x=0;
            for (j=1;j<=n;j++)
            {
                x+=a[i-1][j]*a[i-1][j]+a[i+1][j]*a[i+1][j]-a[i][j]*a[i][j]*2;
            }
            break;
        }
    }
    // At the corrupted moment ans1, find which coordinate to shift back by y
    // so the squared-sum identity holds again:
    //   s = sum2[ans1-1] + sum2[ans1+1] - 2*sum(v_i^2)  must equal  2*sum2[ans1].
    i=ans1;
    t=s=0;
    for (j=1;j<=n;j++)
    {
        s+=a[i-1][j]*a[i-1][j]+a[i+1][j]*a[i+1][j];
        t+=a[i][j]*a[i][j]*2;
    }
    s-=x;
    for (j=1;j<=n;j++)
    {
        // Tentatively revert a[i][j] to a[i][j]-y and test the identity.
        temp=t-a[i][j]*a[i][j]*2+(a[i][j]-y)*(a[i][j]-y)*2;
        if (temp==s)
        {
            ans2=a[i][j]-y;   // original coordinate before the modification
            break;
        }
    }
    cout<<ans1<<' '<<ans2<<endl;
}
1545
E2
AquaMoon and Time Stop (hard version)
\textbf{Note that the differences between easy and hard versions are the constraints on $n$ and the time limit. You can make hacks only if both versions are solved.} AquaMoon knew through foresight that some ghosts wanted to curse tourists on a pedestrian street. But unfortunately, this time, these ghosts were hiding in a barrier, and she couldn't enter this barrier in a short time and destroy them. Therefore, all that can be done is to save any unfortunate person on the street from the ghosts. The pedestrian street can be represented as a one-dimensional coordinate system. There is one person hanging out on the pedestrian street. At the time $0$ he is at coordinate $x$, moving with a speed of $1$ unit per second. In particular, at time $i$ the person will be at coordinate $x+i$. The ghosts are going to cast $n$ curses on the street. The $i$-th curse will last from time $tl_i-1+10^{-18}$ to time $tr_i+1-10^{-18}$ (exclusively) and will kill people with coordinates from $l_i-1+10^{-18}$ to $r_i+1-10^{-18}$ (exclusively). Formally that means, that the person, whose coordinate is between $(l_i-1+10^{-18},r_i+1-10^{-18})$ in the time range $(tl_i-1+10^{-18},tr_i+1-10^{-18})$ will die. To save the person on the street, AquaMoon can stop time at any moment $t$, and then move the person from his current coordinate $x$ to any coordinate $y$ ($t$, $x$ and $y$ are not necessarily integers). The movement costs AquaMoon $|x-y|$ energy. The movement is continuous, so if there exists some cursed area between points $x$ and $y$ at time $t$, the person will \textbf{die too}. AquaMoon wants to know what is the minimum amount of energy she needs to spend in order to save the person on the street from all $n$ curses. But she is not good at programming. As her friend, can you help her?
We scan through time; at each moment, we need to get the minimum answer over all positions at that moment.
[ "data structures", "dp" ]
3,500
// Sweep-over-time solution for the hard version of the "curses" problem
// (see the editorial above).  The alive, un-cursed x-ranges are kept as
// `seg` entries in std::set<seg> p; each segment owns a splay tree (roots
// in h[], node pool a[]) whose in-order node sequence is a list of
// arithmetic runs {length l, first value x, step d} describing the
// minimum-energy profile over the segment's positions.  A max segment tree
// (tree[]/lazy[]) maintains, per coordinate, how many active curses cover
// it; its Findmx/Findmn locate the nearest covered coordinate on either
// side, bounding the uncovered stretch that Add() must insert.
// NOTE(review): coordinates are pre-shifted by --l,++r in main; the final
// answer is the minimum run endpoint collected by dfs() at the end.
#include<bits/stdc++.h> using namespace std; const int N=2000005,E=1000001; struct str{ int l; long long x; int d; long long las(){return x+(l-1)*d;} }a[N]; int ch[N][2],fa[N],h[N],tot,hc,ls[N],siz[N],i; struct seg{ int l,r,x; bool operator <(const seg &a)const { return a.r>r; } }; set<seg> p; void pushup(int i) { siz[i]=siz[ch[i][0]]+siz[ch[i][1]]+a[i].l; } void rotate(int x) { int y=fa[x];bool d=(ch[y][0]==x); ch[y][!d]=ch[x][d]; if(ch[x][d]!=0)fa[ch[x][d]]=y; fa[x]=fa[y];if(fa[y])ch[fa[y]][ch[fa[y]][1]==y]=x; ch[x][d]=y;fa[y]=x;pushup(y); } void splay(int i,int x,int t=0) { for(int y=fa[x];y!=t;rotate(x),y=fa[x]) if(fa[y]!=t&&(ch[fa[y]][0]==y)==(ch[y][0]==x)) rotate(y); pushup(x); h[i]=x; } void Findmx(int x) { int i=h[x]; while(ch[i][1]) i=ch[i][1]; splay(x,i); } void Findmn(int x) { int i=h[x]; while(ch[i][0]) i=ch[i][0]; splay(x,i); } void MMerge(int x,int y) { if(h[y]==0||h[x]==0) { h[x]=h[y]=max(h[x],h[y]); return; } int i=h[y]; for(;ch[i][0];i=ch[i][0]); ch[i][0]=h[x]; fa[h[x]]=i; splay(y,h[x]); } int Merge(int x,int y) { Findmx(x),Findmn(y); if(abs(a[h[x]].las()-a[h[y]].x)<=1) { MMerge(x,y); return x; } if(a[h[x]].las()>a[h[y]].x) { long long la=a[h[y]].x; while(h[x]&&abs(a[h[x]].las()-la)>1) { if(a[h[x]].d==-1) la+=a[h[x]].l; else { if(la+a[h[x]].l<a[h[x]].x) la+=a[h[x]].l; else { int li=(a[h[x]].l-a[h[x]].x+la+2)/2; la+=a[h[x]].l-li; a[h[x]].l=li; break; } } h[x]=ch[h[x]][0]; fa[h[x]]=0; Findmx(x); } h[++hc]=++tot; a[tot]={(int)(la-a[h[y]].x),la,-1}; pushup(tot); ls[hc]=ls[x]; MMerge(x,hc); MMerge(hc,y); } else { long long la=a[h[x]].las(); while(h[y]&&abs(a[h[y]].x-la)>1) { if(a[h[y]].d==1) la+=a[h[y]].l; else { if(a[h[y]].las()>la+a[h[y]].l) la+=a[h[y]].l; else { int li=(a[h[y]].x-la)/2; a[h[y]].x-=li; a[h[y]].l-=li; la+=li; break; } } h[y]=ch[h[y]][1]; fa[h[y]]=0; Findmn(y); } h[++hc]=++tot; a[tot]={(int)(la-a[h[x]].las()),a[h[x]].las()+1,1}; pushup(tot); ls[hc]=ls[x]; MMerge(x,hc); MMerge(hc,y); } return hc; } void Find(int n,int x,int w) { 
// Find(n,x,w): select implicit position w inside splay tree n; if w falls
// strictly inside a run node, split that node into two runs and splay the
// node holding w to the root.  Update() advances a segment's lazy
// timestamp ls[] by shifting its profile: it prepends a descending run on
// the left and trims `ti` positions off the right end.  Add()/Del() insert
// or remove an uncovered range, merging with / splitting neighbours in p.
if(w<siz[ch[x][0]]) Find(n,ch[x][0],w); else if(siz[ch[x][0]]+a[x].l>w) { if(siz[ch[x][0]]==w) { splay(n,x,0); return; } int tmp=ch[x][1]; ch[x][1]=++tot; ch[tot][1]=tmp; if(tmp) fa[tmp]=tot; fa[tot]=x; a[tot].l=a[x].l-(w-siz[ch[x][0]]); a[x].l=w-siz[ch[x][0]]; a[tot].x=a[x].x+a[x].d*a[x].l; a[tot].d=a[x].d; splay(n,tot,0); return; } else Find(n,ch[x][1],w-a[x].l-siz[ch[x][0]]); } void Update(int l,int x) { if(l==ls[x]) return; int ti=l-ls[x]; ls[x]=l; Findmn(x); long long w=a[h[x]].x; h[++hc]=++tot; a[tot]={ti,w+ti,-1}; pushup(tot); MMerge(hc,x); while(1) { Findmx(x); if(ti<a[h[x]].l) { a[h[x]].l-=ti; break; } ti-=a[h[x]].l; h[x]=ch[h[x]][0]; fa[h[x]]=0; } } void Add(int ti,int l,int r) { h[++hc]=++tot; a[tot]={r-l+1,1<<30,1}; pushup(tot); seg t={l,r,hc}; ls[hc]=ti; auto it=p.lower_bound(t); if(it!=p.end()&&it->l==r+1) { Update(ti,it->x); t.x=Merge(t.x,it->x); t.r=it->r; p.erase(it); } it=p.lower_bound(t); if(it!=p.begin()) { --it; if(it->r==l-1) { Update(ti,it->x); t.x=Merge(it->x,t.x); t.l=it->l; p.erase(it); } } p.insert(t); } void Del(int ti,int l,int r) { seg t=*p.lower_bound({l,r,0}); p.erase(t); Update(ti,t.x); Find(t.x,h[t.x],l-t.l); int u=ch[h[t.x]][0]; fa[u]=ch[h[t.x]][0]=0; pushup(h[t.x]); int v=0; if(siz[h[t.x]]!=r-l+1) { Find(t.x,h[t.x],r-l+1); v=ch[h[t.x]][0]; fa[v]=ch[h[t.x]][0]=0; v=h[t.x]; } if(l!=t.l) { h[++hc]=u; ls[hc]=ti; p.insert({t.l,l-1,hc}); } if(r!=t.r) { h[++hc]=v; ls[hc]=ti; p.insert({r+1,t.r,hc}); } } int n,l,r,x,y,u,v,tree[N*4],lazy[N*4]; long long as=1<<30; struct node{ int l,r; }; vector<node> ad[E+5],de[E+5]; void modify(int i,int l,int r,int ll,int rr,int x) { if(l>=ll&&r<=rr) { lazy[i]+=x; tree[i]+=x; return; } int mid=l+r>>1; if(mid>=ll) modify(i<<1,l,mid,ll,rr,x); if(mid<rr) modify(i<<1|1,mid+1,r,ll,rr,x); tree[i]=max(tree[i<<1],tree[i<<1|1])+lazy[i]; } int Query(int i,int l,int r,int ll,int rr) { if(l>=ll&&r<=rr) return tree[i]; int mid=l+r>>1,s=0; if(mid>=ll) s=max(s,Query(i<<1,l,mid,ll,rr)); if(mid<rr) 
// Remainder of Query, plus the segment-tree Findmx/Findmn descents (they
// return -1 when the subtree's maximum cover count is zero, i.e. contains
// no covered cell), dfs() folding every run endpoint into the global
// minimum `as`, and main(): curse rectangles are bucketed into the event
// lists ad[]/de[] by their time endpoints and swept in increasing time.
s=max(s,Query(i<<1|1,mid+1,r,ll,rr)); return s+lazy[i]; } int Findmx(int i,int l,int r,int ll,int s) { if(s+tree[i]==0) return -1; if(l==r) return l-1; int mid=l+r>>1; s+=lazy[i]; if(l>=ll) { int y=Findmx(i<<1,l,mid,ll,s); if(y!=-1) return y; else return Findmx(i<<1|1,mid+1,r,ll,s); } if(mid>=ll) { int y=Findmx(i<<1,l,mid,ll,s); if(y!=-1) return y; } return Findmx(i<<1|1,mid+1,r,ll,s); } int Findmn(int i,int l,int r,int rr,int s) { if(s+tree[i]==0) return -1; if(l==r) return l+1; int mid=l+r>>1; s+=lazy[i]; if(r<=rr) { int y=Findmn(i<<1|1,mid+1,r,rr,s); if(y!=-1) return y; else return Findmn(i<<1,l,mid,rr,s); } if(mid<rr) { int y=Findmn(i<<1|1,mid+1,r,rr,s); if(y!=-1) return y; } return Findmn(i<<1,l,mid,rr,s); } void dfs(int i) { if(!i) return; as=min({as,a[i].x,a[i].las()}); dfs(ch[i][0]); dfs(ch[i][1]); } int main() { scanf("%d",&n); scanf("%d",&x); for(i=1;i<=n;++i) { scanf("%d %d %d %d",&l,&r,&u,&v); --l,++r; de[l].push_back({u,v}); ad[r].push_back({u,v}); } p.insert({0,E*2+5,++hc}); h[hc]=++tot; a[tot]={E*2+5-x+1,0,1}; ch[tot][0]=2; fa[++tot]=1; a[tot]={x,x,-1}; pushup(2); pushup(1); for(i=0;i<=E+1;++i) { for(auto it:ad[i]) { modify(1,0,E,it.l,it.r,-1); auto ii=p.lower_bound({it.l,it.r,0}); int nr=ii->l-1; --ii; int nl=ii->r+1; if(nl>nr) continue; int y=Findmx(1,0,E,nl,0); if(y>=nr||y==-1) Add(i,nl,nr); else { if(y>=nl) Add(i,nl,y); int y=Findmn(1,0,E,nr,0); if(y<=nr) Add(i,y,nr); } } for(auto it:de[i]) { modify(1,0,E,it.l,it.r,1); while(1) { auto y=p.lower_bound({0,it.l,0}); if(y!=p.end()) { if(min(it.r,y->r)>=max(it.l,y->l)) Del(i,max(it.l,y->l),min(it.r,y->r)); else break; } else break; } } } dfs(h[p.begin()->x]); cout<<as; }
1545
F
AquaMoon and Potatoes
AquaMoon has three integer arrays $a$, $b$, $c$ of length $n$, where $1 \leq a_i, b_i, c_i \leq n$ for all $i$. In order to accelerate her potato farming, she organizes her farm in a manner based on these three arrays. She is now going to complete $m$ operations to count how many potatoes she can get. Each operation will have one of the two types: - AquaMoon reorganizes their farm and makes the $k$-th element of the array $a$ equal to $x$. In other words, perform the assignment $a_k := x$. - Given a positive integer $r$, AquaMoon receives a potato for each triplet $(i,j,k)$, such that $1\le i<j<k\le r$, and $b_{a_i}=a_j=c_{a_k}$. Count the number of such triplets. As AquaMoon is busy finding the library, help her complete all of their operations.
We seek a solution of roughly square root time complexity; the small constraint of $m$ hints at a solution in $O(m\sqrt n)$. This immediately rules out solutions based on square root decomposition on sequences, because of the overhead incurred with initializing such structures. Instead of directly solving the problem, let us solve the following - equivalent - task: How can we solve $O(\sqrt n)$ queries in $O(n)$ time? If we only consider $O(\sqrt n)$ queries, then there are at most $O(\sqrt n)$ positions that are modified throughout these queries. Call these positions "dynamic points", and the others "static points". A note concerning notation: The following editorial was written when $X$ and $Y$ were permutations, and as such it uses $X^{-1}_i$ to denote the value $j$ such that $X_j=i$. It was later found that the solution can be easily modified to accomodate for $X$ and $Y$ that were not permutations. You can regard $X^{-1}_i$ as the set of $j$ such that $X_j=i$. In the following, we often count the number of dynamic or static elements in a prefix that equals a certain value. The number of elements in a multiset that equal $X^{-1}_i$ is the same as the number of elements in said multiset that satisfy $X_j=i$. In implementation, we can maintain the multiset $\{X_j:j\in\dots\}$ instead of the original multiset. Call a triplet "good" if it satisfies $1\le i<j< k\le r$, and $X_{a_i}=a_j=Y_{a_k}$. Hence, for each query, the good triplets fall under 8 categories, in roughly increasing order of difficulty: $i,j,k$ are all static. We can calculate for each $r$ in $O(n)$, through DP or the like, the number of good triplets in $[1,r]$ where $i,j,k$ are all static: we simply ignore the dynamic points. This is $O(n)$ preprocessing and $O(1)$ for each query. We can calculate for each $r$ in $O(n)$, through DP or the like, the number of good triplets in $[1,r]$ where $i,j,k$ are all static: we simply ignore the dynamic points. 
This is $O(n)$ preprocessing and $O(1)$ for each query. $i,j,k$ are all dynamic. Similarly to 1, for each query, we do a brute force DP over all dynamic $i,j,k$ before $r$. This is $O(\sqrt n)$ for each query. Similarly to 1, for each query, we do a brute force DP over all dynamic $i,j,k$ before $r$. This is $O(\sqrt n)$ for each query. $i,j$ are dynamic, $k$ is static. We iterate over all dynamic $j$, moving forward, and keep track of the amount of $X^{-1}_{a_j}$ currently seen. Then, for each $j$, we know the number of dynamic $i$ that can precede it through what we tracked; what remains is to count the number of static $a_k=Y^{-1}_{a_j}$ in $[j+1,r]$ in $O(1)$. We offline all $O(n)$ intervals that we need to count in these $O(\sqrt n)$ queries and then re-process their contribution at the end of the block. We iterate over all dynamic $j$, moving forward, and keep track of the amount of $X^{-1}_{a_j}$ currently seen. Then, for each $j$, we know the number of dynamic $i$ that can precede it through what we tracked; what remains is to count the number of static $a_k=Y^{-1}_{a_j}$ in $[j+1,r]$ in $O(1)$. We offline all $O(n)$ intervals that we need to count in these $O(\sqrt n)$ queries and then re-process their contribution at the end of the block. $j,k$ are dynamic, $i$ is static. This is virtually the same as 3; we just iterate in reverse and we count the number of static $a_i=X^{-1}_{a_j}$ in $[1,j-1]$. This is virtually the same as 3; we just iterate in reverse and we count the number of static $a_i=X^{-1}_{a_j}$ in $[1,j-1]$. $j$ is dynamic, $i,k$ are static. Using the same data structure as 3 and 4, we multiply the number of $a_i=X^{-1}_{a_j}$ in $[1,j-1]$ with the number of static $a_k=Y^{-1}_{a_j}$ in $[j+1,r]$. Using the same data structure as 3 and 4, we multiply the number of $a_i=X^{-1}_{a_j}$ in $[1,j-1]$ with the number of static $a_k=Y^{-1}_{a_j}$ in $[j+1,r]$. $i,k$ are dynamic, $j$ is static. We iterate over all dynamic $k$. 
The number of good $i,j,k$ in this situation equals the number of $i,j$ where $1\le i,j<k$, $a_i=X^{-1}_{Y_{a_k}}$, and $a_j=Y_{a_k}$, minus the number of $i,j$ where $1\le j\le i<k$, $a_i=X^{-1}_{Y_{a_k}}$, and $a_j=Y_{a_k}$. For the former, notice that $i,j$ are independent. As we iterate forwards through all dynamic $k$, we keep track of all dynamic values we have seen so far, and multiply the number of dynamic occurrences of $X^{-1}_{Y_{a_k}}$ with the number of static occurrences of $Y_{a_k}$ in $[1,k-1]$. As usual, to solve for static occurrences we offline them. For the latter, observe, that when $a_i=s$ we must have $a_j=X_s$ in a good triplet. Hence during the iteration over $a$, for each dynamic $i$ we count the number of static $j$ before or on it such that $a_j=X_{a_i}$. We obtain this count for each dynamic $i$ and then, for each $k$, sum the count over all $a_i=X^{-1}_{Y_{a_k}}$. We iterate over all dynamic $k$. The number of good $i,j,k$ in this situation equals the number of $i,j$ where $1\le i,j<k$, $a_i=X^{-1}_{Y_{a_k}}$, and $a_j=Y_{a_k}$, minus the number of $i,j$ where $1\le j\le i<k$, $a_i=X^{-1}_{Y_{a_k}}$, and $a_j=Y_{a_k}$. For the former, notice that $i,j$ are independent. As we iterate forwards through all dynamic $k$, we keep track of all dynamic values we have seen so far, and multiply the number of dynamic occurrences of $X^{-1}_{Y_{a_k}}$ with the number of static occurrences of $Y_{a_k}$ in $[1,k-1]$. As usual, to solve for static occurrences we offline them. For the latter, observe, that when $a_i=s$ we must have $a_j=X_s$ in a good triplet. Hence during the iteration over $a$, for each dynamic $i$ we count the number of static $j$ before or on it such that $a_j=X_{a_i}$. We obtain this count for each dynamic $i$ and then, for each $k$, sum the count over all $a_i=X^{-1}_{Y_{a_k}}$. $k$ is dynamic, $i,j$ are static. 
We iterate over all dynamic $k$; we seek the number of static $i,j$ in $[1,k-1]$ such that $a_j=X_{a_i}$, $i<j$, and $a_j=Y_{a_k}$. We offline all $O(n)$ dynamic $k$ that we need to count in these $O(\sqrt n)$ queries. Then, we iterate forwards through $a$ in a manner similar to part 1, keeping track of the number of $i$ that can bind to each $j$, and a sum over all current $j$ where $a_j$ equals some value, answering offlined questions on the way. We iterate over all dynamic $k$; we seek the number of static $i,j$ in $[1,k-1]$ such that $a_j=X_{a_i}$, $i<j$, and $a_j=Y_{a_k}$. We offline all $O(n)$ dynamic $k$ that we need to count in these $O(\sqrt n)$ queries. Then, we iterate forwards through $a$ in a manner similar to part 1, keeping track of the number of $i$ that can bind to each $j$, and a sum over all current $j$ where $a_j$ equals some value, answering offlined questions on the way. $i$ is dynamic, $j,k$ are static. We iterate over all dynamic $i$; we seek the number of static $j,k$ in $[i+1,r]$ such that $a_j=Y_{a_k}$, $j<k$, and $a_j=X_{a_i}$. We offline all $O(n)$ intervals that we need to count in these $O(\sqrt n)$ queries. Notice that the number of $j,k$ in an interval $[i+1,r]$ equals the number of $j,k$ where $j$ is in $[i+1,r]$ minus the number of $j,k$ where $j$ is in $[i+1,r]$ and $k$ is in $[r+1,n]$. We calculate the former iterating backwards through $a$ to count the number of $k$ that can bind to each $j$, and then use a sum similar to part 3. For the latter, we observe, that $j$ and $k$ are independent since we have fixed both the value of $a_j=X_{a_i}$ and $a_k=Y^{-1}_{X_{a_i}}$, so we simply multiply the possible $j$ in $[i+1,r]$ with the possible $k$ in $[r+1,n]$. We iterate over all dynamic $i$; we seek the number of static $j,k$ in $[i+1,r]$ such that $a_j=Y_{a_k}$, $j<k$, and $a_j=X_{a_i}$. We offline all $O(n)$ intervals that we need to count in these $O(\sqrt n)$ queries. 
Notice that the number of $j,k$ in an interval $[i+1,r]$ equals the number of $j,k$ where $j$ is in $[i+1,r]$ minus the number of $j,k$ where $j$ is in $[i+1,r]$ and $k$ is in $[r+1,n]$. We calculate the former iterating backwards through $a$ to count the number of $k$ that can bind to each $j$, and then use a sum similar to part 3. For the latter, we observe, that $j$ and $k$ are independent since we have fixed both the value of $a_j=X_{a_i}$ and $a_k=Y^{-1}_{X_{a_i}}$, so we simply multiply the possible $j$ in $[i+1,r]$ with the possible $k$ in $[r+1,n]$. As such, we have solved $O(\sqrt n)$ queries in $O(n)$, and have solved the problem in $O(n+m\sqrt n)$. We can prove that this problem is more difficult than calculating matrix multiplication of two matrices with size $\sqrt n\times \sqrt n$, when ignoring poly-logarithmic factors. We construct a special arrangement of array: for $a_i=2,5,8,\dots$, the corresponding $b_{a_i}=1,4,7,\dots$, the corresponding $c_{a_i}=3,6,9,\dots$. For other $a_i$, $b_{a_i}=c_{a_i}=n$. We make sure that no $n$ exists in the array $a$. Array $a$ is separated into three parts from left to right. The first part consists of $1,4,7,\dots$, each appears exactly one time. The second part consists of $2,5,8,\dots$, each may exist multiple times. The third part consists of $3,6,9,\dots$, each may exist multiple times. Let $f_{k}$ be the number of triplets $(i,j,k)$ that $b_{a_i}=a_j=c_{a_k}$. For each $a_k$, there is exactly one $a_i$ and it appears exactly one time. Hence, $f_{k}$ is equal to the number of corresponding $a_j$. We only modify elements of $a$ in the second part. If we regard $k,a_k$ that $k$ is in the third part of array as a point in 2D space, let $x_k=k,y_k=a_k$, then inserting a corresponding $a_j$ into the second part means a ranged plus operation for any point that $y=a_k$. A query operation with value $r$ reports the sum of all point $i$ that $x_i\le r$. 
Because of our constructed array, only triplets where $k$ is in the third part can contribute to the answer. So we used an algorithm that can solve this problem to solve the two-dimensional $x=A$ add, $y=B$ sum problem, which is equivalent to $x<A$ add, $y<B$ sum when ignoring poly-logarithmic factors. This problem is further equivalent to the famous range inverse query problem, which was proven more difficult than calculating matrix multiplication of two matrices with size $\sqrt n\times \sqrt n$. As currently the best matrix multiplication algorithm is $O(n^{2.373})$, seeking a $\tilde{O}(n)$ solution is not realistic, so the $O(n+m\sqrt n)$ solution described above is enough for competitive programming. P.S. There is an easter egg in the problem statement, find it :)
[ "brute force", "data structures", "dp" ]
3,500
// Sqrt-decomposition solution for 1545F "AquaMoon and Potatoes" (see the
// editorial above).  Operations are buffered into blocks of SQRTN entries
// (opq); positions assigned inside a block are "dynamic" (dyn[]/dynp[]),
// all others "static".  dryrun() replays a block once to record, into the
// linked-list pool detecpool/detec*, every prefix/suffix count over static
// positions that the block's queries will need, then answers them with a
// few O(n) sweeps into the dryans* arrays; process() replays the block for
// real, combining the 8 static/dynamic triple categories (P1..P8) per
// query.  The io namespace is a hand-rolled fread/fwrite fast reader and
// writer.  NOTE(review): the dataset collapsed the original newlines, so
// the leading '//' lines below swallow code when compiled verbatim.
// (insert magical incantation) // (insert offerings for the Gods of codeforces judging servers) //LXLORZ!!!!//rejudg #include <bits/stdc++.h> using namespace std; #define MAXN 200005 #define SQRTN 210 namespace io { const int __SIZE = (1 << 21) + 1; char ibuf[__SIZE], *iS, *iT, obuf[__SIZE], *oS = obuf, *oT = oS + __SIZE - 1, __c, qu[55]; int __f, qr, _eof; #define Gc() \ (iS == iT ? (iT = (iS = ibuf) + fread(ibuf, 1, __SIZE, stdin), \ (iS == iT ? EOF : *iS++)) \ : *iS++) inline void flush() { fwrite(obuf, 1, oS - obuf, stdout), oS = obuf; } inline void gc(char &x) { x = Gc(); } inline void pc(char x) { *oS++ = x; if (oS == oT) flush(); } inline void pstr(const char *s) { int __len = strlen(s); for (__f = 0; __f < __len; ++__f) pc(s[__f]); } inline void gstr(char *s) { for (__c = Gc(); __c < 32 || __c > 126 || __c == ' ';) __c = Gc(); for (; __c > 31 && __c < 127 && __c != ' '; ++s, __c = Gc()) *s = __c; *s = 0; } template <class I> inline bool gi(I &x) { _eof = 0; for (__f = 1, __c = Gc(); (__c < '0' || __c > '9') && !_eof; __c = Gc()) { if (__c == '-') __f = -1; _eof |= __c == EOF; } for (x = 0; __c <= '9' && __c >= '0' && !_eof; __c = Gc()) x = x * 10 + (__c & 15), _eof |= __c == EOF; x *= __f; return !_eof; } template <class I> inline void print(I x) { if (!x) pc('0'); if (x < 0) pc('-'), x = -x; while (x) qu[++qr] = x % 10 + '0', x /= 10; while (qr) pc(qu[qr--]); } struct Flusher_ { ~Flusher_() { flush(); } } io_flusher_; } // namespace io using io::gc; using io::gi; using io::gstr; using io::pc; using io::print; using io::pstr; #define all(a) a.begin(), a.end() #define fi first #define se second #define pb push_back #define mp make_pair using ll = long long; using pii = pair<int, int>; int n; int a[MAXN], X[MAXN], Y[MAXN]; int Xa[MAXN], Ya[MAXN]; int dyn[SQRTN + 5]; bool dynp[MAXN]; int ps1[MAXN]; ll ps2[MAXN]; ll ps_7j[MAXN]; int fake_a[MAXN], fake_Xa[MAXN], fake_Ya[MAXN]; // TODO: replace with linked list struct offl { int val, id, nxt; } detecpool[(SQRTN 
// (pool sized for all offlined count requests of one block of operations)
* SQRTN * 11) / 2 + 5]; int detec[MAXN]; // int detec7[MAXN]; int detec8f[MAXN]; int detecX[MAXN], detecY[MAXN]; int dryans[(SQRTN * SQRTN * 3) / 2 + 5]; int dryansX[(SQRTN * SQRTN) / 2 + 5]; int dryansY[(SQRTN * SQRTN * 4) / 2 + 5]; ll dryans_7[(SQRTN * SQRTN) / 2 + 5]; ll dryans_8_former[(SQRTN * SQRTN * 2) / 2 + 5]; ll ijk_static[MAXN]; void dryrun(vector<pii> ops) { copy(a, a + n, fake_a); copy(Xa, Xa + n, fake_Xa); copy(Ya, Ya + n, fake_Ya); memset(detec, -1, sizeof detec); memset(detecX, -1, sizeof detecX); memset(detecY, -1, sizeof detecY); // memset(detec7, -1, sizeof detec7); memset(detec8f, -1, sizeof detec8f); int pooln = 0; auto push_back = [&](int *ve, int pos, int va, int id) { detecpool[pooln] = {va, id, ve[pos]}; ve[pos] = pooln; pooln++; }; int dryid = 0, dryidX = 0, dryidY = 0, dryid7 = 0, dryid8f = 0; for (auto [opn, r] : ops) { if (opn != -1) { assert(dynp[opn]); a[opn] = r; Xa[opn] = X[r]; Ya[opn] = Y[r]; } else { int mxdyn = 0; for (int i = 0; dyn[i] <= r; i++) mxdyn++; for (int _j = 0; _j < mxdyn; _j++) { int j = dyn[_j]; int pfx_xai_km1 = 0; if (j != 0) { // int pfx_ixaj_jm1 = dryans[dryid++]; // int pfx_iyaj_r = dryans[dryid++]; // int pfx_iyaj_j = dryans[dryid++]; push_back(detecX, j - 1, a[j], dryidX++); push_back(detecY, r, a[j], dryidY++); push_back(detecY, j, a[j], dryidY++); int k = dyn[_j]; // pfx_xai_km1 = dryans[dryid++]; // int pfx_yak_km1 = dryans[dryid++]; push_back(detec, k - 1, Xa[k], dryid++); push_back(detec, k - 1, Ya[k], dryid++); // ll pfx_yak_km1 = dryans_7[dryid7++]; // push_back(detec7, k - 1, Ya[k], dryid7++); } int i = j; if (i != r) { // ll sfx_xai_ip1 = dryans_8_former[dryid8f++]; // ll sfx_xai_rp1 = dryans_8_former[dryid8f++]; // int pfx_xai_r = dryans[dryid++]; // int pfx_xai_i = pfx_xai_km1; // int pfx_iyxai_n = dryans[dryid++]; // int pfx_iyxai_r = dryans[dryid++]; push_back(detec8f, i + 1, Xa[i], dryid8f++); push_back(detec8f, r + 1, Xa[i], dryid8f++); push_back(detec, r, Xa[i], dryid++); push_back(detecY, n, 
// (continued) dryrun: the push_back lambda records an offline request
// {value, answer-slot id} at a prefix boundary; the O(n) sweeps that
// follow fill dryans/dryansX/dryansY/dryans_7/dryans_8_former plus the
// all-static per-prefix totals ijk_static[], then restore a/Xa/Ya.
Xa[i], dryidY++); push_back(detecY, r, Xa[i], dryidY++); } } } } for (int i = 0; i <= n; i++) { if (i != n && !dynp[i]) { ps1[a[i]]++; } for (int pid = detec[i]; pid != -1; pid = detecpool[pid].nxt) dryans[detecpool[pid].id] = ps1[detecpool[pid].val]; } memset(ps1, 0, n * (sizeof(int))); ll cpsm = 0; for (int i = 0; i <= n; i++) { if (i != n && !dynp[i]) { cpsm += ps_7j[Ya[i]]; ps_7j[a[i]] += ps1[a[i]]; ps1[Xa[i]]++; } ijk_static[i] = cpsm; for (int pid = detecX[i]; pid != -1; pid = detecpool[pid].nxt) { dryansX[detecpool[pid].id] = ps1[detecpool[pid].val]; dryans_7[detecpool[pid].id] = ps_7j[Y[detecpool[pid].val]]; } } memset(ps1, 0, n * (sizeof(int))); for (int i = 0; i <= n; i++) { if (i != n && !dynp[i]) { ps1[Ya[i]]++; } for (int pid = detecY[i]; pid != -1; pid = detecpool[pid].nxt) dryansY[detecpool[pid].id] = ps1[detecpool[pid].val]; } memset(ps1, 0, n * (sizeof(int))); memset(ps_7j, 0, n * (sizeof(ll))); for (int i = n; i >= 0; i--) { if (i != n && !dynp[i]) { ps2[a[i]] += ps1[a[i]]; ps1[Ya[i]]++; } for (int pid = detec8f[i]; pid != -1; pid = detecpool[pid].nxt) dryans_8_former[detecpool[pid].id] = ps2[detecpool[pid].val]; } memset(ps1, 0, n * (sizeof(int))); memset(ps2, 0, n * (sizeof(ll))); copy(fake_a, fake_a + n, a); copy(fake_Xa, fake_Xa + n, Xa); copy(fake_Ya, fake_Ya + n, Ya); } int PfxGud3[MAXN], SfxGud4[MAXN]; void process(vector<pii> ops) { set<int> mdfd; for (auto [xx, b] : ops) if (xx != -1) mdfd.insert(xx); int dync = 0; memset(dyn, 1, sizeof dyn); for (int i : mdfd) dyn[dync++] = i; memset(dynp, 0, sizeof dynp); for (int i = 0; i < dync; i++) dynp[dyn[i]] = 1; dryrun(ops); int dryid = 0, dryidX = 0, dryidY = 0, dryid6l = 0, dryid7 = 0, dryid8f = 0; for (auto [opn, r] : ops) { if (opn != -1) { a[opn] = r; Xa[opn] = X[r]; Ya[opn] = Y[r]; } else { int mxdyn = 0; for (int i = 0; dyn[i] <= r; i++) mxdyn++; ll P1 = ijk_static[r], P2 = 0, P3 = 0, P4 = 0, P5 = 0, P6 = 0, P7 = 0, P8 = 0; for (int _i = 0; _i < mxdyn; _i++) { int i = dyn[_i]; PfxGud3[i] 
// process(): replay the block for real; PfxGud3/SfxGud4 count dynamic
// partners for each dynamic position, then P1..P8 accumulate the eight
// per-category contributions (consuming the dryans* slots in the same
// order dryrun produced them) before the answer is printed.
= ps1[a[i]]; P2 += ps2[Ya[i]]; ps2[a[i]] += ps1[a[i]]; ps1[Xa[i]]++; } for (int _i = 0; _i < mxdyn; _i++) { int i = dyn[_i]; ps2[a[i]] = 0; ps1[Xa[i]] = 0; } for (int _i = mxdyn - 1; _i >= 0; _i--) { int i = dyn[_i]; SfxGud4[i] = ps1[a[i]]; ps1[Ya[i]]++; } for (int _i = 0; _i < mxdyn; _i++) { int i = dyn[_i]; ps1[Ya[i]] = 0; } for (int _j = 0; _j < mxdyn; _j++) { int j = dyn[_j]; int pfx_xai_km1 = 0; if (j != 0) { int pfx_ixaj_jm1 = dryansX[dryidX++]; int pfx_iyaj_r = dryansY[dryidY++]; int pfx_iyaj_j = dryansY[dryidY++]; P3 += PfxGud3[j] * (pfx_iyaj_r - pfx_iyaj_j); P4 += pfx_ixaj_jm1 * SfxGud4[j]; P5 += 1ll * pfx_ixaj_jm1 * (pfx_iyaj_r - pfx_iyaj_j); int k = dyn[_j]; pfx_xai_km1 = dryans[dryid++]; int pfx_yak_km1 = dryans[dryid++]; int pfxDyn_ixyak = ps1[Ya[k]]; ll pfxDyn_BAD_ixyak = ps2[Ya[k]]; ps1[Xa[k]]++; ps2[Xa[k]] += pfx_xai_km1; P6 += 1ll * pfx_yak_km1 * pfxDyn_ixyak - pfxDyn_BAD_ixyak; P7 += dryans_7[dryid7++]; } else { ps1[Xa[j]]++; } int i = j; if (i != r) { ll sfx_xai_ip1 = dryans_8_former[dryid8f++]; ll sfx_xai_rp1 = dryans_8_former[dryid8f++]; int pfx_xai_r = dryans[dryid++]; int pfx_xai_i = pfx_xai_km1; int pfx_iyxai_n = dryansY[dryidY++]; int pfx_iyxai_r = dryansY[dryidY++]; P8 += (sfx_xai_ip1 - sfx_xai_rp1) - 1ll * (pfx_xai_r - pfx_xai_i) * (pfx_iyxai_n - pfx_iyxai_r); } } for (int _i = 0; _i < mxdyn; _i++) { int i = dyn[_i]; ps2[Xa[i]] = 0; ps1[Xa[i]] = 0; } print(P1 + P2 + P3 + P4 + P5 + P6 + P7 + P8); pc('\n'); } } } signed main() { ios_base::sync_with_stdio(false); cin.tie(0); int m; gi(n), gi(m); for (int i = 0; i < n; i++) { gi(a[i]); a[i]--; } for (int i = 0; i < n; i++) { gi(X[i]); X[i]--; } for (int i = 0; i < n; i++) { gi(Y[i]); Y[i]--; } for (int i = 0; i < n; i++) { Xa[i] = X[a[i]]; Ya[i] = Y[a[i]]; } vector<pii> opq; for (int qx = 0; qx < m; qx++) { int op, v; gi(op); gi(v); if (op == 1) { int r; gi(r); opq.push_back({v - 1, r - 1}); } else { opq.push_back({-1, v - 1}); } if (opq.size() == SQRTN) { process(opq); opq.clear(); } } 
// flush the last (possibly partial) block of operations
process(opq); }
1546
A
AquaMoon and Two Arrays
AquaMoon and Cirno are playing an interesting game with arrays. Cirno has prepared two arrays $a$ and $b$, both consist of $n$ non-negative integers. AquaMoon can perform the following operation an arbitrary number of times (possibly zero): - She chooses two indices $i$ and $j$ ($1 \le i, j \le n$), then decreases the $i$-th element of array $a$ by $1$, and increases the $j$-th element of array $a$ by $1$. The resulting values at $i$-th and $j$-th index of array $a$ are $a_i - 1$ and $a_j + 1$, respectively. Each element of array $a$ \textbf{must be non-negative after each operation}. If $i = j$ this operation doesn't change the array $a$. AquaMoon wants to make some operations to make arrays $a$ and $b$ equal. Two arrays $a$ and $b$ are considered equal if and only if $a_i = b_i$ for all $1 \leq i \leq n$. Help AquaMoon to find a sequence of operations that will solve her problem or find, that it is impossible to make arrays $a$ and $b$ equal. Please note, that you \textbf{don't have to minimize} the number of operations.
First, if the sum of elements in $a$ is not equal to the sum of elements in $b$, then no solution exists. Otherwise, repeatedly find a position $i$ satisfying $a_i>b_i$ and a position $j$ satisfying $a_j<b_j$, then decrease $a_i$ by $1$ and increase $a_j$ by $1$, until the two arrays become equal.
[ "brute force", "greedy" ]
800
#include<bits/stdc++.h> using namespace std; #define O(x) cout<<#x<<" "<<(x)<<"\n" inline int read(){ int x=0,f=1,c=getchar(); while(!isdigit(c)){if(c=='-')f=-1;c=getchar();} while(isdigit(c)){x=(x<<1)+(x<<3)+(c^48);c=getchar();} return f==1?x:-x; } const int N=104; int n,sum,a[N]; vector<pair<int,int> >ans; inline void solve(){ n=read(); sum=0; ans.clear(); for(int i=1;i<=n;i++)a[i]=read(); for(int i=1;i<=n;i++){ a[i]-=read(); sum+=a[i]; } if(sum){puts("-1");return;} for(int i=1;i<=n;i++){ for(int j=1;j<=a[i];j++){ for(int k=1;k<=n;k++)if(a[k]<0){ ans.push_back(make_pair(i,k)); ++a[k]; break; } } } cout<<ans.size()<<"\n"; for(auto v:ans)cout<<v.first<<" "<<v.second<<"\n"; } int main(){ for(int T=read();T--;)solve(); return 0; }
1546
B
AquaMoon and Stolen String
AquaMoon had $n$ strings of length $m$ each. $n$ is an \textbf{odd} number. When AquaMoon was gone, Cirno tried to pair these $n$ strings together. After making $\frac{n-1}{2}$ pairs, she found out that there was exactly one string without the pair! In her rage, she disrupted each pair of strings. For each pair, she selected some positions (at least $1$ and at most $m$) and swapped the letters in the two strings of this pair at the selected positions. For example, if $m = 6$ and two strings "abcdef" and "xyzklm" are in one pair and Cirno selected positions $2$, $3$ and $6$ she will swap 'b' with 'y', 'c' with 'z' and 'f' with 'm'. The resulting strings will be "ayzdem" and "xbcklf". Cirno then stole away the string without pair and shuffled all remaining strings in arbitrary order. AquaMoon found the remaining $n-1$ strings in complete disarray. Also, she remembers the initial $n$ strings. She wants to know which string was stolen, but she is not good at programming. Can you help her?
Observe that each letter of the answer must appear an odd number of times in its column (every other string belongs to a pair, so together the pair's letters appear an even number of times, and the swap operation does not change how many times a given letter occurs in a column). So we can consider each position individually: in every column there is always exactly one letter that occurs an odd number of times. Collecting these letters, column by column, yields the stolen string.
[ "interactive", "math" ]
1,200
#include <cstdio> const int Maxn=1000000; char s[Maxn+5]; char ans[Maxn+5]; int n,m; void solve(){ scanf("%d%d",&n,&m); n=(n<<1)-1; for(int i=1;i<=m;i++){ ans[i]=0; } for(int i=1;i<=n;i++){ scanf("%s",s+1); for(int j=1;j<=m;j++){ ans[j]^=s[j]; } } for(int i=1;i<=m;i++){ putchar(ans[i]); } putchar('\n'); } int main(){ int T; scanf("%d",&T); while(T--){ solve(); } return 0; }
1547
A
Shortest Path with Obstacle
There are three cells on an infinite 2-dimensional grid, labeled $A$, $B$, and $F$. Find the length of the shortest path from $A$ to $B$ if: - in one move you can go to any of the four adjacent cells sharing a side; - visiting the cell $F$ is forbidden (it is an obstacle).
Let's suppose that the forbidden cell does not affect the shortest path. In that case, the answer would be $|x_A - x_B| + |y_A - y_B|$. The forbidden cell blocks the shortest path if and only if it belongs to every shortest path. In other words, if there is only one shortest path and the forbidden cell belongs to it. So, the answer differs from $|x_A - x_B| + |y_A - y_B|$ if and only if $A$ and $B$ are in one row or column and $F$ is between them. In that case, the answer is $|x_A - x_B| + |y_A - y_B| + 2$ (i.e. greater by $2$). In order to check that point $R$ lays between $P$ and $Q$ on a straight line, just check $\min(P, Q) < R < \max(P, Q)$. So, the correct solution looks like this:
[ "implementation", "math" ]
800
#include <bits/stdc++.h> using namespace std; #define forn(i, n) for (int i = 0; i < int(n); i++) int main() { int t; cin >> t; forn(tt, t) { vector<int> a(2), b(2), f(2); cin >> a[0] >> a[1]; cin >> b[0] >> b[1]; cin >> f[0] >> f[1]; int ans = abs(a[0] - b[0]) + abs(a[1] - b[1]); if ((a[0] == b[0] && a[0] == f[0] && min(a[1], b[1]) < f[1] && f[1] < max(a[1], b[1])) || (a[1] == b[1] && a[1] == f[1] && min(a[0], b[0]) < f[0] && f[0] < max(a[0], b[0]))) ans += 2; cout << ans << endl; } }
1547
B
Alphabetical Strings
A string $s$ of length $n$ ($1 \le n \le 26$) is called alphabetical if it can be obtained using the following algorithm: - first, write an empty string to $s$ (i.e. perform the assignment $s$ := ""); - then perform the next step $n$ times; - at the $i$-th step take $i$-th lowercase letter of the Latin alphabet and write it either to the left of the string $s$ or to the right of the string $s$ (i.e. perform the assignment $s$ := $c+s$ or $s$ := $s+c$, where $c$ is the $i$-th letter of the Latin alphabet). In other words, iterate over the $n$ first letters of the Latin alphabet starting from 'a' and etc. Each time we prepend a letter to the left of the string $s$ or append a letter to the right of the string $s$. Strings that can be obtained in that way are alphabetical. For example, the following strings are alphabetical: "a", "ba", "ab", "bac" and "ihfcbadeg". The following strings \textbf{are not} alphabetical: "z", "aa", "ca", "acb", "xyz" and "ddcba". From the given string, determine if it is alphabetical.
For a start, let's find the position of the letter 'a' in string $s$. If this position does not exist, then the answer would be 'NO'. Suppose that this position exists and equals $\text{pos}_a$. Let's create two pointers $L$ and $R$. Initially $L := \text{pos}_a,~R := L$. We will try to build string $s$ using the algorithm from the statement. Suppose that we have built substring $s[L..R]$ in $i$ iterations. Consider the next letter of the Latin alphabet $c_i$. Let's look at cases: find position $pos$ of the letter $c_i$ in $s$ (if it does not exist then 'NO'); if $pos = L - 1$ then make an assignment $L := L - 1$ and process the next letter $c_i$; if $pos = R + 1$ then make an assignment $R := R + 1$ and process the next letter $c_i$; otherwise string $s$ is not alphabetical and the answer is 'NO'.
[ "greedy", "implementation", "strings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1547B. A string is "alphabetical" iff, taking letters
// 'a','b','c',... in order, each next letter sits immediately left or
// immediately right of the contiguous segment built so far. Grow a window
// [lo, hi] around the position of 'a' and check every extension.
int main() {
    int t;
    cin >> t;
    while (t--) {
        string s;
        cin >> s;
        size_t posA = s.find('a');
        if (posA == string::npos) {
            // Without 'a' the construction cannot even start.
            cout << "NO" << endl;
            continue;
        }
        int n = (int)s.length();
        int lo = (int)posA, hi = (int)posA;
        bool ok = true;
        for (int i = 1; i < n; i++) {
            size_t found = s.find(char('a' + i));
            if (found == string::npos) {
                ok = false;
                break;
            }
            int pos = (int)found;
            if (pos == lo - 1) {
                lo--;           // letter was prepended
            } else if (pos == hi + 1) {
                hi++;           // letter was appended
            } else {
                ok = false;     // letter is detached from the window
                break;
            }
        }
        cout << (ok ? "YES" : "NO") << endl;
    }
}
1547
C
Pair Programming
Monocarp and Polycarp are learning new programming techniques. Now they decided to try pair programming. It's known that they have worked together on the same file for $n + m$ minutes. Every minute exactly one of them made one change to the file. Before they started, there were already $k$ lines written in the file. Every minute exactly one of them does one of two actions: adds a new line to the end of the file or changes one of its lines. Monocarp worked in total for $n$ minutes and performed the sequence of actions $[a_1, a_2, \dots, a_n]$. If $a_i = 0$, then he adds a new line to the end of the file. If $a_i > 0$, then he changes the line with the number $a_i$. Monocarp performed actions strictly in this order: $a_1$, then $a_2$, ..., $a_n$. Polycarp worked in total for $m$ minutes and performed the sequence of actions $[b_1, b_2, \dots, b_m]$. If $b_j = 0$, then he adds a new line to the end of the file. If $b_j > 0$, then he changes the line with the number $b_j$. Polycarp performed actions strictly in this order: $b_1$, then $b_2$, ..., $b_m$. Restore their common sequence of actions of length $n + m$ such that all actions would be correct — there should be no changes to lines that do not yet exist. Keep in mind that in the common sequence Monocarp's actions should form the subsequence $[a_1, a_2, \dots, a_n]$ and Polycarp's — subsequence $[b_1, b_2, \dots, b_m]$. They can replace each other at the computer any number of times. Let's look at an example. Suppose $k = 3$. Monocarp first changed the line with the number $2$ and then added a new line (thus, $n = 2, \: a = [2, 0]$). Polycarp first added a new line and then changed the line with the number $5$ (thus, $m = 2, \: b = [0, 5]$). Since the initial length of the file was $3$, in order for Polycarp to change line number $5$ two new lines must be added beforehand. Examples of correct sequences of changes, in this case, would be $[0, 2, 0, 5]$ and $[2, 0, 0, 5]$. 
Changes $[0, 0, 5, 2]$ (wrong order of actions) and $[0, 5, 2, 0]$ (line $5$ cannot be edited yet) are not correct.
The solution is that if we can do something, let's do it. It doesn't make sense not to act, because neither adding a new row nor modifying an existing one can prevent the existing row from being changed in the future. Therefore, we will iterate over the actions and eagerly act Monocarp or Polycarp. Let's create two pointers $i$ and $j$ in arrays $a$ and $b$ - index of possible action of Monocarp and Polycarp and $c$ - the current length of the file. Suppose that $a_i = 0$ or $b_j = 0$ on current iteration. Then we take the appropriate zero element and increase $c$ by one. We can do that because appending a new line cannot make a new answer prefix incorrect if the previous prefix was correct. Suppose that $a_i \ne 0$ and $b_j \ne 0$. If $a_i > c$ and $b_j > c$ then there is no answer because we can potentially do only two actions and both make the answer incorrect. If one number is greater than $c$ and the other is less than or equals then we take the one that less than or equals $c$. If one of the sequences $a$ or $b$ ends then only one potential action needs to be checked at each iteration.
[ "greedy", "two pointers" ]
1,100
#include <iostream>
#include <vector>
using namespace std;

// Codeforces 1547C. Greedily merge the two action sequences. Appending a
// line (action 0) is always safe and never hurts later actions; an edit
// a[i] > 0 is safe as soon as the file has at least a[i] lines. If neither
// author can act, no valid merge exists.
int main() {
    int t;
    cin >> t;
    while (t--) {
        int k, n, m;
        cin >> k >> n >> m;
        vector<int> a(n), b(m);
        for (int &x : a) cin >> x;
        for (int &x : b) cin >> x;
        int i = 0, j = 0;          // next unused action of each author
        vector<int> merged;
        bool possible = true;
        while (i != n || j != m) {
            if (i != n && a[i] == 0) {
                merged.push_back(0);
                k++;               // file grew by one line
                i++;
            } else if (j != m && b[j] == 0) {
                merged.push_back(0);
                k++;
                j++;
            } else if (i != n && a[i] <= k) {
                merged.push_back(a[i++]);
            } else if (j != m && b[j] <= k) {
                merged.push_back(b[j++]);
            } else {
                // Both pending actions edit lines that do not exist yet.
                cout << -1 << '\n';
                possible = false;
                break;
            }
        }
        if (possible) {
            for (int x : merged) cout << x << ' ';
            cout << endl;
        }
    }
    return 0;
}
1547
D
Co-growing Sequence
A sequence of non-negative integers $a_1, a_2, \dots, a_n$ is called growing if for all $i$ from $1$ to $n - 1$ all ones (of binary representation) in $a_i$ are in the places of ones (of binary representation) in $a_{i + 1}$ (in other words, $a_i \:\&\: a_{i + 1} = a_i$, where $\&$ denotes bitwise AND). If $n = 1$ then the sequence is considered growing as well. For example, the following four sequences are growing: - $[2, 3, 15, 175]$ — in binary it's $[10_2, 11_2, 1111_2, 10101111_2]$; - $[5]$ — in binary it's $[101_2]$; - $[1, 3, 7, 15]$ — in binary it's $[1_2, 11_2, 111_2, 1111_2]$; - $[0, 0, 0]$ — in binary it's $[0_2, 0_2, 0_2]$. The following three sequences are non-growing: - $[3, 4, 5]$ — in binary it's $[11_2, 100_2, 101_2]$; - $[5, 4, 3]$ — in binary it's $[101_2, 100_2, 011_2]$; - $[1, 2, 4, 8]$ — in binary it's $[0001_2, 0010_2, 0100_2, 1000_2]$. Consider two sequences of non-negative integers $x_1, x_2, \dots, x_n$ and $y_1, y_2, \dots, y_n$. Let's call this pair of sequences co-growing if the sequence $x_1 \oplus y_1, x_2 \oplus y_2, \dots, x_n \oplus y_n$ is growing where $\oplus$ denotes bitwise XOR. You are given a sequence of integers $x_1, x_2, \dots, x_n$. Find the lexicographically minimal sequence $y_1, y_2, \dots, y_n$ such that sequences $x_i$ and $y_i$ are co-growing. The sequence $a_1, a_2, \dots, a_n$ is lexicographically smaller than the sequence $b_1, b_2, \dots, b_n$ if there exists $1 \le k \le n$ such that $a_i = b_i$ for any $1 \le i < k$ but $a_k < b_k$.
In order to build lexicographically minimal co-growing with $x_i$ sequence, it is enough to build its elements iteratively, beginning from $y_1$ and minimizing the $i$-th element assuming that $y_1, \ldots, y_{i - 1}$ have already been found. Assign $y_1 = 0$. According to the statement, all elements of the sequence are non-negative, so $y_1$ cannot be less than zero. It turns out that $y_i = 0$ is the minimal possible first element. The existence of an answer with $y_1 = 0$ follows from the construction algorithm described below. Let's use mathematical induction and construct $y_i$ under the assumption that all the previous elements of the sequence have already been constructed. In order to satisfy the condition for the growth of the final sequence, the number $x_i \oplus y_i$ must contain one bits at all places (but not necessarily limited to them), on which there are one bits in the number $x_{i - 1} \oplus y_{i - 1}$. Let's denote $x_{i - 1} \oplus y_{i - 1}$ for $t$ and find out what bits can be in $y_i$ to satisfy this condition: If in $t$ stands $0$ bit then independently from $x_i$ in $y_i$ at the same spot we can place any bit because there is no limit on the corresponding bit in $x_i \oplus y_i$; If in $t$ stands $1$ bit and in $x_i$ - $0$ then the corresponding bit in $y_i$ should be equal $1$, so that in $x_i \oplus y_i$ the corresponding bit also equals one; If in $t$ and in $x_i$ stands $1$ bit then in $y_1$ should be $0$ bit at the corresponding place for the same reasons. The bit transformation described above can be given by the expression $y_i = \left(t\,|\,x_i\right) \oplus x_i$. Indeed, this expression gives us bit 'one' at the fixed position if and only if at that place in $t$ stands $1$ bit and in $x_i$ stands $0$ bit. For the full solution, it remains only to apply this formula in a loop from $2$ to $n$.
[ "bitmasks", "constructive algorithms", "greedy" ]
1,300
# Codeforces 1547D. Build the lexicographically smallest y such that the
# sequence (x_i ^ y_i) is "growing": y_i must contribute exactly the bits
# of the previous XOR value t = x_{i-1} ^ y_{i-1} that x_i lacks, which is
# t & ~x_i. y_0 = 0 is always optimal.
for _ in range(int(input())):
    n = int(input())
    xs = list(map(int, input().split()))
    ys = [0] * n
    for i in range(1, n):
        prev_xor = ys[i - 1] ^ xs[i - 1]   # already-growing value at i-1
        ys[i] = prev_xor & ~xs[i]          # missing bits only
    print(" ".join(map(str, ys)))
1547
E
Air Conditioners
On a strip of land of length $n$ there are $k$ air conditioners: the $i$-th air conditioner is placed in cell $a_i$ ($1 \le a_i \le n$). Two or more air conditioners cannot be placed in the same cell (i.e. all $a_i$ are distinct). Each air conditioner is characterized by one parameter: temperature. The $i$-th air conditioner is set to the temperature $t_i$. \begin{center} {\small Example of strip of length $n=6$, where $k=2$, $a=[2,5]$ and $t=[14,16]$.} \end{center} For each cell $i$ ($1 \le i \le n$) find it's temperature, that can be calculated by the formula $$\min_{1 \le j \le k}(t_j + |a_j - i|),$$ where $|a_j - i|$ denotes absolute value of the difference $a_j - i$. In other words, the temperature in cell $i$ is equal to the minimum among the temperatures of air conditioners, increased by the distance from it to the cell $i$. Let's look at an example. Consider that $n=6, k=2$, the first air conditioner is placed in cell $a_1=2$ and is set to the temperature $t_1=14$ and the second air conditioner is placed in cell $a_2=5$ and is set to the temperature $t_2=16$. In that case temperatures in cells are: - temperature in cell $1$ is: $\min(14 + |2 - 1|, 16 + |5 - 1|)=\min(14 + 1, 16 + 4)=\min(15, 20)=15$; - temperature in cell $2$ is: $\min(14 + |2 - 2|, 16 + |5 - 2|)=\min(14 + 0, 16 + 3)=\min(14, 19)=14$; - temperature in cell $3$ is: $\min(14 + |2 - 3|, 16 + |5 - 3|)=\min(14 + 1, 16 + 2)=\min(15, 18)=15$; - temperature in cell $4$ is: $\min(14 + |2 - 4|, 16 + |5 - 4|)=\min(14 + 2, 16 + 1)=\min(16, 17)=16$; - temperature in cell $5$ is: $\min(14 + |2 - 5|, 16 + |5 - 5|)=\min(14 + 3, 16 + 0)=\min(17, 16)=16$; - temperature in cell $6$ is: $\min(14 + |2 - 6|, 16 + |5 - 6|)=\min(14 + 4, 16 + 1)=\min(18, 17)=17$. For each cell from $1$ to $n$ find the temperature in it.
Let's calculate two arrays $L$ and $R$, where: $L_i$ is the temperature in cell $i$ if we take only air conditioners with numbers less than or equal to $i$; $R_i$ is the temperature in cell $i$ if we take only air conditioners with numbers greater than or equal to $i$; Let's show how to calculate array $L$. We will calculate values from left to right using DP and next formula: $L_i = \min(L_{i+1}+1, c_i)$, where $c_i$ is the temperature of air conditioner in cell $i$ (or infinity if there is no air conditioner in this cell). Indeed, the value of $L_i$ is either determined by the air conditioner in this cell (i.e. equals $c_i$) or by some air conditioner to the left, but this means that we should take the answer from the previous cell and increase it by $1$. The full code for calculating $L$ looks like this: In exactly the same way (but from right to left) we will calculate $R$: The answer for cell $i$ is $\min(L[i], R[i])$.
[ "data structures", "dp", "implementation", "shortest paths", "sortings", "two pointers" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1547E. Temperature in cell i is min_j (t_j + |a_j - i|).
// Split the minimum into conditioners left of i and right of i, and compute
// each side with a linear sweep: sweeping left-to-right,
//   run = min(run + 1, c[i]),
// where c[i] is the conditioner temperature in cell i (infinity if none);
// symmetrically right-to-left. The answer per cell is the min of both sweeps.
//
// Fix vs. the original: the inner temperatures vector was named `t`,
// shadowing the outer test-case counter `t`; it is renamed, and the two
// per-direction arrays are merged into one result array.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n, k;
        cin >> n >> k;
        vector<int> pos(k), temp(k);
        for (int i = 0; i < k; i++) cin >> pos[i];
        for (int i = 0; i < k; i++) cin >> temp[i];

        // c[i]: conditioner temperature in cell i, INT_MAX as "none".
        // long long so run + 1 and comparisons never overflow.
        vector<long long> c(n, INT_MAX);
        for (int i = 0; i < k; i++) c[pos[i] - 1] = temp[i];

        vector<long long> best(n);
        long long run = INT_MAX;
        // Left-to-right: nearest conditioner at or left of i.
        for (int i = 0; i < n; i++) {
            run = min(run + 1, c[i]);
            best[i] = run;
        }
        run = INT_MAX;
        // Right-to-left: nearest conditioner at or right of i.
        for (int i = n - 1; i >= 0; i--) {
            run = min(run + 1, c[i]);
            best[i] = min(best[i], run);
        }
        for (int i = 0; i < n; i++) cout << best[i] << " ";
        cout << endl;
    }
}
1547
F
Array Stabilization (GCD version)
You are given an array of positive integers $a = [a_0, a_1, \dots, a_{n - 1}]$ ($n \ge 2$). In one step, the array $a$ is replaced with another array of length $n$, in which each element is the greatest common divisor (GCD) of two neighboring elements (the element itself and its right neighbor; consider that the right neighbor of the $(n - 1)$-th element is the $0$-th element). Formally speaking, a new array $b = [b_0, b_1, \dots, b_{n - 1}]$ is being built from array $a = [a_0, a_1, \dots, a_{n - 1}]$ such that $b_i$ $= \gcd(a_i, a_{(i + 1) \mod n})$, where $\gcd(x, y)$ is the greatest common divisor of $x$ and $y$, and $x \mod y$ is the remainder of $x$ dividing by $y$. In one step the array $b$ is built and then the array $a$ is replaced with $b$ (that is, the assignment $a$ := $b$ is taking place). For example, if $a = [16, 24, 10, 5]$ then $b = [\gcd(16, 24)$, $\gcd(24, 10)$, $\gcd(10, 5)$, $\gcd(5, 16)]$ $= [8, 2, 5, 1]$. Thus, after one step the array $a = [16, 24, 10, 5]$ will be equal to $[8, 2, 5, 1]$. For a given array $a$, find the minimum number of steps after which all values $a_i$ become equal (that is, $a_0 = a_1 = \dots = a_{n - 1}$). If the original array $a$ consists of identical elements then consider the number of steps is equal to $0$.
First, note that the array stabilizes if and only if it consists of equal elements, and the number that the array $a$ will eventually consist of is $T = \gcd(a_1, \ldots, a_n)$.
Then we can update the answer with the length of this segment and move onto the next prime in the factorization of $a_i$ or go to $i + 1$, if all primes have already been iterated through. Note that if a segment of numbers divisible by $p$ contains indices from $l$ to $r$, then we iterate through it $r - l + 1$ times. In order to avoid reiteration on each segment, we remove $p$ from the factorizations of all numbers on the segment after considering only one. The resulting solution works in $\mathcal{O}\left(n \cdot \mathtt{MAX\_P}\right)$, where $\mathtt{MAX\_P}$ - the maximum number of different primes in factoriztion of $a_i$. Considering that $a_i \leq 10^6$, $\mathtt{MAX\_P} = 8$, so the solution fits into the time limit.
[ "binary search", "brute force", "data structures", "divide and conquer", "number theory", "two pointers" ]
1,900
// Codeforces 1547F. The array stabilizes at T = gcd(all a_i): after k steps
// cell j holds the gcd of the length-(k+1) cyclic window starting at j.
// So the answer is the length of the longest cyclic segment of a/T whose
// gcd exceeds 1, found by factorizing every a_i/T and expanding maximal
// runs that share a prime factor.
#include <iostream>
#include <vector>
#include <set>
using namespace std;

const unsigned int MAX_A = 1'000'000;
// sieve[x] = smallest prime factor of x (filled by the linear sieve in main).
vector<unsigned int> sieve(MAX_A + 1);
vector<unsigned int> prime;

// Classic recursive Euclid.
unsigned int gcd(unsigned int a, unsigned int b) { return b == 0 ? a : gcd(b, a % b); }

// Reads one test case from stdin and returns the number of steps until all
// elements become equal.
unsigned int solve() {
    unsigned int n;
    cin >> n;
    vector<unsigned int> a(n);
    for (unsigned int i = 0; i < n; i++) { cin >> a[i]; }
    unsigned int common = a[0];
    vector<set<unsigned int>> facts(n);
    for (unsigned int i = 1; i < n; i++) { common = gcd(common, a[i]); }
    // Divide out the global gcd, then factorize each quotient via the
    // smallest-prime-factor sieve: facts[i] = distinct primes of a[i]/common.
    // After this division the overall gcd is 1, so no prime spans all cells.
    for (unsigned int i = 0; i < n; i++) {
        unsigned int t = a[i] / common;
        while (t != 1) { facts[i].insert(sieve[t]); t /= sieve[t]; }
    }
    unsigned int answer = 0;
    for (unsigned int i = 0; i < n; i++) {
        for (unsigned int p : facts[i]) {
            // Expand cyclically left (l) and right (r) from i while the
            // neighbours also contain prime p. Erase p along the way so each
            // run is counted exactly once (i itself is skipped to avoid
            // mutating the set being iterated; it is cleared afterwards).
            int l = (i + n - 1) % n, r = i;
            unsigned int cnt = 0;
            while (facts[l].count(p) > 0) {
                facts[l].erase(p);
                l--;
                cnt++;
                if (l < 0) { l = n - 1; }   // wrap around the circle
            }
            while (facts[r].count(p) > 0) {
                if (r != i) { facts[r].erase(p); }
                ++r %= n;                   // advance cyclically
                cnt++;                      // counts i itself on the first pass
            }
            answer = max(answer, cnt);      // cnt = run length = steps needed
        }
        facts[i].clear();
    }
    return answer;
}

int main() {
    // Linear sieve: record the smallest prime factor of every value <= MAX_A.
    sieve[1] = 1;
    for (unsigned int i = 2; i <= MAX_A; i++) {
        if (sieve[i] == 0) { sieve[i] = i; prime.push_back(i); }
        for (unsigned int j = 0; j < prime.size() && prime[j] <= sieve[i] && i * prime[j] <= MAX_A; j++) {
            sieve[i * prime[j]] = prime[j];
        }
    }
    unsigned int t;
    cin >> t;
    for (unsigned int i = 0; i < t; i++) { cout << solve() << '\n'; }
}
1547
G
How Many Paths?
You are given a directed graph $G$ which can contain loops (edges from a vertex to itself). Multi-edges are absent in $G$ which means that for all ordered pairs $(u, v)$ exists at most one edge from $u$ to $v$. Vertices are numbered from $1$ to $n$. A path from $u$ to $v$ is a sequence of edges such that: - vertex $u$ is the start of the first edge in the path; - vertex $v$ is the end of the last edge in the path; - for all pairs of adjacent edges next edge starts at the vertex that the previous edge ends on. We will assume that the empty sequence of edges is a path from $u$ to $u$. For each vertex $v$ output one of four values: - $0$, if there are no paths from $1$ to $v$; - $1$, if there is only one path from $1$ to $v$; - $2$, if there is more than one path from $1$ to $v$ and the number of paths is finite; - $-1$, if the number of paths from $1$ to $v$ is infinite. Let's look at the example shown in the figure. Then: - the answer for vertex $1$ is $1$: there is only one path from $1$ to $1$ (path with length $0$); - the answer for vertex $2$ is $0$: there are no paths from $1$ to $2$; - the answer for vertex $3$ is $1$: there is only one path from $1$ to $3$ (it is the edge $(1, 3)$); - the answer for vertex $4$ is $2$: there are more than one paths from $1$ to $4$ and the number of paths are finite (two paths: $[(1, 3), (3, 4)]$ and $[(1, 4)]$); - the answer for vertex $5$ is $-1$: the number of paths from $1$ to $5$ is infinite (the loop can be used in a path many times); - the answer for vertex $6$ is $-1$: the number of paths from $1$ to $6$ is infinite (the loop can be used in a path many times).
The first motivation for solving this problem is to write a lot of standard code like "find strongly connected components", do some DP over the condensed graph (the graph of strongly connected components), and so on. In fact, this problem can be solved much more elegantly with less code if you have a little better understanding of how depth-first search works. Consider a usual depth-first search on a digraph that is started from the vertex $1$. This will be a normal depth-first search, which will paint vertices using three colors: white (the vertex has not yet been found by the search), gray (the vertex is processing by DFS), and black (the vertex has already been processed by the DFS completely, that is, completely bypassed its subtree of the depth-first search tree). Here's the pseudocode: The following statements are true: there is a cycle in the digraph reachable from $s$ if and only if the root call dfs(s) visits in the line if color [v] == WHITE when color[v] == GRAY; moreover, for each reachable cycle from $s$ there is at least one vertex that will execute the previous item (then the vertex $v$ belongs to the cycle); if the root call dfs(s) visits in the line if color[v] == WHITE, when color [v] == BLACK, then there is more than one path (the opposite is not true). It is clear that there are infinite paths from $s$ to $u$ if and only if there is a vertex $v$ on some path from $s$ to $u$ such that $v$ is in a cycle. Thus, we mark all such vertices $v$ for which color[v] == GRAY at the moment of execution of the line if color [v] == WHITE. The fact is true: All vertices reachable from those noted in the previous phrase are those and only those vertices that an infinite number of paths lead to them. A similar fact is also true for finding vertices to which at least two paths (but a finite number) lead. Let's mark all such vertices $v$ for which color[v] == BLACK at the moment of execution of the line if color [v] == WHITE. 
The fact is true: Let's find all reachable vertices from those marked in the previous phrase. Let's discard those up to which we have already determined that the number of paths is infinite. The remaining reachable vertices are those and only those to which there are at least two paths (and their finite number). So the solution looks like this: let's make a depth-first search from the root, mark during it those vertices that were gray when trying to go to them (group A) and were black when trying to go to them (group B); mark the vertices reachable from the group A (let's call them AA); mark the vertices reachable from the group B (let's call them BB); the answer for the vertex is: $0$ if it is not reachable from $s$ (this determines the first DFS); $-1$, if it is from AA; $2$ if it is from BB (but not from AA); $1$, if it is not from AA and not from BB. $0$ if it is not reachable from $s$ (this determines the first DFS); $-1$, if it is from AA; $2$ if it is from BB (but not from AA); $1$, if it is not from AA and not from BB. In the author's solution, only one dfs function was used with an additional boolean parameter to determine its mode.
[ "dfs and similar", "dp", "graphs", "trees" ]
2,100
// Codeforces 1547G. One DFS from vertex 0 three-colours the graph
// (0 = white/unseen, 1 = gray/on stack, 2 = black/finished). While it runs,
// record vertices that were re-visited while GRAY (they lie on a cycle
// reachable from the root -> infinitely many paths) or while BLACK (reached
// a second time -> at least two paths). Everything reachable from the first
// group answers -1; from the second group (and not the first), 2.
#include <bits/stdc++.h>
using namespace std;
#define forn(i, n) for (int i = 0; i < int(n); i++)
int n;
vector<vector<int>> g;
// s[0]: vertices re-seen while color==1 (gray); s[1]: re-seen while color==2 (black).
set<int> s[2];
// Plain DFS over g. With use_s set, also files re-visited vertices into s
// according to their colour at the moment of the attempted visit.
void dfs(int u, vector<int>& color, bool use_s) {
    color[u] = 1;
    for (int v: g[u])
        if (color[v] == 0)
            dfs(v, color, use_s);
        else if (use_s)
            s[color[v] - 1].insert(v);
    color[u] = 2;
}
int main() {
    int t;
    cin >> t;
    forn(tt, t) {
        int m;
        cin >> n >> m;
        g = vector<vector<int>>(n);
        forn(i, 2) s[i] = set<int>();   // reset globals between test cases
        forn(i, m) {
            int x, y;
            cin >> x >> y;
            x--, y--;
            g[x].push_back(y);
        }
        vector<int> color = vector<int>(n);
        // Pass 1: reachability from vertex 0 plus the gray/black marker sets.
        dfs(0, color, true);
        // Pass 2: flood-fill everything reachable from each marker set.
        vector<vector<int>> c(2, vector<int>(n));
        forn(i, 2) for (auto u: s[i]) dfs(u, c[i], false);
        forn(i, n) {
            int ans = 0;                       // unreachable from vertex 1
            if (color[i] != 0) {
                ans = 1;                       // exactly one path by default
                if (c[0][i]) ans = -1;         // downstream of a cycle: infinite
                else if (c[1][i]) ans = 2;     // paths merged: finite, > 1
            }
            cout << ans << " ";
        }
        cout << endl;
    }
}
1548
A
Web of Lies
\begin{quote} When you play the game of thrones, you win, or you die. There is no middle ground. \hfill Cersei Lannister, A Game of Thrones by George R. R. Martin \end{quote} There are $n$ nobles, numbered from $1$ to $n$. Noble $i$ has a power of $i$. There are also $m$ "friendships". A friendship between nobles $a$ and $b$ is always mutual. A noble is defined to be vulnerable if both of the following conditions are satisfied: - the noble has at least one friend, and - \textbf{all} of that noble's friends have a higher power. You will have to process the following three types of queries. - Add a friendship between nobles $u$ and $v$. - Remove a friendship between nobles $u$ and $v$. - Calculate the answer to the following process. The process: all vulnerable nobles are simultaneously killed, and all their friendships end. Then, it is possible that new nobles become vulnerable. The process repeats itself until no nobles are vulnerable. It can be proven that the process will end in finite time. After the process is complete, you need to calculate the number of remaining nobles. Note that the results of the process are \textbf{not} carried over between queries, that is, every process starts with all nobles being alive!
For various graphs, try and simulate the process. Determine a rule to figure out whether a noble survives or not. When you add or remove a single edge, how much can the answer change by? Try and recalculate the answer in $\mathcal{O}(1)$. Due to the queries, actually simulating the process each time will be too expensive. Proof that the Process will End: Assume after round $x$, $x_i$ nobles are killed. If $x_i=0$, then the state of the graph doesn't change, so the process will have ended. If $x_i > 0$, then the number of nobles decreases. Thus, the maximum number of rounds the process can last is $N$, so it must end. $\textbf{Lemma 1:}$ At the end of the process, no two nobles will still be friends. Proof by Contradiction: Assume that nobles $u$ and $v$ (assume WLOG that $u < v$) are still friends and the process is over. In order for $u$ to avoid being killed, there must be a noble $w$ weaker than $u$ that is also a friend of $u$. The same logic then applies to $w$, and we have an infinite descent argument. There are only a finite number of nobles weaker than $u$, so there will be a contradiction. $\textbf{Lemma 2:}$ If all of a noble's friends are weaker than it, that noble cannot be killed. Direct Proof: Since none of the noble's friends are stronger than it, it is impossible for $\textit{all}$ of them to be stronger at any point in the process. $\textbf{Final Idea:}$ By combining Lemmas 1 and 2, we can prove that if ALL of a noble's friends are weaker than it, that noble survives, otherwise it will die. This leads to the solution. Maintain in an array the number of nobles weaker than noble $i$. Since the updates guarantee that the edge being removed/added does/doesn't exist respectively, we only need to keep track of the number of edges of each noble. Essentially, a noble survives if and only if weaker[i]==edges[i]. After linear precomputation, updates and queries take constant time. The time complexity is $\mathcal{O}(N+M+Q)$.
[ "brute force", "graphs", "greedy" ]
1,400
//Written by m371 (Runtime: 202ms)
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1548A. A noble survives the process iff none of its friends is
// stronger. Keep, for every noble u, the count of friends stronger than u;
// "doomed" counts nobles with at least one stronger friend, and each type-3
// query answers n - doomed in O(1).
const int N = 200050;
int strongerFriends[N];
int doomed = 0;

// Records a new friendship: only the weaker endpoint gains a stronger friend.
void addFriendship(int u, int v) {
    if (u > v) swap(u, v);
    strongerFriends[u]++;
    if (strongerFriends[u] == 1) doomed++;
}

// Removes an existing friendship (guaranteed to exist by the statement).
void removeFriendship(int u, int v) {
    if (u > v) swap(u, v);
    strongerFriends[u]--;
    if (strongerFriends[u] == 0) doomed--;
}

int main() {
    int n, m;
    scanf("%i %i", &n, &m);
    for (int e = 0; e < m; e++) {
        int u, v;
        scanf("%i %i", &u, &v);
        addFriendship(u, v);
    }
    int q;
    scanf("%i", &q);
    while (q--) {
        int type;
        scanf("%i", &type);
        if (type == 1) {
            int u, v;
            scanf("%i %i", &u, &v);
            addFriendship(u, v);
        } else if (type == 2) {
            int u, v;
            scanf("%i %i", &u, &v);
            removeFriendship(u, v);
        } else {
            printf("%i ", n - doomed);
        }
    }
    return 0;
}
1548
B
Integers Have Friends
British mathematician John Littlewood once said about Indian mathematician Srinivasa Ramanujan that "every positive integer was one of his personal friends." It turns out that positive integers can also be friends with each other! You are given an array $a$ of distinct positive integers. Define a \textbf{subarray} $a_i, a_{i+1}, \ldots, a_j$ to be a friend group if and only if there exists an integer $m \ge 2$ such that $a_i \bmod m = a_{i+1} \bmod m = \ldots = a_j \bmod m$, where $x \bmod y$ denotes the remainder when $x$ is divided by $y$. Your friend Gregor wants to know the size of the largest friend group in $a$.
Let's say the subarray $A_i \dots A_j$ is all congruent to $n\mod{m}$. What does that imply about the subarray? What does the previous hint imply about the difference array generated by $D[i]=A[i+1]-A[i]$? GCD The key observation is to construct the difference array $D$ of size $N-1$, where $D[i]=abs(A[i+1]-A[i])$. If a given subarray is a friend group, then every difference is a multiple of some $m$. Since every element of $A$ is distinct, the case when $D[i]=0$ can be ignored. We can now convert this into a GCD (greatest common divisor) problem. It follows that $A[i\dots j]$ is a friend group if and only if $\gcd{(D[i\dots j-1])} > 1$. Indeed, the value $m$ that we want is equal to this GCD. To solve the problem, we can use a sparse table or a segment tree to find the largest possible subarray beginning at $i$, and then max over all subarray answers to get the final answer. The time complexity is $\mathcal{O}(N\log{N}\log{10^{18}})$. The first log is for the sparse table, the second is for computing GCDs. Note that the de facto time complexity may be closer to $\mathcal{O}(N\log{N}+N\log{10^{18}})$, due to the insights from this blog post.
[ "binary search", "data structures", "divide and conquer", "math", "number theory", "two pointers" ]
1,800
//Written by emorgan5289 (Runtime: 218ms) #include <bits/stdc++.h> using namespace std; using ll = long long; const int inf = 1e9+10; const ll inf_ll = 1e18+10; #define all(x) (x).begin(), (x).end() #define pb push_back #define cmax(x, y) (x = max(x, y)) #define cmin(x, y) (x = min(x, y)) template<typename it, typename bin_op> struct sparse_table { using T = typename remove_reference<decltype(*declval<it>())>::type; vector<vector<T>> t; bin_op f; sparse_table(it first, it last, bin_op op) : t(1), f(op) { int n = distance(first, last); t.assign(32-__builtin_clz(n), vector<T>(n)); t[0].assign(first, last); for (int i = 1; i < t.size(); i++) for (int j = 0; j < n-(1<<i)+1; j++) t[i][j] = f(t[i-1][j], t[i-1][j+(1<<(i-1))]); } // returns f(a[l..r]) in O(1) time T query(int l, int r) { int h = floor(log2(r-l+1)); return f(t[h][l], t[h][r-(1<<h)+1]); } }; int main() { ios_base::sync_with_stdio(0); cin.tie(0); int t; cin >> t; while (t--) { ll n; cin >> n; vector<ll> a(n), d(n-1); for (int i = 0; i < n; i++) cin >> a[i]; for (int i = 0; i < n-1; i++) d[i] = abs(a[i+1]-a[i]); sparse_table g(all(d), [](ll x, ll y){ return __gcd(x, y); }); int j = 0, ans = 1; for (int i = 0; i < n-1; i++) { while (j <= i && g.query(j, i) == 1) j++; cmax(ans, i-j+2); } cout << ans << " "; } }
1548
C
The Three Little Pigs
Three little pigs from all over the world are meeting for a convention! Every minute, a triple of 3 new pigs arrives on the convention floor. After the $n$-th minute, the convention ends. The big bad wolf has learned about this convention, and he has an attack plan. At some minute in the convention, he will arrive and eat exactly $x$ pigs. Then he will get away. The wolf wants Gregor to help him figure out the number of possible attack plans that involve eating exactly $x$ pigs for various values of $x$ ($1 \le x \le 3n$). Two attack plans are considered different, if they occur at different times or if the sets of little pigs to eat are different. Note that all queries are independent, that is, the wolf does not eat the little pigs, he only makes plans!
Convert the word problem into a combinatorics equation. Write the equation for some $x$. Write the equation for $x+1$. How can we easily transition from the first to the second equation? For a given $x$, we want to calculate every third term in the sequence $\binom{i}{x}$ for $i \in [1,3N]$. Think about how computing the entire summation $\sum_{i=1}^{3N}{\binom{i}{x}}$ will help. For a given $x$, we want to compute $\sum_{i=1}^{N}{\binom{3i}{x}}$, which can be solved with a combinatorial dynamic programming. Define the array dp[x][m] (dimensions: $N+1\times 3$), which computes the sum $\sum_{i=0}^{N-1}{\binom{3i+m}{x}}$. Under this definition, $ans[x]=dp[x][0]+\binom{3N}{x}$, where ans is what we want to find. Under the definition of the dp, we can make the following mathematical observations. $dp[x][0]+dp[x][1]+dp[x][2]=\sum_{i=0}^{3N-1}{\binom{i}{x}}$, since term $i$ belongs to the array with $m=i\mod{3}$. This summation can be condensed with the Hockey Stick Identity into $\binom{3N}{x+1}$. By repeated uses of Pascal's Identity, we get equations (2) and (3), giving us a system of 3 equations with 3 new unknowns, which can easily be solved. $\sum_{m=0}^{2}{dp[x][m]}=\binom{3N}{x+1}$. $dp[x][1] = dp[x][0]+dp[x-1][0]$ $dp[x][2] = dp[x][1]+dp[x-1][1]$ The base case is that $dp[0][0]=dp[0][1]=dp[0][2]=N$. Each query can now be answered trivially. The time complexity is $\mathcal{O}(N+Q)$ with combinatorial precomputation. Convert the word problem into a combinatorics equation. How will the Binomial Theorem be helpful? Try and construct a polynomial $P(k)$ such as the coefficient $k^x$ in that polynomial represents the number of attack plans if the wolf wants to eat $x$ pigs. Notice that $P(k)$ is a geometric series, so FFT is unnecessary. Define the polynomial $P(k)=(1+k)^3+(1+k)^6+\cdot + (1+k)^{3N}$. The coefficient of $k^x$ in $P(k)$ is, by the Binomial theorem on each term of the polynomial, equal to $\binom{3}{x}+\binom{6}{x}+\dots + \binom{3N}{x}$. 
This is equal to ans[x] from the previous solution. The only thing left to do is quickly calculate $P(k)$. Due to the tight time limit, calculating the polynomial using FFT in $\mathcal{O}{(N\log{N})}$ is probably too slow. Instead, we notice that $P(k)$ is a geometric series. Using the geometric series formula, we get that $P(k)=\frac{(1+k)^{3N+3}-(1+k)^3}{(1+k)^3-1}$. The numerator and denominator of this fraction can be expanded in linear time. Then all we have to do is a polynomial long division. Once we have $P(k)$, we can answer all the queries trivially. The time complexity is $\mathcal{O}(N)$ with combinatorial precomputation.
[ "combinatorics", "dp", "fft", "math" ]
2,500
//Written by Agnimandur (Runtime: 218ms) #include <bits/stdc++.h> #define ll long long #define sz(x) ((int) (x).size()) #define all(x) (x).begin(), (x).end() #define vi vector<int> #define vl vector<long long> #define REP(i,a) for (int i = 0; i < (a); i++) #define add push_back using namespace std; const ll MOD = 1000000007LL; int ni() { int x; cin >> x; return x; } struct Combo { vl facs; vl invfacs; int N; Combo(int N) { this->N=N; facs.assign(N+1,0); invfacs.assign(N+1,0); facs[0] = 1; for (int i = 1; i <= N; i++) { facs[i] = (facs[i-1]*i)%MOD; } invfacs[N] = power(facs[N],MOD-2); for (int i = N-1; i >= 0; i--) { invfacs[i] = (invfacs[i+1]*(i+1))%MOD; } } ll choose(int n, int k) { if (n<0||k<0||n<k) return 0LL; ll denInv = (invfacs[k]*invfacs[n-k])%MOD; ll ans = (facs[n]*denInv)%MOD; return ans; } ll power(ll x, ll y) { ll ans = 1; x %= MOD; while (y > 0) { if (y%2==1) ans = (ans*x)%MOD; y /= 2; x = (x*x)%MOD; } return ans; } }; int main() { ios::sync_with_stdio(false); cin.tie(0); int N = ni(); Combo c(3*N); ll DIV3 = c.power(3,MOD-2); const int M = 1000000; ll dp[3*M+1][3]; REP(i,3) dp[0][i] = N; for (int x = 1; x < 3*N; x++) { //solve the system of equations dp[x][0] = ((c.choose(3*N,x+1)-2*dp[x-1][0]-dp[x-1][1]+3*MOD)*DIV3)%MOD; dp[x][1] = (dp[x][0]+dp[x-1][0])%MOD; dp[x][2] = (dp[x][1]+dp[x-1][1])%MOD; } int Q = ni(); REP(q,Q) { int x = ni(); if (x==3*N) cout << "1 "; else cout << (dp[x][0]+c.choose(3*N,x))%MOD << ' '; } }
1548
D1
Gregor and the Odd Cows (Easy)
{This is the easy version of the problem. The only difference from the hard version is that in this version all coordinates are \textbf{even}.} There are $n$ fence-posts at distinct coordinates on a plane. It is guaranteed that no three fence posts lie on the same line. There are an infinite number of cows on the plane, one at every point with integer coordinates. Gregor is a member of the Illuminati, and wants to build a triangular fence, connecting $3$ distinct existing fence posts. A cow \textbf{strictly} inside the fence is said to be enclosed. If there are an \textbf{odd} number of enclosed cows and the area of the fence is an \textbf{integer}, the fence is said to be interesting. Find the number of interesting fences.
"Enclosed cows" are just interior points. It's clear that Pick's Theorem will be useful. Manipulate Pick's Theorem into a modular equation. The number of boundary points between $(x_1,y_1)$ and $(x_2,y_2)$ is $\gcd{(x_1-x_2,y_1-y_2)}$. Working with relevant formulas, find a simple condition between two points, such that they can be treated as equivalent. Every set of 3 fenceposts forms a lattice triangle We want to find the number of such lattice triangles that have an odd number of interior points. Since all the coordinates are even, the area is automatically even as well. By Pick's Theorem, $area=I+\frac{B}{2}-1$, so $2\cdot area=2I+B-2$. $I$ is the number of interior points, and $B$ is the number of boundary points, for an arbitrary triangle formed by 3 fenceposts. Let $A=2\cdot area$, so $A=2I+B-2$. Since $I$ is odd, taking both sides modulo 4 we get that $A \equiv B\mod{4}$. Since the area is an even integer, $A$ is a multiple of 4, so we get that $A\equiv B\equiv 0\mod{4}$. Let's define the $\textbf{boundary count}$ of a segment to be the number of lattice points on it, minus 1. It can be proven that the boundary count of the segment connecting $(x_1,y_1)$ and $(x_2,y_2)$ is $\gcd{(|x_2-x_1|,|y_2-y_1|)}$. In this problem, we only care about what the boundary count is modulo 4. Since all the coordinates are even, the GCD is guaranteed to be even, so the boundary count is either 0 or 2 mod 4. For the segment connecting $(x_1,y_1)$ and $(x_2,y_2)$ call its boundary count $b$. $b\equiv 0\mod{4}$ IFF $x_1\equiv x_2 \mod{4}$ AND $y_1\equiv y_2 \mod{4}$. $b\equiv 2\mod{4}$ in all other situations. $\textbf{Key Idea:}$ Turn fence post $(x,y)$ into $(x\mod{4},y\mod{4})$. Writing the area of a triangle via the shoelace formula, it becomes obvious that the area mod 4 won't change when the coordinates are modded by 4. Additionally, by our work above, $B$ (the sum of the boundary counts for the 3 sides of the triangle) is entirely dependent on the coordinates mod 4. 
Let cnt[x][y] be an array counting the number of points that fall into each category based on the key idea above. Since all coordinates are even, only 4 elements in the $cnt$ array will be nonzero. We can fix each point of the triangle into one of the categories, quickly calculate $A$ and $B$, and solve the problem. Be careful not to overcount triangles. The time complexity is $\mathcal{O}(N)$, with a small constant for the counting.
[ "bitmasks", "geometry", "math", "number theory" ]
2,300
//Written by Agnimandur (Runtime: 31ms) #include <bits/stdc++.h> #define ll long long #define REP(i,a) for (int i = 0; i < (a); i++) using namespace std; int ni() { int x; cin >> x; return x; } ll choose(ll n, ll k) { if (n<k) return 0LL; if (k==2) return n*(n-1)/2; else return n*(n-1)*(n-2)/6; } // number of boundary points between two points, mod 4 int boundary(int x1, int y1, int x2, int y2) { if (x1==x2&&y1==y2) return 0; else return 2; } int main() { ios::sync_with_stdio(false); cin.tie(0); int N = ni(); ll cnt[2][2] = {{0,0},{0,0}}; REP(i,N) { int x = ni(); int y = ni(); cnt[(x%4)/2][(y%4)/2] += 1; } ll ans = 0; for (int s1 = 0; s1 < 4; s1++) { for (int s2 = s1; s2 < 4; s2++) { for (int s3 = s2; s3 < 4; s3++) { int x1 = s1/2; int y1 = s1%2; int x2 = s2/2; int y2 = s2%2; int x3 = s3/2; int y3 = s3%2; int b1 = boundary(x1,y1,x2,y2); int b2 = boundary(x1,y1,x3,y3); int b3 = boundary(x2,y2,x3,y3); int B = (b1+b2+b3)%4; if (B==0) { if (s1==s2&&s2==s3) { ans += choose(cnt[x1][y1],3); } else if (s1==s2) { ans += choose(cnt[x1][y1],2)*cnt[x3][y3]; } else if (s2==s3) { ans += choose(cnt[x2][y2],2)*cnt[x1][y1]; } else { ans += cnt[x1][y1]*cnt[x2][y2]*cnt[x3][y3]; } } } } } cout << ans << ' '; }
1548
D2
Gregor and the Odd Cows (Hard)
{This is the hard version of the problem. The only difference from the easy version is that in this version the coordinates can be \textbf{both} odd and even.} There are $n$ fence-posts at distinct coordinates on a plane. It is guaranteed that no three fence posts lie on the same line. There are an infinite number of cows on the plane, one at every point with integer coordinates. Gregor is a member of the Illuminati, and wants to build a triangular fence, connecting $3$ distinct existing fence posts. A cow \textbf{strictly} inside the fence is said to be enclosed. If there are an \textbf{odd} number of enclosed cows and the area of the fence is an \textbf{integer}, the fence is said to be interesting. Find the number of interesting fences.
Solve the easy version of the problem. Taking each coordinate modulo 4 no longer works. This is because if $\Delta{x}$ or $\Delta{y}$ are odd, there's no way to predict whether the boundary count (see editorial of the easy version) is 1 or 3 mod 4. By playing with Pick's Theorem, you should find that $B$ (the total number of boundary points) has to be even. Let $b_1$, $b_2$, and $b_3$ be the boundary count of the three sides of a lattice triangle. Lets count the answer. Fix a point. Fix the boundary count mod 4 of the two sides adjacent to that point. Fix more things. In Hint 4, we fixed a point. It's critical that you fix the boundary count of the side opposite that point to be even. First read (and understand!) the editorial for the easy version of the problem. Definition: The $\textbf{modularity}$ of a point (x,y) is (x%4,y%4). Definition: The $\textbf{boundary count}$ of a segment is the number of lattice points on that segment, minus 1. Lets precompute the array cnt[i][x][y][b] (dimensions $N\times 4\times 4\times 4$), which equals the number of other points respective to points[i] with modularity $(x,y)$ and with a boundary count mod 4 equal to $b$. This precomputation can be done in $\mathcal{O}(N^2 \log{10^7})$ time, by iterating over every pair of points. The boundary count of the segment $(x_1,y_1)$ to $(x_2,y_2)$ is $\gcd{(|x_2-x_1|,|y_2-y_1|)}$. Consider the number of boundary points on the three sides of a triangle, and call them $b_1,b_2,b_3$. Clearly, $B=b_1+b_2+b_3$. The key idea is that since $B$ is even, wlog we can assume that $b_3$ is even, and that $b_1 \equiv b_2 \mod{2}$. The other key idea is that it is easy to figure out if the boundary count is congruent to 0 or 2 mod 4 based on the modularities of the coordinates alone, but it's impossible if the boundary count is odd. Let's fix a point, and count the number of triangles that involve this point. 
Let's also iterate over all the relevant modularities of the other two points in the triangle, which range from $(0,0)$ up to $(3,3)$. Let's also make sure that there are an even number of boundary points on the side opposite the fixed point, which we know almost nothing about. It can be shown that $b_3 \equiv 0\mod{4}$ IFF $(x_1,y_1)=(x_2,y_2)$, and that $b_3 \equiv 2\mod{4}$ IFF $x_1,x_2$ have the same parity, and $y_1,y_2$ have the same parity, and the previous case doesn't apply. Finally, make sure that the parity of the boundary count of the two sides next to the fixed point are the same. Using the precomputed array $cnt$ and the formula for $b_3$, we can quickly find $B$ in this case. Using the Shoelace Formula, we can quickly find what $A$ is modulo 4. If $A$ is even and equal to $B$, then we know that $I$ must be odd (in the easy version of the problem, we found that $A \equiv B \mod{4}$ IFF $I$ is odd), so all triangles in this class are interesting. Thus, we add to the answer a number roughly equal to $cnt[i][x_1][y_1][b_1]\cdot cnt[i][x_2][y_2][b_2]$ (it could be lower due to the potential overcount, if two points fall into the same bucket of the array). Lastly, be sure not to overcount triangles! In my code, I count each of the EEE triangles ($b_1,b_2,b_3$ all even) 3 times, and the OOE triangles once. The time complexity is $\mathcal{O}(N^2 \log{10^7})$ (the main part of the solution is approximately $\mathcal{O}(512N)$). Implementation Note: The bottleneck of the algorithm is computing the GCDs. The simplest optimization that is sufficient for AC is to only calculate $\frac{N^2}{2}$ GCDs instead of all $N^2$ (since the GCD is commutative). Java requires additional optimizations, such as precomputing small GCDs.
[ "brute force", "geometry", "math", "number theory" ]
3,300
//Written by Agnimandur (Runtime: 2886ms) #include <bits/stdc++.h> #define ll long long #define sz(x) ((int) (x).size()) #define all(x) (x).begin(), (x).end() #define vi vector<int> #define vl vector<long long> #define pii pair<int, int> #define pll pair<ll,ll> #define REP(i,a) for (int i = 0; i < (a); i++) #define add push_back using namespace std; int ni() { int x; cin >> x; return x; } ll nl() { ll x; cin >> x; return x; } double nd() { double x; cin >> x; return x; } string next() { string x; cin >> x; return x; } ll area(ll x1, ll y1, ll x2, ll y2, ll x3, ll y3) { return abs(x1*y2+x2*y3+x3*y1-x2*y1-x3*y2-x1*y3)&3; } ll boundary(ll x1, ll y1, ll x2, ll y2) { return __gcd(abs(x1-x2),abs(y1-y2))&3; } int main() { ios::sync_with_stdio(false); cin.tie(0); int N = ni(); vector<pll> nums; vector<pii> mods; REP(i,N) { ll x = nl(); ll y = nl(); nums.add({x,y}); mods.add({(int)(x&3),(int)(y&3)}); } const int MAX_N = 6000; int cnt[MAX_N][4][4][4] = {0}; for (int i = 0; i < N; i++) { for (int j = i+1; j < N; j++) { int b = (int)boundary(nums[i].first,nums[i].second,nums[j].first,nums[j].second); cnt[i][mods[j].first][mods[j].second][b] += 1; cnt[j][mods[i].first][mods[i].second][b] += 1; } } ll eee = 0; ll ooe = 0; for (int i = 0; i < N; i++) { for (int b1 = 0; b1 < 4; b1++) { for (int b2 = b1; b2 < 4; b2 += 2) { for (int s1 = 0; s1 < 16; s1++) { int firsts2 = (b1<b2) ? 0 : s1; for (int s2 = firsts2; s2 < 16; s2++) { int x1 = s1/4; int y1 = s1%4; int x2 = s2/4; int y2 = s2%4; if (x1%2 != x2%2 || y1%2 != y2%2) continue; int b3; int triangles; if (x1==x2 && y1==y2) { b3 = 0; if (b1==b2) triangles = cnt[i][x1][y1][b1]*(cnt[i][x1][y1][b2]-1)/2; else triangles = cnt[i][x1][y1][b1]*cnt[i][x1][y1][b2]; } else { b3 = 2; triangles = cnt[i][x1][y1][b1]*cnt[i][x2][y2][b2]; } int B = (b1+b2+b3)%4; if (area(nums[i].first,nums[i].second,x1,y1,x2,y2) == B) { if (b1%2==0&&b2%2==0) eee += triangles; else ooe += triangles; } } } } } } ll ans = eee/3+ooe; cout << ans << ' '; }
1548
E
Gregor and the Two Painters
Two painters, Amin and Benj, are repainting Gregor's living room ceiling! The ceiling can be modeled as an $n \times m$ grid. For each $i$ between $1$ and $n$, inclusive, painter Amin applies $a_i$ layers of paint to the entire $i$-th row. For each $j$ between $1$ and $m$, inclusive, painter Benj applies $b_j$ layers of paint to the entire $j$-th column. Therefore, the cell $(i,j)$ ends up with $a_i+b_j$ layers of paint. Gregor considers the cell $(i,j)$ to be badly painted if $a_i+b_j \le x$. Define a badly painted region to be a \textbf{maximal} connected component of badly painted cells, i. e. a connected component of badly painted cells such that all adjacent to the component cells are not badly painted. Two cells are considered adjacent if they share a side. Gregor is appalled by the state of the finished ceiling, and wants to know the number of badly painted regions.
For simplicity let's assume that all $a_i$ are distinct (and similarly, all $b_j$). If this is not the case, we may break ties arbitrarily. Say that two badly painted cells are directly reachable from each other if they are in the same row or column and all cells in between them are also badly painted. Also, define the value of the cell at $(i,j)$ to be $a_i+b_j$. Call a badly painted cell a $\textbf{representative}$ if no cell directly reachable from it has a smaller value than it. $\textbf{Claim:}$ Every connected component of badly painted cells contains exactly one representative. $\textbf{Proof:}$ Clearly every connected component contains at least one representative; consider the cell(s) with the minimum value contained within it. To show that every connected component contains \textit{exactly} one representative, suppose that we are given a representative $(i,j)$ that is directly reachable from $(i',j)$ for all $i_l\le i'\le i_r$ and $(i,j')$ for all $j_l\le j'\le j_r$, where $a_i=\min_{i_l\le i'\le i_r}(a_{i'})$ and $b_j=\min_{j_l\le j'\le j_r}(b_{j'})$. Then the connected component containing $(i,j)$ is completely contained within the rectangle $[i_l,i_r]\times [j_l,j_r]$, and $(i,j)$ is the unique cell with the minimum value within that rectangle. This implies that a representative is always the unique cell with the minimum value within its connected component. It remains to count the number of representatives. For each $i$, let $lo_i$ be the maximum index less than $i$ such that $a_{lo_i}<a_i$ and $hi_i$ be the minimum index greater than $i$ such that $a_{hi_i}<a_i$. Then define $na_i$ to be $\min(\max_{i'\in [lo_i,i]}a_{i'},\max_{i'\in [i,hi_i]}a_{i'})$. Any path from a cell in row $i$ to a cell in the same column with lower value must pass through a row with value at least $na_i$. Define $nb_j$ for each $j$ similarly.
It can be shown that $(i,j)$ is a representative if and only if the following conditions hold: $a_i+b_j\le X$ $na_i+b_j>X$ $a_i+nb_j>X$ Computing $na$ and $nb$ can be done in $\mathcal{O}(N\log N+M\log M)$ with any data structure supporting range min/max queries (e.g. sparse tables), or in $O(N+M)$ with a stack. It remains to count the number of pairs $(i,j)$ that satisfy these conditions given $a$, $na$, $b$, and $nb$. First initialize two binary indexed trees $T_a$ and $T_b$. Then sort the pairs $(na_i-a_i,a_i),(nb_j-b_j,b_j)$ in decreasing order. Now for every pair in the order, if it is of the form $(na_i-a_i,a_i)$, then add to the answer the number of elements of $T_b$ that are in the range $(X-na_i,X-a_i]$, and add $a_i$ to $T_a$. The reasoning for the case $(nb_j-b_j,b_j)$ is similar (query $T_a$, update $T_b$). The time complexity is $\mathcal{O}(N\log{N}+M\log{M})$ for sorting the pairs and working with the two BITs.
[ "data structures", "divide and conquer", "graphs", "greedy", "math" ]
3,400
//Written by Benq (Runtime: 187ms) #include <bits/stdc++.h> using namespace std; using ll = long long; using db = long double; // or double, if TL is tight using str = string; // yay python! using pi = pair<int,int>; using pl = pair<ll,ll>; using pd = pair<db,db>; using vi = vector<int>; using vb = vector<bool>; using vl = vector<ll>; using vd = vector<db>; using vs = vector<str>; using vpi = vector<pi>; using vpl = vector<pl>; using vpd = vector<pd>; #define tcT template<class T #define tcTU tcT, class U // ^ lol this makes everything look weird but I'll try it tcT> using V = vector<T>; tcT, size_t SZ> using AR = array<T,SZ>; tcT> using PR = pair<T,T>; // pairs #define mp make_pair #define f first #define s second // vectors // oops size(x), rbegin(x), rend(x) need C++17 #define sz(x) int((x).size()) #define bg(x) begin(x) #define all(x) bg(x), end(x) #define rall(x) x.rbegin(), x.rend() #define sor(x) sort(all(x)) #define rsz resize #define ins insert #define ft front() #define bk back() #define pb push_back #define eb emplace_back #define pf push_front #define rtn return #define lb lower_bound #define ub upper_bound tcT> int lwb(V<T>& a, const T& b) { return int(lb(all(a),b)-bg(a)); } // loops #define FOR(i,a,b) for (int i = (a); i < (b); ++i) #define F0R(i,a) FOR(i,0,a) #define ROF(i,a,b) for (int i = (b)-1; i >= (a); --i) #define R0F(i,a) ROF(i,0,a) #define rep(a) F0R(_,a) #define each(a,x) for (auto& a: x) const int MOD = 1e9+7; // 998244353; const int MX = 2e5+5; const ll INF = 1e18; // not too close to LLONG_MAX const db PI = acos((db)-1); const int dx[4] = {1,0,-1,0}, dy[4] = {0,1,0,-1}; // for every grid problem!! 
mt19937 rng((uint32_t)chrono::steady_clock::now().time_since_epoch().count()); template<class T> using pqg = priority_queue<T,vector<T>,greater<T>>; // bitwise ops // also see https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html constexpr int pct(int x) { return __builtin_popcount(x); } // # of bits set constexpr int bits(int x) { // assert(x >= 0); // make C++11 compatible until USACO updates ... return x == 0 ? 0 : 31-__builtin_clz(x); } // floor(log2(x)) constexpr int p2(int x) { return 1<<x; } constexpr int msk2(int x) { return p2(x)-1; } ll cdiv(ll a, ll b) { return a/b+((a^b)>0&&a%b); } // divide a by b rounded up ll fdiv(ll a, ll b) { return a/b-((a^b)<0&&a%b); } // divide a by b rounded down tcT> bool ckmin(T& a, const T& b) { return b < a ? a = b, 1 : 0; } // set a = min(a,b) tcT> bool ckmax(T& a, const T& b) { return a < b ? a = b, 1 : 0; } tcTU> T fstTrue(T lo, T hi, U f) { hi ++; assert(lo <= hi); // assuming f is increasing while (lo < hi) { // find first index such that f is true T mid = lo+(hi-lo)/2; f(mid) ? hi = mid : lo = mid+1; } return lo; } tcTU> T lstTrue(T lo, T hi, U f) { lo --; assert(lo <= hi); // assuming f is decreasing while (lo < hi) { // find first index such that f is true T mid = lo+(hi-lo+1)/2; f(mid) ? 
lo = mid : hi = mid-1; } return lo; } tcT> void remDup(vector<T>& v) { // sort and remove duplicates sort(all(v)); v.erase(unique(all(v)),end(v)); } tcTU> void erase(T& t, const U& u) { // don't erase auto it = t.find(u); assert(it != end(t)); t.erase(it); } // element that doesn't exist from (multi)set #define tcTUU tcT, class ...U inline namespace Helpers { //////////// is_iterable // https://stackoverflow.com/questions/13830158/check-if-a-variable-type-is-iterable // this gets used only when we can call begin() and end() on that type tcT, class = void> struct is_iterable : false_type {}; tcT> struct is_iterable<T, void_t<decltype(begin(declval<T>())), decltype(end(declval<T>())) > > : true_type {}; tcT> constexpr bool is_iterable_v = is_iterable<T>::value; //////////// is_readable tcT, class = void> struct is_readable : false_type {}; tcT> struct is_readable<T, typename std::enable_if_t< is_same_v<decltype(cin >> declval<T&>()), istream&> > > : true_type {}; tcT> constexpr bool is_readable_v = is_readable<T>::value; //////////// is_printable // // https://nafe.es/posts/2020-02-29-is-printable/ tcT, class = void> struct is_printable : false_type {}; tcT> struct is_printable<T, typename std::enable_if_t< is_same_v<decltype(cout << declval<T>()), ostream&> > > : true_type {}; tcT> constexpr bool is_printable_v = is_printable<T>::value; } inline namespace Input { tcT> constexpr bool needs_input_v = !is_readable_v<T> && is_iterable_v<T>; tcTUU> void re(T& t, U&... u); tcTU> void re(pair<T,U>& p); // pairs // re: read tcT> typename enable_if<is_readable_v<T>,void>::type re(T& x) { cin >> x; } // default tcT> void re(complex<T>& c) { T a,b; re(a,b); c = {a,b}; } // complex tcT> typename enable_if<needs_input_v<T>,void>::type re(T& i); // ex. vectors, arrays tcTU> void re(pair<T,U>& p) { re(p.f,p.s); } tcT> typename enable_if<needs_input_v<T>,void>::type re(T& i) { each(x,i) re(x); } tcTUU> void re(T& t, U&... 
u) { re(t); re(u...); } // read multiple // rv: resize and read vectors void rv(size_t) {} tcTUU> void rv(size_t N, V<T>& t, U&... u); template<class...U> void rv(size_t, size_t N2, U&... u); tcTUU> void rv(size_t N, V<T>& t, U&... u) { t.rsz(N); re(t); rv(N,u...); } template<class...U> void rv(size_t, size_t N2, U&... u) { rv(N2,u...); } // dumb shortcuts to read in ints void decrement() {} // subtract one from each tcTUU> void decrement(T& t, U&... u) { --t; decrement(u...); } #define ints(...) int __VA_ARGS__; re(__VA_ARGS__); #define int1(...) ints(__VA_ARGS__); decrement(__VA_ARGS__); } inline namespace ToString { tcT> constexpr bool needs_output_v = !is_printable_v<T> && is_iterable_v<T>; // ts: string representation to print tcT> typename enable_if<is_printable_v<T>,str>::type ts(T v) { stringstream ss; ss << fixed << setprecision(15) << v; return ss.str(); } // default tcT> str bit_vec(T t) { // bit vector to string str res = "{"; F0R(i,sz(t)) res += ts(t[i]); res += "}"; return res; } str ts(V<bool> v) { return bit_vec(v); } template<size_t SZ> str ts(bitset<SZ> b) { return bit_vec(b); } // bit vector tcTU> str ts(pair<T,U> p); // pairs tcT> typename enable_if<needs_output_v<T>,str>::type ts(T v); // vectors, arrays tcTU> str ts(pair<T,U> p) { return "("+ts(p.f)+", "+ts(p.s)+")"; } tcT> typename enable_if<is_iterable_v<T>,str>::type ts_sep(T v, str sep) { // convert container to string w/ separator sep bool fst = 1; str res = ""; for (const auto& x: v) { if (!fst) res += sep; fst = 0; res += ts(x); } return res; } tcT> typename enable_if<needs_output_v<T>,str>::type ts(T v) { return "{"+ts_sep(v,", ")+"}"; } // for nested DS template<int, class T> typename enable_if<!needs_output_v<T>,vs>::type ts_lev(const T& v) { return {ts(v)}; } template<int lev, class T> typename enable_if<needs_output_v<T>,vs>::type ts_lev(const T& v) { if (lev == 0 || !sz(v)) return {ts(v)}; vs res; for (const auto& t: v) { if (sz(res)) res.bk += ","; vs tmp = ts_lev<lev-1>(t); 
res.ins(end(res),all(tmp)); } F0R(i,sz(res)) { str bef = " "; if (i == 0) bef = "{"; res[i] = bef+res[i]; } res.bk += "}"; return res; } } inline namespace Output { template<class T> void pr_sep(ostream& os, str, const T& t) { os << ts(t); } template<class T, class... U> void pr_sep(ostream& os, str sep, const T& t, const U&... u) { pr_sep(os,sep,t); os << sep; pr_sep(os,sep,u...); } // print w/ no spaces template<class ...T> void pr(const T&... t) { pr_sep(cout,"",t...); } // print w/ spaces, end with newline void ps() { cout << " "; } template<class ...T> void ps(const T&... t) { pr_sep(cout," ",t...); ps(); } // debug to cerr template<class ...T> void dbg_out(const T&... t) { pr_sep(cerr," | ",t...); cerr << endl; } void loc_info(int line, str names) { cerr << "Line(" << line << ") -> [" << names << "]: "; } template<int lev, class T> void dbgl_out(const T& t) { cerr << " " << ts_sep(ts_lev<lev>(t)," ") << " " << endl; } #ifdef LOCAL #define dbg(...) loc_info(__LINE__,#__VA_ARGS__), dbg_out(__VA_ARGS__) #define dbgl(lev,x) loc_info(__LINE__,#x), dbgl_out<lev>(x) #else // don't actually submit with this #define dbg(...) 0 #define dbgl(lev,x) 0 #endif } inline namespace FileIO { void setIn(str s) { freopen(s.c_str(),"r",stdin); } void setOut(str s) { freopen(s.c_str(),"w",stdout); } void setIO(str s = "") { cin.tie(0)->sync_with_stdio(0); // unsync C / C++ I/O streams // cin.exceptions(cin.failbit); // throws exception when do smth illegal // ex. try to read letter into int if (sz(s)) setIn(s+".in"), setOut(s+".out"); // for old USACO } } /** * Description: range sum queries and point updates for $$$D$$$ dimensions * Source: https://codeforces.com/blog/entry/64914 * Verification: SPOJ matsum * Usage: exttt{BIT<int,10,10>} gives 2D BIT * Time: O((\log N)^D) */ template <class T, int ...Ns> struct BIT { T val = 0; void upd(T v) { val += v; } T query() { return val; } }; template <class T, int N, int... 
Ns> struct BIT<T, N, Ns...> { BIT<T,Ns...> bit[N+1]; template<typename... Args> void upd(int pos, Args... args) { assert(pos > 0); for (; pos<=N; pos+=pos&-pos) bit[pos].upd(args...); } template<typename... Args> T sum(int r, Args... args) { T res=0; for (;r;r-=r&-r) res += bit[r].query(args...); return res; } template<typename... Args> T query(int l, int r, Args... args) { l = max(l,1); r = max(r,0); return sum(r,args...)-sum(l-1,args...); } }; /** * Description: 1D range minimum query. Can also do queries * for any associative operation in $$$O(1)$$$ with D\&C * Source: KACTL * Verification: * https://cses.fi/problemset/stats/1647/ * http://wcipeg.com/problem/ioi1223 * https://pastebin.com/ChpniVZL * Memory: O(N\log N) * Time: O(1) */ template<class T> struct RMQ { // floor(log_2(x)) int level(int x) { return 31-__builtin_clz(x); } vector<T> v; vector<vi> jmp; int comb(int a, int b) { // index of min return v[a]==v[b]?min(a,b):(v[a]>v[b]?a:b); } void init(const vector<T>& _v) { v = _v; jmp = {vi(sz(v))}; iota(all(jmp[0]),0); for (int j = 1; 1<<j <= sz(v); ++j) { jmp.pb(vi(sz(v)-(1<<j)+1)); F0R(i,sz(jmp[j])) jmp[j][i] = comb(jmp[j-1][i], jmp[j-1][i+(1<<(j-1))]); } } int index(int l, int r) { // get index of min element assert(l <= r); int d = level(r-l+1); return comb(jmp[d][l],jmp[d][r-(1<<d)+1]); } T query(int l, int r) { return v[index(l,r)]; } }; BIT<int,MX> B[2]; vi getNeed(vpi v) { vi need(sz(v),MOD); RMQ<pi> RA; RA.init(v); { vi st; F0R(i,sz(v)) { while (sz(st) && v[st.bk] > v[i]) st.pop_back(); if (sz(st)) ckmin(need[i],RA.query(st.bk,i).f); st.pb(i); } } { vi st; R0F(i,sz(v)) { while (sz(st) && v[st.bk] > v[i]) st.pop_back(); if (sz(st)) ckmin(need[i],RA.query(i,st.bk).f); st.pb(i); } } return need; } int main() { setIO(); int N,M,X; re(N,M,X); vpi a(N), b(M); F0R(i,N) { re(a[i].f), a[i].s = i; } F0R(i,M) re(b[i].f), b[i].s = i; // dbg("READ"); vi na = getNeed(a), nb = getNeed(b); dbg(na); dbg(nb); V<AR<int,3>> ival; F0R(i,N) 
ival.pb({na[i]-a[i].f,a[i].f,0}); F0R(i,M) ival.pb({nb[i]-b[i].f,b[i].f,1}); sort(rall(ival)); ll ans = 0; each(t,ival) { // dbg(t); ans += B[t[2]^1].query(X-t[0]-t[1]+1,X-t[1]); B[t[2]].upd(t[1],1); } ps(ans); }
1549
A
Gregor and Cryptography
Gregor is learning about RSA cryptography, and although he doesn't understand how RSA works, he is now fascinated with prime numbers and factoring them. Gregor's favorite \textbf{prime} number is $P$. Gregor wants to find two bases of $P$. Formally, Gregor is looking for two integers $a$ and $b$ which satisfy both of the following properties. - $P \bmod a = P \bmod b$, where $x \bmod y$ denotes the remainder when $x$ is divided by $y$, and - $2 \le a < b \le P$. Help Gregor find two bases of his favorite prime number!
Fix $a$ into a convenient constant. Since $P \ge 5$ and is also a prime number, we know that $P-1$ is an even composite number. An even composite number is guaranteed to have at least 2 unique divisors greater than 1. Let two of these divisors be $a$ and $b$. It is guaranteed that $P\mod{a} = P\mod{b} = 1$, and thus this selection is valid. For example, we can simply pick $a=2$ and $b=P-1$, and we will get a correct solution. The time complexity is $\mathcal{O}(Q)$.
[ "math", "number theory" ]
800
//Written by penguinhacker (Runtime: 15ms) #include <bits/stdc++.h> using namespace std; int main() { ios::sync_with_stdio(0); cin.tie(0); int t; cin >> t; while(t--) { int p; cin >> p; cout << "2 " << p-1 << " "; } return 0; }
1549
B
Gregor and the Pawn Game
There is a chessboard of size $n$ by $n$. The square in the $i$-th row from top and $j$-th column from the left is labelled $(i,j)$. Currently, Gregor has some pawns in the $n$-th row. There are also enemy pawns in the $1$-st row. On one turn, Gregor moves one of \textbf{his} pawns. A pawn can move one square up (from $(i,j)$ to $(i-1,j)$) if there is no pawn in the destination square. Additionally, a pawn can move one square diagonally up (from $(i,j)$ to either $(i-1,j-1)$ or $(i-1,j+1)$) if and only if there is an enemy pawn in that square. The enemy pawn is also removed. Gregor wants to know what is the maximum number of his pawns that can reach row $1$? Note that only Gregor takes turns in this game, and \textbf{the enemy pawns never move}. Also, when Gregor's pawn reaches row $1$, it is stuck and cannot make any further moves.
There is a very limited number of squares where each of Gregor's pawns could end up. Identify a greedy strategy to maximize the answer. The key insight is that due to the fact that there is only one row of enemy pawns, and those pawns never move, there are only $3$ possible columns where one of Gregor's pawns can end up in. We can solve this problem greedily, going from column $1$ to column $N$. At the current column $j$, if Gregor has a pawn in this column, then we greedily consider 3 cases. If there is an uncaptured enemy pawn in column $j-1$, mark that pawn as captured and increment the answer. Column $j-1$ will never be looked at again, so this decision is optimal. If there is no pawn in column $j$, just move Gregor's pawn forward, and increment the answer. If there is an uncaptured enemy pawn in column $j+1$, mark that pawn as captured and increment the answer. Otherwise, this pawn will not reach the first row. This greedy solution is guaranteed to produce the maximum possible answer. The time complexity is $\mathcal{O}(N)$. Write the problem as a graph problem. Valid captures or valid forward moves represent edges in the graph. We have two sets: Gregor's pawns, and destination squares. We also have some edges between these two sets. Maximum Matching. Each of Gregor's pawns can end up in one of 3 possible squares. If Gregor has a pawn in column $i$, it can end up in column $i-1$ or $i+1$ if there is an enemy pawn in those columns. Furthermore, it can remain in column $i$ if there is no enemy pawn in that column. Let's build a graph $G$, where each edge connects one of Gregor's pawns to a valid destination cell. Since every edge goes from a cell in the bottom row to a cell in the top row, $G$ is clearly bipartite. It's now clear that moving as many pawns to the end is equivalent to finding a maximum matching in $G$. The time complexity is $\mathcal{O}(N\sqrt{N})$ using the Hopcroft–Karp algorithm for maximum matching. ($G$ has at most $2N$ vertices, and $6N$ edges)
[ "dfs and similar", "dp", "flows", "graph matchings", "graphs", "greedy", "implementation" ]
800
//Written by arvindr9 (Runtime: 31ms) #include <bits/stdc++.h> using namespace std; int t; int main() { ios::sync_with_stdio(0); cin.tie(0); cin >> t; while (t--) { int n; cin >> n; string st, tt; cin >> st >> tt; int ans = 0; vector<bool> taken(n); for (int j = 0; j < n; j++) { if (tt[j] == '1') { for (int i = j - 1; i <= j + 1; i++) { if (i >= 0 and i < n) { if (!taken[i]) { if ((st[i] == '1' and i != j) or (st[i] == '0' and i == j)) { taken[i] = 1; ans++; break; } } } } } } cout << ans << " "; } }
1550
A
Find The Array
Let's call an array $a$ consisting of $n$ positive (greater than $0$) integers beautiful if the following condition is held for every $i$ from $1$ to $n$: either $a_i = 1$, or at least one of the numbers $a_i - 1$ and $a_i - 2$ exists in the array as well. For example: - the array $[5, 3, 1]$ is beautiful: for $a_1$, the number $a_1 - 2 = 3$ exists in the array; for $a_2$, the number $a_2 - 2 = 1$ exists in the array; for $a_3$, the condition $a_3 = 1$ holds; - the array $[1, 2, 2, 2, 2]$ is beautiful: for $a_1$, the condition $a_1 = 1$ holds; for every other number $a_i$, the number $a_i - 1 = 1$ exists in the array; - the array $[1, 4]$ is not beautiful: for $a_2$, neither $a_2 - 2 = 2$ nor $a_2 - 1 = 3$ exists in the array, and $a_2 \ne 1$; - the array $[2]$ is not beautiful: for $a_1$, neither $a_1 - 1 = 1$ nor $a_1 - 2 = 0$ exists in the array, and $a_1 \ne 1$; - the array $[2, 1, 3]$ is beautiful: for $a_1$, the number $a_1 - 1 = 1$ exists in the array; for $a_2$, the condition $a_2 = 1$ holds; for $a_3$, the number $a_3 - 2 = 1$ exists in the array. You are given a positive integer $s$. Find the minimum possible size of a beautiful array with the sum of elements equal to $s$.
The maximum sum we can construct with $n$ elements is $1 + 3 + 5 + 7 + \dots + 2n-1 = n^2$, so we need at least $\lceil\sqrt{s}\rceil$ elements to construct the sum equal to $s$. Let's show how to express $s$ with exactly $\lceil\sqrt{s}\rceil$ elements. Let $\lceil\sqrt{s}\rceil = d$. By taking $1 + 3 + 5 + 7 + \dots + 2d-3$, we achieve a sum of $(d-1)^2$ using $d - 1$ elements. $s - (d-1)^2$ is not less than $1$ and not greater than $2d-1$ (since $\sqrt{(d-1)^2} = d-1$, and $\sqrt{(d-1)^2 + 2d} > d$). Thus, we can just add $s - (d-1)^2$ to our array, and the sum becomes exactly $s$. So, the solution is to find the minimum $n$ such that $n^2 \ge s$.
[ "greedy", "math" ]
800
import math


def maxSum(x):
    """Maximum sum a beautiful array of x elements can reach: 1 + 3 + ... + (2x-1) = x**2."""
    return x ** 2


def getAns(x):
    """Return the minimum size of a beautiful array summing to x.

    That is the smallest n with n*n >= x (i.e. ceil(sqrt(x)) for x >= 1).
    The original implementation scanned n = 1, 2, 3, ... which is O(sqrt(x))
    per query; math.isqrt gives the same result in constant time.
    """
    if x <= 1:
        # The original loop returns 1 for any x <= 1 (1**2 >= x already holds).
        return 1
    # isqrt(x - 1) is the largest n with n*n <= x - 1,
    # so n + 1 is the smallest n with n*n >= x.
    return math.isqrt(x - 1) + 1


def main():
    """Read t test cases (one integer s each) and print the minimal array size."""
    t = int(input())
    for _ in range(t):
        print(getAns(int(input())))


if __name__ == "__main__":
    # Guarded so the module can be imported without consuming stdin.
    main()
1550
B
Maximum Cost Deletion
You are given a string $s$ of length $n$ consisting only of the characters 0 and 1. You perform the following operation until the string becomes empty: choose some \textbf{consecutive} substring of \textbf{equal} characters, erase it from the string and glue the remaining two parts together (any of them can be empty) in the same order. For example, if you erase the substring 111 from the string {1\textbf{111}10}, you will get the string 110. When you delete a substring of length $l$, you get $a \cdot l + b$ points. Your task is to calculate the maximum number of points that you can score in total, if you have to make the given string empty.
Let $l_1, l_2, \dots, l_k$ be the lengths of the deleted substrings, where $l_i$ is the length of the substring deleted at the $i$-th step. Then the number of points will be equal to $\sum\limits_{i=1}^{k} (a \cdot l_i + b)$ or $a\sum\limits_{i=1}^{k}l_i + bk$. The sum of all $l_i$ is equal to $n$ (because in the end we deleted the entire string), so the final formula has the form $an + bk$. Obviously, for $b \ge 0$, you should delete the characters one by one so that $k=n$. Now $b < 0$ and you have to delete the string in the minimum number of operations. Let the string $s$ consist of $m$ blocks of zeros and ones, then $\lfloor\frac{m}{2}\rfloor + 1$ is the minimum number of operations for which the entire string can be deleted. As long as the number of blocks is more than $2$, we will delete the second block, the number of blocks will decrease by $2$ after each such operation (the block that we delete will disappear, and the first and third blocks will merge into one).
[ "greedy", "math" ]
1,000
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1550B: the total score is always a*n + b*k where k is the number
// of deletions performed. For b >= 0 delete one character at a time (k = n);
// for b < 0 delete in the minimum possible number of operations, which equals
// floor(blocks / 2) + 1, where "blocks" is the number of maximal runs of
// equal characters. max() picks whichever k is better.
int main() {
    int tests;
    cin >> tests;
    for (int tc = 0; tc < tests; ++tc) {
        int n, a, b;
        string s;
        cin >> n >> a >> b >> s;
        // Count maximal blocks of equal adjacent characters.
        int blocks = 0;
        for (int i = 0; i < n; ++i) {
            if (i == 0 || s[i] != s[i - 1])
                ++blocks;
        }
        int best = n * a + max(n * b, (blocks / 2 + 1) * b);
        cout << best << '\n';
    }
    return 0;
}
1550
C
Manhattan Subarrays
Suppose you have two points $p = (x_p, y_p)$ and $q = (x_q, y_q)$. Let's denote the Manhattan distance between them as $d(p, q) = |x_p - x_q| + |y_p - y_q|$. Let's say that three points $p$, $q$, $r$ form a bad triple if $d(p, r) = d(p, q) + d(q, r)$. Let's say that an array $b_1, b_2, \dots, b_m$ is good if it is impossible to choose three \textbf{distinct} indices $i$, $j$, $k$ such that the points $(b_i, i)$, $(b_j, j)$ and $(b_k, k)$ form a bad triple. You are given an array $a_1, a_2, \dots, a_n$. Calculate the number of good subarrays of $a$. A subarray of the array $a$ is the array $a_l, a_{l + 1}, \dots, a_r$ for some $1 \le l \le r \le n$. Note that, according to the definition, subarrays of length $1$ and $2$ are good.
Let's figure out criteria for the bad triple $p$, $q$, $r$. It's not hard to prove that the triple is bad, iff point $q$ lies inside the bounding box of points $p$ and $r$. In other words, if $\min(x_p, x_r) \le x_q \le \max(x_p, x_r)$ and $\min(y_p, y_r) \le y_q \le \max(y_p, y_r)$. Now, looking at points $p = (a_i, i)$, $q = (a_j, j)$ and $r = (a_k, k)$ we can see that the bad situation may arise only if $i < j < k$ - so we can check only ordered triples. Looking closely at inequality $\min(a_i, a_k) \le a_j \le \max(a_i, a_k)$ we can note that there are two situations where $(i, j, k)$ forms a bad triple: when either $a_i \le a_j \le a_k$ or $a_i \ge a_j \ge a_k$. In other words, subarray is bad if and only if it contains either non-decreasing subsequence of length $3$ or non-increasing subsequence of length $3$. The final observation is that any sequence of length at least $5$ contains either non-decreasing or non-increasing subsequence of length $3$. It's not hard to prove it, either brute-forcing all possible variants (of relative orders) on paper, or searching/remembering the theorem that says it. As a result you need to check only subarrays of length at most $4$ whichever the way you want. The complexity is $O(n)$.
[ "brute force", "geometry", "greedy", "implementation" ]
1,700
// Codeforces 1550C (Manhattan Subarrays): count good subarrays.
// A subarray is bad iff it contains a monotone (non-increasing or
// non-decreasing) subsequence of length 3; any subarray of length >= 5 is
// bad, so the brute force below leaves each left end after O(1) extensions.
#include<bits/stdc++.h>

using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())
#define x first
#define y second

typedef long long li;
typedef long double ld;
typedef pair<li, li> pt;

const int INF = int(1e9);
const li INF64 = li(1e18);
const ld EPS = 1e-9;

int n;
vector<li> a;

// Reads one test case (n and the array); returns false on EOF.
inline bool read() {
    if(!(cin >> n))
        return false;
    a.resize(n);
    fore (i, 0, n)
        cin >> a[i];
    return true;
}

// Manhattan distance between two (value, index) points.
li d(const pt &a, const pt &b) {
    return abs(a.x - b.x) + abs(a.y - b.y);
}

// For every left end i, extend the right end j while the subarray stays good.
// Since subarrays [i, j-1] were already verified good, only triples whose
// last point is the new element j need checking; badness is monotone under
// extension, hence the break.
inline void solve() {
    li ans = 0;
    fore (i, 0, n) {
        fore (j, i, n) {
            if (i + 2 <= j) {
                bool ok = true;
                // Triple (i1, i2, j) is bad iff the middle point lies on a
                // Manhattan shortest path between the endpoints.
                fore (i1, i, j)
                    fore (i2, i1 + 1, j) {
                        if (d(pt(a[i1], i1), pt(a[j], j)) == d(pt(a[i1], i1), pt(a[i2], i2)) + d(pt(a[i2], i2), pt(a[j], j)))
                            ok = false;
                    }
                if (!ok) break;  // every longer subarray with this i is bad too
            }
            ans++;  // subarray [i, j] is good
        }
    }
    cout << ans << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int tt = clock();
#endif
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);

    int t;
    cin >> t;
    while(t--) {
        read();
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - tt << endl;
        tt = clock();
#endif
    }
    return 0;
}
1550
D
Excellent Arrays
Let's call an integer array $a_1, a_2, \dots, a_n$ good if $a_i \neq i$ for each $i$. Let $F(a)$ be the number of pairs $(i, j)$ ($1 \le i < j \le n$) such that $a_i + a_j = i + j$. Let's say that an array $a_1, a_2, \dots, a_n$ is excellent if: - $a$ is good; - $l \le a_i \le r$ for each $i$; - $F(a)$ is the maximum possible among all good arrays of size $n$. Given $n$, $l$ and $r$, calculate the number of excellent arrays modulo $10^9 + 7$.
Firstly, let's learn the structure of good array $a$ with maximum $F(a)$. Suppose, $a_i = i + k_i$, then $a_i + a_j = i + j$ $\Leftrightarrow$ $k_i = -k_j$. In other words, we can group $a_i$ by $|k_i|$ and pairs will appear only inside each group. It's easy to prove that if the group has size $m$ then it's optimal to split it in half: one with $+k_i$ and other with $-k_i$. Then the number of pairs inside the group will be equal to $\left\lfloor \frac{m}{2} \right\rfloor \cdot \left\lceil \frac{m}{2} \right\rceil$. It's also not hard to prove that in this case it's optimal to place all elements inside one group. In other words, it's optimal to make a half of all elements as $a_i = i + k$ and the other half as $a_i = i - k$ for some integer $k > 0$. Then $F(a) = \left\lfloor \frac{n}{2} \right\rfloor \cdot \left\lceil \frac{n}{2} \right\rceil$. To achieve maximum $F(a)$ the excellent array should also have this structure. Let $\mathit{half} = \left\lfloor \frac{n}{2} \right\rfloor$. For a fixed $k$ if $n$ is even then we should choose exactly $\mathit{half}$ positions $i$ to set as $a_i = i + k$, but if $n$ is odd, we can choose either $\mathit{half}$ or $\mathit{half} + 1$ positions. Let's analyze what happens with different $k$. Obviously, $k \ge 1$. While $k \le \min(1 - l, r - n)$ both $i + k$ and $i - k$ are in the segment $[l, r]$ for any $i$. In this case we can choose any $a_i$ as $i + k$, so there are exactly $\binom{n}{\mathit{half}}$ ways for even $n$ and $\binom{n}{\mathit{half}} + \binom{n}{\mathit{half} + 1}$ ways for odd $n$. When $k > \min(1 - l, r - n)$ then for $i \in [1, \mathit{lf})$ (where $\mathit{lf} = \max(1, l + k)$) there is only one choice - to set $a_i = i + k$. Analogically, for $i \in (\mathit{rg}, n]$ (where $\mathit{rg} = \min(n, r - k)$) there is only choice to set $a_i = i - k$. 
What remains is $\mathit{rg} - \mathit{lf} + 1$ elements without restrictions, so there are $\binom{\mathit{rg} - \mathit{lf} + 1}{\mathit{half} - (\mathit{lf} - 1)}$ ways to choose for even $n$ or $\binom{\mathit{rg} - \mathit{lf} + 1}{\mathit{half} - (\mathit{lf} - 1)} + \binom{\mathit{rg} - \mathit{lf} + 1}{\mathit{half} + 1 - (\mathit{lf} - 1)}$ ways for odd $n$. Note that it's convenient to say that $\binom{n}{k} = 0$ if $k < 0$ or $n < k$, so we don't need extra checks. Lastly, note that we can process all $k \in [1, \min(1 - l, r - n)]$ with one formula and there are only $O(n)$ of $k > \min(1 - l, r - n)$ with non-zero number of ways to choose, so we can iterate over all such $k$ straightforwardly. The total complexity is $O(n \log{\mathit{MOD}})$ because of precomputation of factorials and inverse factorials to calculate $\binom{n}{k}$.
[ "binary search", "combinatorics", "constructive algorithms", "implementation", "math", "sortings", "two pointers" ]
2,300
// Codeforces 1550D (Excellent Arrays): optimal arrays take a_i = i + k or
// a_i = i - k for one fixed k > 0, with (about) half of the positions on the
// "+k" side. The count of position choices is a binomial coefficient; all
// k <= min(1 - l, r - n) are unrestricted and handled by a single formula,
// larger k are iterated explicitly (only O(n) of them contribute).
#include<bits/stdc++.h>

using namespace std;

#define fore(i, l, r) for(int i = int(l); i < int(r); i++)
#define sz(a) int((a).size())

const int MOD = int(1e9) + 7;

// Reduce a into [0, MOD); callers only pass values within one MOD of range.
int norm(int a) {
    while (a >= MOD)
        a -= MOD;
    while (a < 0)
        a += MOD;
    return a;
}

// Modular multiplication via 64-bit intermediate.
int mul(int a, int b) {
    return int(a * 1ll * b % MOD);
}

// Fast exponentiation; used for inverse factorials via Fermat's little theorem.
int binPow(int a, int k) {
    int ans = 1;
    while (k > 0) {
        if (k & 1)
            ans = mul(ans, a);
        a = mul(a, a);
        k >>= 1;
    }
    return ans;
}

const int N = 200 * 1000 + 55;
int f[N], inf[N];  // factorials and inverse factorials modulo MOD

void precalc() {
    f[0] = inf[0] = 1;
    fore (i, 1, N) {
        f[i] = mul(f[i - 1], i);
        inf[i] = binPow(f[i], MOD - 2);
    }
}

// Binomial coefficient C(n, k); returns 0 for out-of-range k so call sites
// need no extra boundary checks.
int C(int n, int k) {
    if (k < 0 || n < k)
        return 0;
    return mul(f[n], mul(inf[n - k], inf[k]));
}

int n, l, r;

inline bool read() {
    if(!(cin >> n >> l >> r))
        return false;
    return true;
}

inline void solve() {
    int half = n / 2;
    // For every k in [1, st] both i + k and i - k fit in [l, r] for all i,
    // so each such k contributes C(n, half) ways
    // (plus C(n, half + 1) when n is odd and either split size is allowed).
    int st = min(1 - l, r - n);
    int ans = mul(st, C(n, half));
    if (n & 1)
        ans = norm(ans + mul(st, C(n, half + 1)));
    // For larger k, positions i < lf are forced to a_i = i + k and positions
    // i > rg are forced to a_i = i - k; only the middle rg + 1 - lf positions
    // remain free to choose.
    for (int k = st + 1; ; k++) {
        int lf = max(1, l + k);
        int rg = min(n, r - k);
        if (rg + 1 - lf < 0)
            break;  // no valid placement for this (or any larger) k
        ans = norm(ans + C(rg + 1 - lf, half - (lf - 1)));
        if (n & 1)
            ans = norm(ans + C(rg + 1 - lf, half + 1 - (lf - 1)));
    }
    cout << ans << endl;
}

int main() {
#ifdef _DEBUG
    freopen("input.txt", "r", stdin);
    int tt = clock();
#endif
    ios_base::sync_with_stdio(false);
    cin.tie(0), cout.tie(0);
    cout << fixed << setprecision(15);

    precalc();

    int t;
    cin >> t;
    while (t--) {
        read();
        solve();
#ifdef _DEBUG
        cerr << "TIME = " << clock() - tt << endl;
        tt = clock();
#endif
    }
    return 0;
}
1550
E
Stringforces
You are given a string $s$ of length $n$. Each character is either one of the first $k$ lowercase Latin letters or a question mark. You are asked to replace every question mark with one of the first $k$ lowercase Latin letters in such a way that the following value is maximized. Let $f_i$ be the maximum length substring of string $s$, which consists entirely of the $i$-th Latin letter. A substring of a string is a contiguous subsequence of that string. If the $i$-th letter doesn't appear in a string, then $f_i$ is equal to $0$. The value of a string $s$ is the minimum value among $f_i$ for all $i$ from $1$ to $k$. What is the maximum value the string can have?
Notice that if there are substrings of length $x$ for each letter, then there are also substrings of length $x-1$. Thus, the function on the answer is monotonous, so the binary search is applicable. Let's have some answer $x$ fixed by binary search. We have to place $k$ blocks of letters of length $x$ somewhere in a string. If we fix an order these blocks go into the string, then the greedy algorithm for placing them works. Put each block after the previous one but as far to the left as possible (the correctness can be proven by showing that picking not the furthest to the left position can't be more optimal). If there exists such an order that all blocks fit, then the answer is greater than or equal to $x$. The common transition is to move from iterating over permutations to dynamic programming over submasks. Let $dp[mask]$ be the smallest prefix of the string, such that all blocks of letters from the mask fit into this prefix. The transitions are the same: pick a new block and place it as early after that prefix as possible. So far the solution works pretty slow, since for each of $2^k$ masks we have to find the earliest possible position for a block. Let's use some precalculations to perform the transitions in $O(1)$. Notice that the transition doesn't depend on a mask, only on a length of the previous prefix. Thus, for every prefix and every letter, we can save the closest position for a block. Let $pos[i][j]$ be the closest position for a prefix of length $i$ and the $j$-th letter. $pos[i][j]$ is at least equal to $pos[i + 1][j]$. However, if the block can be placed at the $i$-th position, then it should be updated. That can happen if the closest occurrence of any letter except $j$ is not smaller than $j + x$. Thus, we can also maintain the closest occurrence of every letter. With some smart iterations, we can do the precalculations in $O(nk)$. The dynamic programming works in $O(2^k \cdot k)$ then. Overall complexity: $O((nk + 2^k \cdot k) \log n)$.
[ "binary search", "bitmasks", "brute force", "dp", "strings", "two pointers" ]
2,500
// Codeforces 1550E (Stringforces): binary search the answer x; check(x)
// decides whether one block of length x can be placed for every letter,
// using a bitmask DP over the set of letters already placed with an O(nk)
// precalculation of the earliest feasible block positions.
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

int n, k;
string s;

// Can a block of length d be placed for every one of the k letters?
bool check(int d){
    // lst[j]: smallest index >= current i holding a fixed (non-'?') letter j,
    // maintained while sweeping i from right to left.
    vector<int> lst(k, n);
    // pos[i][j]: end position (exclusive) of the earliest block of letter j
    // starting at index >= i; n + 1 means impossible.
    vector<vector<int>> pos(n + 1, vector<int>(k, n + 1));
    for (int i = n - 1; i >= 0; --i){
        if (s[i] != '?'){
            lst[s[i] - 'a'] = i;
        }
        // A block of letter j may start at i iff no fixed occurrence of any
        // OTHER letter lies in [i, i + d). 'cur' accumulates the closest
        // fixed occurrence of the letters scanned so far; the two passes
        // (ascending then descending over j) together cover all letters != j.
        int cur = n;
        forn(j, k){
            pos[i][j] = (i + d <= cur ? i + d : pos[i + 1][j]);
            cur = min(cur, lst[j]);
        }
        cur = n;
        for (int j = k - 1; j >= 0; --j){
            if (i + d > cur) pos[i][j] = pos[i + 1][j];
            cur = min(cur, lst[j]);
        }
    }
    // dp[mask]: smallest prefix length that can fit blocks for every letter
    // in mask; transition appends one more letter's block greedily leftmost.
    vector<int> dp(1 << k, n + 1);
    dp[0] = 0;
    forn(mask, 1 << k) if (dp[mask] < n + 1){
        forn(i, k) if (!((mask >> i) & 1)) dp[mask | (1 << i)] = min(dp[mask | (1 << i)], pos[dp[mask]][i]);
    }
    return dp[(1 << k) - 1] <= n;
}

int main() {
    cin >> n >> k;
    cin >> s;
    // check() is monotone in the block length, so binary search the largest
    // feasible value.
    int l = 1, r = n;
    int res = 0;
    while (l <= r){
        int m = (l + r) / 2;
        if (check(m)){
            res = m;
            l = m + 1;
        }
        else{
            r = m - 1;
        }
    }
    cout << res << endl;
    return 0;
}
1550
F
Jumping Around
There is an infinite pond that can be represented with a number line. There are $n$ rocks in the pond, numbered from $1$ to $n$. The $i$-th rock is located at an integer coordinate $a_i$. The coordinates of the rocks are pairwise distinct. The rocks are numbered in the increasing order of the coordinate, so $a_1 < a_2 < \dots < a_n$. A robot frog sits on the rock number $s$. The frog is programmable. It has a base jumping distance parameter $d$. There also is a setting for the jumping distance range. If the jumping distance range is set to some integer $k$, then the frog can jump from some rock to any rock at a distance from $d - k$ to $d + k$ inclusive in any direction. The distance between two rocks is an absolute difference between their coordinates. You are assigned a task to implement a feature for the frog. Given two integers $i$ and $k$ determine if the frog can reach a rock number $i$ from a rock number $s$ performing a sequence of jumps with the jumping distance range set to $k$. The sequence can be arbitrarily long or empty. You will be given $q$ testcases for that feature, the $j$-th testcase consists of two integers $i$ and $k$. Print "Yes" if the $i$-th rock is reachable and "No" otherwise. You can output "YES" and "NO" in any case (for example, strings "yEs", "yes", "Yes" and "YES" will be recognized as a positive answer).
Notice that increasing $k$ only increases the range of the jump distances in both directions. So every rock that was reachable with some $k$, will be reachable with $k+1$ as well. Thus, let's try to find the smallest possible value of $k$ to reach each rock. Let's imagine this problem as a graph one and consider the following algorithm. For every pair of rocks, make an edge of weight equal to the smallest $k$ required to jump from one to another. For some rocks $v$ and $u$ that is $w = |d - |a_v - a_u||$. How to check the reachability with these edges? Well, if the jump range value is $k$, then there should exist of path by edges of weight no more than $k$. So we can start with an empty graph, first add the edges of the smallest weight, then the second smallest, and so on. The first time a pair of vertices becomes reachable from each other is the minimum such weight. An experienced reader can notice the resemblance with the Kruskal algorithm for finding the minimum spanning tree. After the spanning tree is constructed, the minimum $k$ is the maximum value on a path between the vertices. The issue is that Kruskal requires $O(n^2 \log n^2)$ to construct an MST for a complete graph. Prim can make it $O(n^2)$, which is still too much. Thus, the solution is to resort to Boruvka. On each iteration of Boruvka we have to find the smallest weight edge from each component to some other one. We can solve it the following way. Maintain a sorted set of rocks coordinates. The smallest weight edges are the ones that are the closest to $d$ distance from each rock. So we could query a lower_bound of $a_i - d$ and $a_i + d$ on each rock $i$ to find them. Don't forget to look at the both sides of the lower_bound result. However, the issue is that we can bump into the rocks from the same component. Thus, let's process components one by one. When processing a component, first remove all its vertices from the set. Then query the edges for each vertex. Then add the vertices back. 
This way, only the edges to other components will be considered. That makes it an $O(n \log^2 n)$ construction, with one log from the number of Boruvka iterations and another $n \log n$ from finding the edges. That should pass if coded carefully enough, and that is basically the intended solution. Still, there exists a $O(n \log n)$ construction. That will require a $O(n)$ algorithm for finding the edges. So there are four possible edges for each rock $i$: the closest to $a_i - d$ from the left, from the right and the same for $a_i + d$. Let's consider only the first case, the rest will be similar. The coordinates are sorted beforehand, and we are processing the rocks from left to right. We can maintain a pointer to the latest encountered rock to the left of $a_i - d$. The issue with it being from the same component is still there. Let's go around it by also storing the second latest encountered rock such that it's from the different component from the actual latest one. This can be updated in the same manner one calculates the second maximum of the array. Now you just have to do that for all four cases. This two pointers approach makes it $O(n)$ for each iteration, thus making the construction $O(n \log n)$. Since the queries ask for a path from some fixed vertex $s$ to a certain vertex $i$, it's the same as calculating the maximum edge on a path from the root of the tree to each vertex. Can be done with a simple dfs. The only thing left is to check if the minimum possible $k$ is less than or equal to the one provided in the query. Overall complexity: $O(n \log^2 n + q)$ or $O(n \log n + q)$.
[ "binary search", "data structures", "divide and conquer", "dp", "dsu", "graphs", "shortest paths" ]
2,700
// Codeforces 1550F (Jumping Around): build an MST of the implicit complete
// graph whose edge weight between rocks v, u is |d - |a_v - a_u||, using
// Boruvka; each component's cheapest outgoing edge is found with two-pointer
// sweeps (closest rocks around a_i - d and a_i + d, tracking the latest
// candidate plus the latest candidate from a different component). A query
// (i, k) then asks whether the maximum edge weight on the tree path s -> i
// is at most k.
#include <bits/stdc++.h>

#define forn(i, n) for (int i = 0; i < int(n); i++)

using namespace std;

const int INF = 1e9;

// Adjacency-list edge of the spanning tree: endpoint u, weight w.
struct edge2{
    int u, w;
};

vector<vector<edge2>> g;

// Candidate Boruvka edge; endpoints stored in sorted order for comparison.
struct edge3{
    int v, u, w;
    edge3(){}
    edge3(int v, int u, int w) : v(v), u(u), w(w) { if (v > u) swap(v, u); }
};

// Order by weight first, endpoints as tie-breakers.
bool operator <(const edge3 &a, const edge3 &b){
    if (a.w != b.w) return a.w < b.w;
    if (a.v != b.v) return a.v < b.v;
    return a.u < b.u;
}

// DSU with path compression and union by size.
vector<int> p, rk;

int getp(int a){
    return a == p[a] ? a : p[a] = getp(p[a]);
}

bool unite(int a, int b){
    a = getp(a), b = getp(b);
    if (a == b) return false;
    if (rk[a] < rk[b]) swap(a, b);
    rk[a] += rk[b];
    p[b] = a;
    return true;
}

// mn[v]: maximum edge weight on the tree path from s to v, i.e. the minimum
// jump-range k with which rock v is reachable.
vector<int> mn;

void dfs(int v, int p, int d){
    mn[v] = d;
    for (auto e : g[v]) if (e.u != p) dfs(e.u, v, max(d, e.w));
}

int main() {
    int n, q, s, d;
    scanf("%d%d%d%d", &n, &q, &s, &d);
    --s;
    vector<int> a(n);  // rock coordinates, given sorted ascending
    forn(i, n) scanf("%d", &a[i]);

    p.resize(n);
    rk.resize(n);
    forn(i, n) rk[i] = 1, p[i] = i;

    g.resize(n);
    int cnt = n;   // number of DSU components remaining
    int iter = 0;  // Boruvka round counter (not otherwise used)
    while (cnt > 1){
        ++iter;
        // es[root]: best (cheapest) outgoing edge found for that component.
        vector<edge3> es(n, edge3(-1, -1, INF));
        int j, mn1, mn2;

        // Fully compress paths so p[x] can be read directly as the root in
        // the sweeps below (no unite() happens until after all four passes).
        forn(i, n) getp(i);

        // Pass 1 (left to right): rocks j with a[i] - a[j] > d, i.e. the
        // closest rock strictly left of a[i] - d. mn1 is the latest such
        // rock; mn2 the latest one from a different component than mn1.
        j = 0, mn1 = -1, mn2 = -1;
        forn(i, n){
            while (j < n && a[i] - a[j] > d){
                if (mn1 == -1 || p[mn1] == p[j]) mn1 = j;
                else{
                    mn2 = mn1;
                    mn1 = j;
                }
                ++j;
            }
            if (mn1 != -1 && p[mn1] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn1, i, abs(abs(a[i] - a[mn1]) - d)));
            }
            if (mn2 != -1 && p[mn2] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn2, i, abs(abs(a[i] - a[mn2]) - d)));
            }
        }

        // Pass 2 (left to right): closest rock at or left of a[i] + d.
        j = 0, mn1 = -1, mn2 = -1;
        forn(i, n){
            while (j < n && a[j] - a[i] <= d){
                if (mn1 == -1 || p[mn1] == p[j]) mn1 = j;
                else{
                    mn2 = mn1;
                    mn1 = j;
                }
                ++j;
            }
            if (mn1 != -1 && p[mn1] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn1, i, abs(abs(a[i] - a[mn1]) - d)));
            }
            if (mn2 != -1 && p[mn2] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn2, i, abs(abs(a[i] - a[mn2]) - d)));
            }
        }

        // Pass 3 (right to left): closest rock strictly right of a[i] + d.
        j = n - 1, mn1 = -1, mn2 = -1;
        for (int i = n - 1; i >= 0; --i){
            while (j >= 0 && a[j] - a[i] > d){
                if (mn1 == -1 || p[mn1] == p[j]) mn1 = j;
                else{
                    mn2 = mn1;
                    mn1 = j;
                }
                --j;
            }
            if (mn1 != -1 && p[mn1] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn1, i, abs(abs(a[i] - a[mn1]) - d)));
            }
            if (mn2 != -1 && p[mn2] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn2, i, abs(abs(a[i] - a[mn2]) - d)));
            }
        }

        // Pass 4 (right to left): closest rock at or right of a[i] - d.
        j = n - 1, mn1 = -1, mn2 = -1;
        for (int i = n - 1; i >= 0; --i){
            while (j >= 0 && a[i] - a[j] <= d){
                if (mn1 == -1 || p[mn1] == p[j]) mn1 = j;
                else{
                    mn2 = mn1;
                    mn1 = j;
                }
                --j;
            }
            if (mn1 != -1 && p[mn1] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn1, i, abs(abs(a[i] - a[mn1]) - d)));
            }
            if (mn2 != -1 && p[mn2] != p[i]){
                es[p[i]] = min(es[p[i]], edge3(mn2, i, abs(abs(a[i] - a[mn2]) - d)));
            }
        }

        // Merge components along the selected cheapest outgoing edges.
        for (auto e : es) if (e.v != -1){
            if (unite(e.v, e.u)){
                --cnt;
                g[e.v].push_back({e.u, e.w});
                g[e.u].push_back({e.v, e.w});
            }
        }
    }

    // Propagate path maxima from the start rock, then answer queries.
    mn.resize(n);
    dfs(s, -1, 0);
    forn(_, q){
        int i, k;
        scanf("%d%d", &i, &k);
        --i;
        puts(mn[i] <= k ? "Yes" : "No");
    }
    return 0;
}
1551
A
Polycarp and Coins
Polycarp must pay \textbf{exactly} $n$ burles at the checkout. He has coins of two nominal values: $1$ burle and $2$ burles. Polycarp likes both kinds of coins equally. So he doesn't want to pay with more coins of one type than with the other. Thus, Polycarp wants to minimize the difference between the count of coins of $1$ burle and $2$ burles being used. Help him by determining two non-negative integer values $c_1$ and $c_2$ which are the number of coins of $1$ burle and $2$ burles, respectively, so that the total value of that number of coins is \textbf{exactly} $n$ (i. e. $c_1 + 2 \cdot c_2 = n$), and the absolute value of the difference between $c_1$ and $c_2$ is as little as possible (i. e. you must minimize $|c_1-c_2|$).
Let's initialize variables $c_1$ and $c_2$ by the same value of $\lfloor{\frac{n}{3}}\rfloor$. Then we need to gather additionally the remainder of dividing $n$ by $3$. If the remainder is equal to $0$, we don't need to gather anything else because the variables $c_1$ and $c_2$ have been already set to the correct answer: $|c_1 - c_2| = 0$ because $c_1 = c_2$ and no absolute value can be less than $0$. Otherwise, $|c_1 - c_2| \neq 0$: if $c_1 = c_2$ held, then $n = c_1 + 2 \times c_2 = 3 \times c_1$, but that's impossible if $n$ isn't divisible by 3. If the remainder is equal to $1$, then we need to gather additionally $1$ burle using one coin of $1$ burle so let's increase $c_1$ by $1$. In this case, $c_1 = c_2 + 1$, hence $|c_1 - c_2| = 1$, this value cannot be less than $1$, as it was proved above. If the remainder is equal to $2$, then we need to gather additionally $2$ burles using one coin of $2$ burles so let's increase $c_2$ by $1$. In this case, $c_2 = c_1 + 1$, hence $|c_1 - c_2| = 1$, this value cannot be less than $1$. There are no other remainders of dividing by $3$ so these cases cover the whole solution.
[ "greedy", "math" ]
800
# Codeforces 1551A: pay exactly n burles with c1 one-burle and c2 two-burle
# coins while minimizing |c1 - c2|. Start from n // 3 coins of each kind and
# hand the remainder (0, 1 or 2 burles) to the matching coin type.
t = int(input())
for _ in range(t):
    n = int(input())
    base, rem = divmod(n, 3)
    ones, twos = base, base
    if rem == 1:
        ones += 1   # one extra 1-burle coin covers remainder 1
    elif rem == 2:
        twos += 1   # one extra 2-burle coin covers remainder 2
    print(ones, twos)
1551
B1
Wonderful Coloring - 1
This is a simplified version of the problem B2. Perhaps you should read the problem B2 before you start solving B1. Paul and Mary have a favorite string $s$ which consists of lowercase letters of the Latin alphabet. They want to paint it using pieces of chalk of two colors: red and green. Let's call a coloring of a string wonderful if the following conditions are met: - each letter of the string is either painted in exactly one color (red or green) or isn't painted; - each two letters which are painted in the same color are different; - the number of letters painted in red is equal to the number of letters painted in green; - the number of painted letters of this coloring is \textbf{maximum} among all colorings of the string which meet the first three conditions. E. g. consider a string $s$ equal to "kzaaa". One of the wonderful colorings of the string is shown in the figure. \begin{center} {\small The example of a wonderful coloring of the string "kzaaa".} \end{center} Paul and Mary want to learn by themselves how to find a wonderful coloring of the string. But they are very young, so they need a hint. Help them find $k$ — the number of red (or green, these numbers are equal) letters in a wonderful coloring.
Let's calculate the number of letters which occur exactly once in the string and letters that occur more than once - $c_1$ and $c_2$, respectively. If a letter occurs more than once, one of its occurrences may be painted in red and another one may be painted in green. We cannot paint all other occurrences because there will be two equal letters painted in one color, but this is unacceptable by the statement. So there are no more than $c_2$ occurrences of letters that occur more than once to be painted in red. Let's select $c_2$ such occurrences and paint them. We need to paint additionally the letters which occur exactly once by meeting the same conditions as we meet painting the whole string. There's no way to paint these letters and not meet the first two conditions. So we must select the maximal count of the letters so that we will be able to paint some set of remaining letters in green so that the number of red letters will be equal to the number of green letters. This number is equal to $\lfloor \frac{c_1}{2} \rfloor$. So the final answer is equal to $c_2 + \lfloor \frac{c_1}{2} \rfloor$.
[ "greedy", "strings" ]
800
#include <bits/stdc++.h>
using namespace std;

// Codeforces 1551B1: letters occurring more than once contribute one red and
// one green occurrence each; letters occurring exactly once are paired up.
// Answer k = (#letters with count > 1) + floor((#letters with count 1) / 2).
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        string s;
        cin >> s;
        // Frequency table of the 26 lowercase letters, zero-initialized.
        array<int, 26> freq{};
        for (char c : s)
            ++freq[c - 'a'];
        int once = 0, repeated = 0;
        for (int f : freq) {
            if (f == 1)
                ++once;
            else if (f > 1)
                ++repeated;
        }
        cout << (repeated + once / 2) << endl;
    }
    return 0;
}
1551
B2
Wonderful Coloring - 2
This problem is an extension of the problem "Wonderful Coloring - 1". It has quite many differences, so you should read this statement completely. Recently, Paul and Mary have found a new favorite sequence of integers $a_1, a_2, \dots, a_n$. They want to paint it using pieces of chalk of $k$ colors. The coloring of a sequence is called wonderful if the following conditions are met: - each element of the sequence is either painted in one of $k$ colors or isn't painted; - each two elements which are painted in the same color are different (i. e. there's no two equal values painted in the same color); - let's calculate for each of $k$ colors the number of elements painted in the color — all calculated numbers must be equal; - the total number of painted elements of the sequence is the \textbf{maximum} among all colorings of the sequence which meet the first three conditions. E. g. consider a sequence $a=[3, 1, 1, 1, 1, 10, 3, 10, 10, 2]$ and $k=3$. One of the wonderful colorings of the sequence is shown in the figure. \begin{center} {\small The example of a wonderful coloring of the sequence $a=[3, 1, 1, 1, 1, 10, 3, 10, 10, 2]$ and $k=3$. Note that one of the elements isn't painted.} \end{center} Help Paul and Mary to find a wonderful coloring of a given sequence $a$.
Since we must use exactly $k$ colors, each element that occurs in the sequence may have no more than $k$ painted occurrences. Let's select for each element $x$ $min(k, cnt_x)$ its occurrences where $cnt_x$ is the number of all its occurrences in the sequence. Let $b_1, b_2, \dots, b_m$ be a sequence of all elements that occur in the sequence $a$, but in the sequence $b$ they will occur only once. Let's create a $1$-indexed array $p$ in which we will add sequentially indices of the selected occurrences of $b_1$ in the sequence $a$, then the selected occurrences of $b_2$, and so on till $b_m$. Currently, $p$ is a set of occurrences, which wonderful coloring is a wonderful coloring of the whole sequence $a$ because if we want to paint an occurrence outside $p$, we can do it only by selecting an occurrence of the same element in $p$ which we will not paint so that no more than $k$ occurrences will be painted. We must use exactly $k$ colors and paint for each color an equal number of occurrences, hence if we want to paint all occurrences from $p$, we must remove from it the minimum number of occurrences so that the size of the array $p$ will be divided by $k$ (i. e. remove the number of occurrences equal to the remainder of dividing the size of $p$ by $k$). We can remove any occurrences, for example, let's delete it from the suffix of $p$. Currently, we can paint all occurrences from $p$ using the following rule: the occurrence $p_i$ we must paint in the color with a number $((i - 1) \% k) + 1$ where $\%$ takes the remainder of dividing the left operand by the right operand. So all occurrences from $p$ will be painted and all $k$ colors will be used. Since all occurrences of one element belong to one subsegment of $p$ and their number isn't greater than $k$, they will be painted in different colors. It may be so that the array $p$ before painting will be empty. In this case, the wonderful coloring of $a$ doesn't contain any painted element.
[ "binary search", "constructive algorithms", "data structures", "greedy" ]
1,400
// Codeforces 1551B2 (Wonderful Coloring - 2): keep at most k occurrences of
// each value, round the total kept count down to a multiple of k, and paint
// the kept occurrences with colors 1..k cyclically.
#include <bits/stdc++.h>

using namespace std;

const int MAX_N = 200 * 1000 + 13;

int ans[MAX_N];                 // ans[i]: color of element i (0 = unpainted)
map<int, vector<int>> indices;  // value -> first min(k, count) positions

int main() {
    int t;
    cin >> t;
    while (t--) {
        int n, k;
        cin >> n >> k;
        indices.clear();
        memset(ans, 0, n * sizeof(ans[0]));
        for (int i = 0; i < n; i++) {
            int x;
            cin >> x;
            // More than k equal values can never all be painted: there are
            // only k colors and equal values must get distinct colors.
            if (indices[x].size() < k)
                indices[x].push_back(i);
        }
        int m = 0;  // total number of kept occurrences
        for (auto e : indices)
            m += e.second.size();
        // Each color must be used equally often, so paint a multiple of k.
        // Assuming k <= n (per the constraints — TODO confirm),
        // sum(min(cnt_x, k)) >= min(n, k) = k, so m >= k > 0 after rounding.
        m -= m % k;
        int color = 0;
        for (auto e : indices)
            for (auto i : e.second) {
                // Occurrences of one value are consecutive here and number at
                // most k, so the cyclic assignment never repeats a color
                // within equal values.
                ans[i] = ++color;
                color %= k;
                if (--m == 0)
                    goto _output;  // painted the required amount; rest stay 0
            }
_output:
        for (int i = 0; i < n; i++)
            cout << ans[i] << ' ';
        cout << '\n';
    }
    return 0;
}
1551
C
Interesting Story
Stephen Queen wants to write a story. He is a very unusual writer, he uses only letters 'a', 'b', 'c', 'd' and 'e'! To compose a story, Stephen wrote out $n$ words consisting of the first $5$ lowercase letters of the Latin alphabet. He wants to select the \textbf{maximum} number of \textbf{words} to make an \textbf{interesting} story. Let a story be a sequence of words that are not necessarily different. A story is called interesting if there exists a letter which occurs among all words of the story more times than all other letters together. For example, the story consisting of three words "bac", "aaada", "e" is interesting (the letter 'a' occurs $5$ times, all other letters occur $4$ times in total). But the story consisting of two words "aba", "abcde" is not (no such letter that it occurs more than all other letters in total). You are given a sequence of $n$ words consisting of letters 'a', 'b', 'c', 'd' and 'e'. Your task is to choose the maximum number of them to make an interesting story. If there's no way to make a non-empty story, output $0$.
Let $f(s, c)$ be the number of the occurrences of the letter $c$ in the word $s$ minus the number of the occurrences of all other letters in total. Since for each two words $s_1$ and $s_2$ the number of the occurrences of a letter in the word $s_1 + s_2$ is the sum of the numbers of its occurrences in $s_1$ and $s_2$, the equality $f(s_1 + s_2, c) = f(s_1, c) + f(s_2, c)$ is true ($s_1 + s_2$ means the concatenation of $s_1$ and $s_2$). Consider a sequence of words $s_1, s_2, \ldots, s_n$. A story consisting of words $s_{i_1}, s_{i_2}, \ldots, s_{i_m}$ is interesting if and only if there's a letter $c$ such that $f(s_{i_1} + s_{i_2} + \ldots + s_{i_m}, c) > 0$ - it exactly means that there's a letter which occurs more times than all other in total. So we are interested in searching for a letter $c$ such that exists a positive integer $m$ - a maximal number of words $s_{i_1}, s_{i_2}, \ldots, s_{i_m}$ such that $\sum\limits_{j = 1}^{m} f(s_{i_j}, c) = f(s_{i_1} + s_{i_2} + \ldots + s_{i_m}, c) > 0$. Suppose we have a set of words that form an interesting story and where $c$ is the letter having more occurrences than all other letters in total. Suppose we can add to it one of few words. We had better add a word $s$ such that $f(s, c)$ is maximal to be able to add more words in the future. So the problem has the following solution: for each letter $c$ of the Latin alphabet and for each word $s_i$ let's calculate $f(s, c)$. Then let's iterate over all letters $c$, take a sequence $f(s_1, c), f(s_2, c), \ldots, f(s_n, c)$ and sort it in descending order. Let's initialize an interesting story by a set of a single word corresponding to the first element of the sequence. If there's no word $s$ such that $f(s, c) > 0$, then there's no non-empty interesting story containing some words of the given set. Otherwise, let's take the next elements of the sequence sequentially while the sum of $f(s, c)$ over all taken words $s$ stays greater than zero. 
Let's select a letter such that the corresponding taken set is maximal over all letters. Finally, we should print the set's size. The solution consists of two phases: the calculation of all $f(s, c)$ (works in $O(L \times \sum\limits_{i = 1}^{n} |s_i|)$ where $L$ is the alphabet's size, $|s|$ is the lengths of a string $s$) and building a maximal interesting story for each letter $c$ (sorting and a greedy algorithm - $O(L \times n \times \log n)$).
[ "greedy", "sortings", "strings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

// CF "Interesting Story": for every letter c and every word, the value
// 2*occ(c) - len(word) is positive exactly when c occurs more often than
// all other letters of the word combined. A story is interesting for c
// iff the chosen words' values sum to a positive number, so greedily
// take the largest values first.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n;
        cin >> n;
        // score[i][c] = 2 * occurrences(c in word i) - |word i|
        vector<array<int, 26>> score(n);
        for (auto& row : score) {
            string word;
            cin >> word;
            row.fill(-(int)word.size());
            for (char ch : word) row[ch - 'a'] += 2;
        }
        int best = 0;
        for (int c = 0; c < 26; c++) {
            vector<int> vals(n);
            for (int i = 0; i < n; i++) vals[i] = score[i][c];
            sort(vals.begin(), vals.end(), greater<int>());
            // No word alone keeps the sum positive -> letter c is hopeless.
            if (vals[0] <= 0) continue;
            long long running = vals[0];
            int taken = 1;
            // Extend while the prefix sum stays positive.
            while (taken < n && running > 0) running += vals[taken++];
            if (running <= 0) taken--;  // last addition broke positivity
            best = max(best, taken);
        }
        cout << best << "\n";
    }
    return 0;
}
1551
D2
Domino (hard version)
The only difference between this problem and D1 is that you don't have to provide the way to construct the answer in D1, but you have to do it in this problem. There's a table of $n \times m$ cells ($n$ rows and $m$ columns). The value of $n \cdot m$ is even. A domino is a figure that consists of two cells having a common side. It may be horizontal (one of the cells is to the right of the other) or vertical (one of the cells is above the other). You need to place $\frac{nm}{2}$ dominoes on the table so that exactly $k$ of them are horizontal and all the other dominoes are vertical. The dominoes cannot overlap and must fill the whole table.
Suppose $n$ and $m$ are even. A necessary and sufficient condition of existence of the answer is that $k$ is even. Let's prove the sufficient condition. If the count of the horizontal dominoes is even, then we can combine them and vertical dominoes to blocks of size $2 \times 2$ (the number of the vertical dominoes is even, too, if $k$ is even). If $n$ and $m$ are even, we can fill the table with these blocks. The description of the locations of the dominoes may be printed as follows: consider the table is a chessboard where a cell is a block of two dominoes. Consider the leftmost topmost cell of the board is black. If a cell of the board is black, let's mark one of the dominoes of the block with the letter "a" and the other one with the letter "b". If a cell of the board is white, let's mark one of the dominoes of the block with the letter "c" and the other one with the letter "d". There will be no situation that some two cells of the table are marked with one letter but belong to different dominoes. Let's prove the necessary condition. The number of cells in a column ($n$) is even, so the number of cells that belong to vertical dominoes is even because cells of each vertical domino may be either belong to the column or not belong at the same time. So the number of cells that belong to horizontal dominoes is even. Let's cross out all cells that belong to vertical dominoes and let's find the leftmost column having cells that haven't been crossed out. It's the leftmost column with such cells so the pairwise cells of the non-crossed out cells belong to the column to the right of the found one. The number of such cells in the right column is equal to the number of found cells so it's even and the number of found horizontal dominoes is even, too. Let's cross out the found cells and the pairwise cells. The number of non-crossed out cells in the right column will be even. The number of crossed-out horizontal dominoes will be even, too. 
Let's repeat this procedure until all the dominoes will be crossed out. In every step, we have crossed out the even number of horizontal dominoes, hence the total count of horizontal dominoes is even. Suppose $n$ is odd, hence $m$ is even. In this case, every column contains an odd number of cells, whereas the number of cells that belong to vertical dominoes is even. So the number of cells that belong to horizontal dominoes is odd. Consider the leftmost column and find a cell of it that belongs to a horizontal domino (it must be found because the number of such cells is odd so it isn't equal to $0$). Let's find the pairwise cell and cross out both cells. Currently, the two columns will have an even number of non-crossed-out cells. Let's repeat the procedure until all columns will have even non-crossed-out cells. We will cross out $m$ cells and $\frac{m}{2}$ dominoes. So the necessary condition is that the number of horizontal dominoes ($k$) is at least $\frac{m}{2}$. Let's extend the necessary condition with the following condition: the value of $k - \frac{m}{2}$ is even. Consider the table that we've become after the previous procedure where each column has exactly one crossed-out cell. Let's start the procedure we've done in the case of even both $n$ and $m$. The procedure can be started on our table because each column of the table has an even number of non-crossed-out cells. As a result of the procedure, we will cross out an even count of horizontal dominoes, so the value of $k - \frac{m}{2}$ is even. Let's build an answer if the conditions $k \ge \frac{m}{2}$ and $k - \frac{m}{2}$ is even are met. Let's place in the topmost row $\frac{m}{2}$ horizontal dominoes and mark their cells as follows: the first domino will be marked with "x", the second one - with "y", the third one - with "x", and so on. As the result, the region of $n - 1$ rows and $m$ columns will be unfilled. Both values are even, and the value of $k - \frac{m}{2}$ is even, too. 
So let's fill the region as if it's a separate table having even numbers of rows and columns. As it was proved above, it's possible to do. The set of letters used for the region and set of the letters used for the topmost row don't have common elements, so there will be no cells that are marked with one letter but belong to different dominoes. The case of odd $m$ (hence, $n$ is even) is similar to the previous one - let's transpose the table (it will have $m$ rows and $n$ columns), swap the values of $k$ and $\frac{nm}{2} - k$, solve the case above and transpose the table back to have $n$ rows and $m$ columns.
[ "constructive algorithms", "implementation", "math" ]
2,100
#include <bits/stdc++.h>
using namespace std;

// Board buffer: each row is printed as a C string, so one extra slot per
// row holds the terminating '\0'.
char board[128][128];

// CF "Domino (hard version)": place kh horizontal and nm/2 - kh vertical
// dominoes. Odd dimension is handled by pre-filling one boundary row or
// column; the remaining even-by-even region is tiled with 2x2 blocks of
// two parallel dominoes, so both remaining counts must be even.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n, m, horiz;
        cin >> n >> m >> horiz;
        int vert = n * m / 2 - horiz;

        if (n % 2 == 1) {
            // Odd row count: the bottom row must be covered by m/2
            // horizontal dominoes, letters alternating 'y','x','y',...
            horiz -= m / 2;
            if (horiz < 0) { cout << "NO\n"; continue; }
            for (int j = 0; j < m / 2; j++) {
                char mark = (j % 2 ? 'x' : 'y');
                board[n - 1][2 * j] = mark;
                board[n - 1][2 * j + 1] = mark;
            }
        } else if (m % 2 == 1) {
            // Odd column count: fill the rightmost column with n/2
            // vertical dominoes.
            vert -= n / 2;
            if (vert < 0) { cout << "NO\n"; continue; }
            for (int i = 0; i < n / 2; i++) {
                char mark = (i % 2 ? 'x' : 'y');
                board[2 * i][m - 1] = mark;
                board[2 * i + 1][m - 1] = mark;
            }
        }

        // Each 2x2 block holds two parallel dominoes, so both counts must
        // be even to tile the remaining even-by-even region.
        if (horiz % 2 != 0 || vert % 2 != 0) { cout << "NO\n"; continue; }

        for (int i = 0; i < n / 2; i++)
            for (int j = 0; j < m / 2; j++) {
                // Checkerboard the letter pairs so that equal letters in
                // adjacent blocks never touch.
                bool odd = ((i + j) % 2 != 0);
                char first = odd ? 'a' : 'b';
                char second = odd ? 'c' : 'd';
                if (horiz) {
                    horiz -= 2;  // block of two horizontal dominoes
                    board[2 * i][2 * j] = board[2 * i][2 * j + 1] = first;
                    board[2 * i + 1][2 * j] = board[2 * i + 1][2 * j + 1] = second;
                } else {
                    // block of two vertical dominoes
                    board[2 * i][2 * j] = board[2 * i + 1][2 * j] = first;
                    board[2 * i][2 * j + 1] = board[2 * i + 1][2 * j + 1] = second;
                }
            }

        cout << "YES\n";
        for (int i = 0; i < n; i++) {
            board[i][m] = '\0';
            cout << board[i] << '\n';
        }
    }
    return 0;
}
1551
E
Fixed Points
Consider a sequence of integers $a_1, a_2, \ldots, a_n$. In one move, you can select any element of the sequence and delete it. After an element is deleted, all elements to the right are shifted to the left by $1$ position, so there are no empty spaces in the sequence. So after you make a move, the sequence's length decreases by $1$. The indices of the elements after the move are recalculated. E. g. let the sequence be $a=[3, 2, 2, 1, 5]$. Let's select the element $a_3=2$ in a move. Then after the move the sequence will be equal to $a=[3, 2, 1, 5]$, so the $3$-rd element of the new sequence will be $a_3=1$ and the $4$-th element will be $a_4=5$. You are given a sequence $a_1, a_2, \ldots, a_n$ and a number $k$. You need to find the minimum number of moves you have to make so that in the resulting sequence there will be \textbf{at least} $k$ elements that are equal to their indices, i. e. the resulting sequence $b_1, b_2, \ldots, b_m$ will contain at least $k$ indices $i$ such that $b_i = i$.
Let's use the concept of dynamic programming. Let's create an array $dp$ ($0$-indexed) with size of $(n + 1) \times (n + 1)$. $dp[i][j]$ will contain the maximal number of the elements equal to their indices if we have considered the first $i$ elements of the sequence $a$ and have not deleted $j$ elements. Let's fill the array with zeroes, then we will increase the elements of the array for different $i$ and $j$. Let's start the $for$-loop with parameter $i$ from $0$ to $n - 1$ and the internal one with parameter $j$ from $0$ to $i$. Consider an element $a_{i + 1}$. We can delete or not delete it. If we delete this element, the number of the elements equal to their indices will not be increased and the number of the non-deleted element will not be increased, too. It means that the answer for $dp[i + 1][j]$ may be updated with $dp[i][j]$. Since we are interested in a maximum answer, we rewrite $dp[i + 1][j]$ only if $dp[i][j]$ is greater than $dp[i + 1][j]$. Suppose we don't delete this element. We haven't deleted previously $j$ elements so $a_{i + 1}$ will have the index $(j + 1)$ and there will be $j + 1$ non-deleted elements if we consider $i + 1$ elements so we must update $dp[i + 1][j + 1]$. If $a_{i + 1} = j + 1$ (i. e. an element equal to its index is found), let's update $dp[i + 1][j + 1]$ with $dp[i][j] + 1$. Otherwise, we should update it with $dp[i][j]$. Remember that update may be done only if we rewrite the less value with the greater value. Let's build the answer as follows. We need to minimize the number of deleted elements (maximize the number of non-deleted elements) so that the number of the elements equal to their indices is at least $k$. Consider only the elements of $dp$ having the first index $i = n$. Let's start a $for$-loop in the descending order of $j$. If $dp[n][j] \ge k$, $j$ is the maximum number of elements that we will not delete, so the answer is $n - j$. 
If we will not find $j$ such that $dp[n][j] \ge k$, there's no desired sequence of moves so the answer is $-1$. The algorithm works in $O(n^2)$.
[ "binary search", "brute force", "dp" ]
2,000
#include <bits/stdc++.h>
using namespace std;

const int LIMIT = 6000;
// best[i][j]: considering the first i elements with exactly j of them
// kept, the maximum count of kept elements equal to their new 1-based
// index. Static: too large for the stack.
int best[LIMIT][LIMIT];
int val[LIMIT];

// CF "Fixed Points": minimum deletions so that at least k survivors
// satisfy b_i = i. Classic keep/delete DP, O(n^2) per test.
int main() {
    int tests;
    cin >> tests;
    while (tests--) {
        int n, k;
        cin >> n >> k;
        for (int i = 1; i <= n; i++) cin >> val[i];
        // Only cells with j <= i are ever read; clear exactly those.
        for (int i = 0; i <= n; i++)
            fill(best[i], best[i] + i + 1, 0);
        for (int i = 0; i < n; i++)
            for (int j = 0; j <= i; j++) {
                // Option 1: delete element i+1 — nothing changes.
                best[i + 1][j] = max(best[i + 1][j], best[i][j]);
                // Option 2: keep it — its index among survivors is j+1.
                int gain = (val[i + 1] == j + 1) ? 1 : 0;
                best[i + 1][j + 1] = max(best[i + 1][j + 1], best[i][j] + gain);
            }
        // Maximize survivors (minimize deletions) subject to >= k fixed points.
        int answer = -1;
        for (int kept = n; kept >= 0; kept--)
            if (best[n][kept] >= k) { answer = n - kept; break; }
        cout << answer << '\n';
    }
    return 0;
}
1551
F
Equidistant Vertices
A tree is an undirected connected graph without cycles. You are given a tree of $n$ vertices. Find the number of ways to choose exactly $k$ vertices in this tree (i. e. a $k$-element subset of vertices) so that all pairwise distances between the selected vertices are equal (in other words, there exists an integer $c$ such that for all $u, v$ ($u \ne v$, $u, v$ are in selected vertices) $d_{u,v}=c$, where $d_{u,v}$ is the distance from $u$ to $v$). Since the answer may be very large, you need to output it modulo $10^9 + 7$.
If $k = 2$, any set of two vertices may be taken so the answer is $\frac{n(n - 1)}{2}$ modulo $10^9 + 7$. Suppose $k \ge 3$. Consider three vertices $A$, $B$, $C$ such that $d_{A, B} = d_{A,C} = d_{B,C}$. If this equality is true, there's a vertex $Q$ that belongs to all three paths, otherwise, either one of the vertices belongs to the path between two others or there is more than one simple path (i. e. path having distinct edges) between any of the vertices so the graph isn't a tree. Hence, the following equalities are true: $d_{A,B} = d_{A,Q} + d_{B,Q},$ $d_{A,C} = d_{A,Q} + d_{C,Q},$ $d_{B,C} = d_{B,Q} + d_{C,Q}.$ Then $d_{A,Q} + d_{B,Q} = d_{A,Q} + d_{C,Q},$ $d_{A,Q} + d_{B,Q} = d_{B,Q} + d_{C,Q},$ $d_{A,Q} + d_{C,Q} = d_{B,Q} + d_{C,Q},$ hence, $d_{A,Q} = d_{B,Q} = d_{C,Q}$. Suppose $k > 3$. Let's select vertices $A$, $B$, $C$, $D$ that is a correct desired set of four vertices, for the triple of paths $AB$, $AC$, $BC$ let's select a common vertex $Q$ and for the triple $BC$, $CD$, $BD$ - $Q'$. Because $d_{B,Q} = d_{C,Q},$ $d_{B,Q'} = d_{C,Q'}$ $Q$ is the same vertex as $Q'$. The same procedure we can do for all other pairs of triples of vertices. The situation will not be another if we add a new vertex in the set if the set will still meet the problem's conditions. So if $k \ge 3$, a vertex $Q$ exists such that all vertices of the set are equidistant from it. Note that for each set only one such $Q$ exists. Let's iterate over all vertices taking them as $Q$ and "hang" the tree by $Q$. The set of $k$ vertices equidistant from $Q$ meets the problem's condition if and only if the vertices of the set are placed in different subtrees of vertices adjacent to $Q$ (in other words, the paths from them to $Q$ must intersect only in $Q$). Let's calculate the number of desired sets for a given $Q$ and a layer of equidistant vertices. Let $L_Q$ be the number of vertices adjacent to $Q$ (and it's the number of subtrees, too). 
Let's create an array $cnt$ ($1$-indexed) of size $L_Q$ so that the $i$-th element will contain the number of the vertices of the layer in the $i$-th subtree. For the layer of vertices adjacent to $Q$, this array will be filled with $1$. For the other layers, we can update the array as follows: let's mark $Q$ and vertices adjacent to $Q$ as used, then for every vertex of the current layer let's decrease $cnt[i]$ by $1$ if $i$ is the index of the subtree of the vertex, then let's increase $cnt[i]$ by the number of the vertices adjacent to the current one but not used. Then let's mark the vertices as used. After the iteration, the array $cnt$ will correspond to the new layer. Using the array, let's calculate the number of the desired sets of $k$ vertices using the concept of dynamic programming. Let's create an array $dp$ ($0$-indexed) of size $(L_Q + 1) \times (k + 1)$. $dp[i][j]$ will contain a number of found sets of $j$ vertices if only $i$ subtrees have been considered. Let's fill the array with $0$ except $dp[0][0] = 1$. Let's start a $for$-loop with parameter $i$ from $0$ to $L_Q - 1$ and the internal one with parameter $j$ from $0$ to $k$. In every step, we can either take a vertex from $(i + 1)$-th subtree or take nothing. If we take a vertex from the subtree (it's possible only if $j < k$), then we have $cnt[i + 1] \cdot dp[i][j]$ ways to select $j + 1$ vertices considering $i + 1$ subtrees so that the last vertex belongs to the $(i + 1)$-th subtree. This value we must add to $dp[i + 1][j + 1]$ that must contain all ways to select $j + 1$ vertices from $i + 1$ subtrees. If we ignore the subtree, the number of ways to select $j$ vertices from $i + 1$ subtrees ignoring the $(i + 1)$-th subtree is $dp[i][j]$. It must be added to the number of ways to select $j$ vertices from $i + 1$ subtrees - $dp[i + 1][j]$. The answer for the current $Q$ and the current layer of equidistant vertices is $dp[L_Q][k]$. 
The answer for the whole tree is the sum of the answers for all $Q$ and for all layers of equidistant vertices. Remember that all arithmetical operations must be done modulo $10^9 + 7$. The number of possible central vertices is $n$. For every central vertex and every layer we perform two actions: recalculate the array $cnt$ and calculate the number of the corresponding sets using the concept of dynamic programming. The recalculation of $cnt$ works in $O(L_Qk)$, it's just BFS starting from $Q$ so for every central vertex it works in O(n). The dynamic programming for the current $Q$ and the current layer works in $O(L_Qk)$, for the current $Q$ and all layers - in $O(nL_Qk)$. The summary time corresponding to the current $Q$ is $O(nL_Qk)$. The total algorithm work time is $\sum\limits_{Q = 1}^{n} O(nL_QK) = O(nk\sum\limits_{Q = 1}^{n} L_Q)$. The sum of all $L_Q$ is a total number of the adjacent vertices to all vertices, it's just a double number of edges - $2(n - 1)$. So the total work time is $O(n^2k)$.
[ "brute force", "combinatorics", "dfs and similar", "dp", "trees" ]
2,200
#include <bits/stdc++.h>
using namespace std;

typedef long long ll;
const int MAXV = 128;
const ll MOD = 1000 * 1000 * 1000 + 7;

ll addm(ll a, ll b) { return (a + b) % MOD; }
ll mulm(ll a, ll b) { return a * b % MOD; }

vector<int> adj[MAXV];
bool seen[MAXV];
// perSubtree[i]: how many vertices of the current equidistant layer lie
// in the i-th subtree of the current centre.
int perSubtree[MAXV];
ll ways[MAXV][MAXV];

// Number of ways to pick k vertices of the current layer taking at most
// one per subtree; perSubtree[0..branches-1] holds the layer sizes.
ll countSets(int branches, int k) {
    for (int i = 0; i <= branches; i++)
        for (int j = 0; j <= k; j++) ways[i][j] = 0;
    ways[0][0] = 1;
    for (int i = 0; i < branches; i++)
        for (int j = 0; j <= k; j++) {
            // Skip subtree i entirely...
            ways[i + 1][j] = addm(ways[i + 1][j], ways[i][j]);
            // ...or take any one of its perSubtree[i] layer vertices.
            ways[i + 1][j + 1] = addm(ways[i + 1][j + 1], mulm(ways[i][j], perSubtree[i]));
        }
    return ways[branches][k];
}

// For k >= 3 every valid set has a unique centre Q with all chosen
// vertices equidistant from Q and in pairwise different subtrees of Q;
// try every centre and every distance layer (BFS ring by ring).
void solve() {
    int n, k;
    cin >> n >> k;
    for (int i = 0; i < n; i++) adj[i].clear();
    for (int e = 1; e < n; e++) {
        int a, b;
        cin >> a >> b;
        a--; b--;
        adj[a].push_back(b);
        adj[b].push_back(a);
    }
    if (k == 2) {
        // Any pair of vertices works: C(n, 2) modulo MOD.
        cout << n * (n - 1LL) / 2 % MOD << '\n';
        return;
    }
    ll total = 0;
    for (int centre = 0; centre < n; centre++) {
        for (int i = 0; i < n; i++) seen[i] = false;
        seen[centre] = true;
        vector<pair<int, int>> layer;  // (vertex, index of its subtree)
        int branches = adj[centre].size();
        for (int i = 0; i < branches; i++) {
            int v = adj[centre][i];
            layer.push_back({v, i});
            perSubtree[i] = 1;
            seen[v] = true;
        }
        while (!layer.empty()) {
            total = addm(total, countSets(branches, k));
            // Advance the layer one step outwards, keeping subtree labels.
            vector<pair<int, int>> next;
            for (auto& [v, sub] : layer) {
                perSubtree[sub]--;
                for (int w : adj[v])
                    if (!seen[w]) {
                        next.push_back({w, sub});
                        seen[w] = true;
                        perSubtree[sub]++;
                    }
            }
            layer.swap(next);
        }
    }
    cout << total << '\n';
}

int main() {
    int t;
    cin >> t;
    while (t--) solve();
    return 0;
}
1552
A
Subsequence Permutation
A string $s$ of length $n$, consisting of lowercase letters of the English alphabet, is given. You must choose some number $k$ between $0$ and $n$. Then, you select $k$ characters of $s$ and permute them however you want. In this process, the positions of the other $n-k$ characters remain unchanged. You have to perform this operation exactly once. For example, if $s="andrea"$, you can choose the $k=4$ characters $"a_d_ea"$ and permute them into $"d_e_aa"$ so that after the operation the string becomes $"dneraa"$. Determine the minimum $k$ so that it is possible to sort $s$ alphabetically (that is, after the operation its characters appear in alphabetical order).
Let $\texttt{sort}(s)$ be $s$ sorted alphabetically. The answer to the problem is the number $m$ of mismatches between $s$ and $\texttt{sort}(s)$ (i.e., the positions with different characters in the two strings). Choosing $k=m$ characters is sufficient. Let us choose the mismatched characters between $s$ and $\texttt{sort}(s)$, and permute them so that they are sorted alphabetically. It is not hard to prove that the resulting string will coincide with $\texttt{sort}(s)$. Choosing strictly less than $m$ characters is not sufficient. If $k < m$, by the Pigeonhole Principle at least one of the mismatched characters will be left out, and thus it will prevent the final string from being ordered alphabetically. Complexity: $O(n\log n)$.
[ "sortings", "strings" ]
800
#include <iostream>
#include <string>
#include <algorithm>
using namespace std;

// The minimum k equals the number of positions where s differs from
// sort(s): those characters must all be chosen (Pigeonhole), and
// permuting exactly them into sorted order suffices.
void run_case() {
    int n;
    string s;
    cin >> n >> s;
    string target = s;
    sort(target.begin(), target.end());
    int mismatches = 0;
    for (int i = 0; i < n; i++)
        if (s[i] != target[i]) mismatches++;
    cout << mismatches << "\n";
}

int main() {
    int t;
    cin >> t;
    while (t--) run_case();
    return 0;
}
1552
B
Running for Gold
The Olympic Games have just started and Federico is eager to watch the marathon race. There will be $n$ athletes, numbered from $1$ to $n$, competing in the marathon, and all of them have taken part in $5$ important marathons, numbered from $1$ to $5$, in the past. For each $1\le i\le n$ and $1\le j\le 5$, Federico remembers that athlete $i$ ranked $r_{i,j}$-th in marathon $j$ (e.g., $r_{2,4}=3$ means that athlete $2$ was third in marathon $4$). Federico considers athlete $x$ superior to athlete $y$ if athlete $x$ ranked better than athlete $y$ in at least $3$ past marathons, i.e., $r_{x,j}<r_{y,j}$ for at least $3$ distinct values of $j$. Federico believes that an athlete is likely to get the gold medal at the Olympics if he is superior to all other athletes. Find any athlete who is likely to get the gold medal (that is, an athlete who is superior to all other athletes), or determine that there is no such athlete.
Solution 1 First of all, observe that athlete $i$ is superior to athlete $j$ if and only if athlete $j$ is not superior to athlete $i$. The issue is, of course, that we cannot iterate over all pairs of athletes as there are $\binom{n}{2} = O(n^2)$ pairs, which is too much to fit in the time limit. Notice that there can be at most one athlete who is likely to get the gold medal (if there were $2$, one would not be superior to the other which is a contradiction). Let us describe the algorithm. We iterate over the athletes from $1$ to $n$, keeping a possible winner $w$. When we process $i$, we check whether $w$ is superior to $i$. In that case, clearly $i$ is not the one who is likely to get the gold medal and we do nothing. On the other hand, if $i$ is superior to $w$, we deduce that $w$ cannot be the athlete who is likely to get the gold medal. In this case, we assign $w:=i$ and we proceed. Notice that, when we have finished processing athletes, if there is an athlete superior to everyone else it is for sure $w$. Finally, we check whether $w$ is superior to everyone else or not. Complexity: $O(n)$. Solution 2 Let us describe a randomized solution which seems very naive but can actually be proven to have a good complexity (still, the proof is much harder than an optimistic guess). There are many other possible randomized approach which can be proven to have a correct complexity with a similar proof. For each athlete $i$ check whether he is superior to all other athletes, iterating over other athletes in a random order, and stop as soon as you find an athlete who is superior to $i$. Let us show that the described algorithm has complexity $O(n\log n)$. Consider an athlete $i$ who is superior to exactly $n-1-k$ other athletes. Let us compute the expected amount of other athletes the algorithm processes when deciding if athlete $i$ is likely to get a medal. If $k = 0$, the algorithm iterates over all other $n - 1 = O(n)$ athletes. 
If $k \ge 1$, the expected number of other athletes the algorithm iterates over is $O\Big(\frac{n}{k}\Big)$. To prove this, one shall observe that if we take $k$ random distinct elements out of $\{1, \, 2, \, \dots, \, n\}$ the expected value of the smallest one is $O\Big(\frac{n}{k}\Big)$. So, the overall complexity of the algorithm is: $O\left(q_0 \cdot n + \sum_{k = 1}^{n - 1} q_k \frac{n}{k}\right) = O\left(n \left(q_0 + \sum_{k = 1}^{n - 1}\frac{q_k}{k}\right)\right),$ If we let $s_k = q_0 + q_1 + \cdots + q_k$, we can also estimate the complexity with $O\left(n \left(s_0 + \sum_{k = 1}^{n - 1}\frac{s_k - s_{k-1}}{k}\right)\right) = O\left(n\left(\sum_{k = 1}^{n - 2}\frac{s_k}{k(k + 1)} + \frac{s_{n - 1}}{n - 1}\right)\right).$ It remains to estimate $s_k$ and the following lemma does exactly this. Lemma. For each $k \ge 0$, there are at most $2k + 1$ athletes which are superior to at least $n - 1 - k$ athletes. Proof. Assume that there are $m$ athletes who are superior to at least $n - 1 - k$ other athletes. Consider the complete directed graph on these $m$ athletes so that the edge $i \to j$ is present if and only if athlete $i$ is superior to athlete $j$. Each vertex has out-degree $\ge m - 1 - k$ and therefore the number of edges is at least $m(m - 1 - k)$. On the other hand, the number of edges is clearly $\binom{m}{2}$ and therefore we obtain $m(m - 1 - k) \le \frac{m(m - 1)}2 \implies m \le 2k+1$ The lemma tells us that $s_k \le 2k + 1$ and therefore the complexity is estimated by $O\left(n\left(\sum_{k = 1}^{n - 2} \frac{2k + 1}{k(k + 1)} + \frac{2n - 1}{n - 1}\right)\right) = O\left(2n\sum_{k = 1}^{n - 2} \frac{1}{k}\right) = O(n\log n).$ Complexity: $O(n\log n)$, randomized.
[ "combinatorics", "graphs", "greedy", "sortings" ]
1,500
#include <bits/stdc++.h>
using namespace std;

const int M = 5;  // number of past marathons per athlete

// Ranks of one athlete in the five past marathons.
struct Rank {
    int r[M];
};

// A < B means "A is superior to B": A ranked better in at least 3 of the
// 5 marathons. Since all ranks in a marathon are distinct, exactly one of
// A < B, B < A holds for A != B. NOTE: this is not a strict weak ordering,
// so it must never be handed to std::sort.
bool operator<(const Rank& A, const Rank& B) {
    int cnt = 0;
    for (int i = 0; i < M; i++) cnt += A.r[i] < B.r[i];
    return cnt >= 3;
}

// Randomized check (editorial's Solution 2): test candidates in random
// order, abandoning a candidate as soon as someone superior is found;
// expected O(n log n) comparisons overall. The shuffle only affects the
// search order — the winner, if any, is unique, so output is unchanged.
void solve(mt19937& rng) {
    int n;
    cin >> n;
    vector<Rank> ath(n);
    for (int i = 0; i < n; i++)
        for (int j = 0; j < M; j++) cin >> ath[i].r[j];

    vector<int> ord(n);
    iota(ord.begin(), ord.end(), 0);
    // std::random_shuffle was removed in C++17; use std::shuffle with an
    // explicit engine instead.
    shuffle(ord.begin(), ord.end(), rng);

    for (int i = 0; i < n; i++) {
        bool works = true;
        for (int j = 0; j < n && works; j++)
            if (i != j) works &= ath[ord[i]] < ath[ord[j]];
        if (works) {
            cout << ord[i] + 1 << "\n";
            return;
        }
    }
    cout << -1 << "\n";
}

int main() {
    ios::sync_with_stdio(false);
    cin.tie(nullptr);  // remove in problems with online queries!
    mt19937 rng(chrono::steady_clock::now().time_since_epoch().count());
    int t;
    cin >> t;
    for (int i = 0; i < t; i++) solve(rng);
    return 0;
}
1552
C
Maximize the Intersections
On a circle lie $2n$ distinct points, with the following property: however you choose $3$ chords that connect $3$ disjoint pairs of points, no point strictly inside the circle belongs to all $3$ chords. The points are numbered $1, \, 2, \, \dots, \, 2n$ in clockwise order. Initially, $k$ chords connect $k$ pairs of points, in such a way that all the $2k$ endpoints of these chords are distinct. You want to draw $n - k$ additional chords that connect the remaining $2(n - k)$ points (each point must be an endpoint of exactly one chord). In the end, let $x$ be the total number of intersections among all $n$ chords. Compute the maximum value that $x$ can attain if you choose the $n - k$ chords optimally. Note that the exact position of the $2n$ points is not relevant, as long as the property stated in the first paragraph holds.
Let us forget about the original labeling of the points. Relabel the $2(n - k)$ "free" points $1, \, 2, \, \dots, \, 2(n - k)$ in clockwise order, starting from an arbitrary point. Also, in the following, we will color black the original $k$ chords, and red the additional $n - k$ chords. For $1 \le i \le n - k$, connect point $i$ to point $i + n - k$ with a red chord. We shall prove that this configuration - which we will henceforth refer to as the star configuration - is the only one that achieves the maximum number of intersections. The proof will be divided into two parts. First part: we show that, if there are two red chords that do not intersect, it is possible to increase the number of intersections. Second part: we show that there is exactly one configuration in which all pairs of red chords intersect, namely the star configuration. First part. Suppose that, after drawing $n - k$ red chords, there is a pair of red chords that do not intersect. Let these chords connect points $a$-$b$ and $c$-$d$ respectively. Without loss of generality, assume that the chords $a$-$d$ and $b$-$c$ intersect. We show that, by replacing chords $a$-$b$ and $c$-$d$ with $a$-$d$ and $b$-$c$, the overall number of intersections increases. Consider any other chord (either black or red) that intersected exactly one of $a$-$b$ and $c$-$d$. It is easy to see that, in the new configuration, that chord intersects exactly one of $a$-$d$ and $b$-$c$, as exemplified in the following picture: Now consider any other chord (either black or red) that intersected both $a$-$b$ and $c$-$d$. Again, one can see that, in the new configuration, that chord intersects both $a$-$d$ and $b$-$c$: Second part. Consider a configuration that is not the star configuration. Then, there is at least one chord $a$-$b$ ($a < b$) such that $b \ne a + n - k$. Without loss of generality, suppose $a = 1$. 
Now, exactly one of the two sets of points $\{2, \, \dots, \, b - 1\}$ and $\{b + 1, \, \dots, \, 2(n - k)\}$ must contain at least $n - k$ points. By the Pigeonhole Principle, there must be a chord whose endpoints are both contained in this set, and one can see that such a chord does not intersect $a$-$b$. Thus we have shown that the star configuration is the only one in which all pairs of chords intersect, which implies (from the first part) that it is the only one that maximizes the number of intersections. Producing the $n - k$ chords of the star configuration is trivial. It remains to count the number of intersections, which can be done naively in $O(n^2)$ (for each pair of chords, check if they intersect). Bonus: Find an $O(n\log n)$ algorithm to count the number of intersections. Complexity: $O(n^2)$.
[ "combinatorics", "constructive algorithms", "geometry", "greedy", "sortings" ]
1,800
#include <iostream>
#include <vector>
using namespace std;

// Two chords (a,b), a < b, cross iff exactly one endpoint of one chord
// lies strictly between the endpoints of the other.
bool crosses(pair<int, int> p, pair<int, int> q) {
    if (p.first > q.first) swap(p, q);
    return p.second > q.first && p.second < q.second;
}

// CF "Maximize the Intersections": complete the free points with the
// star configuration (point i paired with point i + (n - k) among the
// free points in clockwise order), then count crossings naively, O(n^2).
void run_case() {
    int n, k;
    cin >> n >> k;
    vector<pair<int, int>> chords;
    vector<bool> taken(2 * n + 1, false);
    for (int i = 0; i < k; i++) {
        int x, y;
        cin >> x >> y;
        if (x > y) swap(x, y);
        chords.push_back({x, y});
        taken[x] = taken[y] = true;
    }
    vector<int> free_pts;
    for (int p = 1; p <= 2 * n; p++)
        if (!taken[p]) free_pts.push_back(p);
    // Star configuration: all n - k added chords pairwise intersect.
    for (int i = 0; i < n - k; i++)
        chords.push_back({free_pts[i], free_pts[i + n - k]});
    int crossings = 0;
    for (int i = 0; i < n; i++)
        for (int j = i + 1; j < n; j++)
            crossings += crosses(chords[i], chords[j]);
    cout << crossings << endl;
}

int main() {
    int t;
    cin >> t;
    while (t--) run_case();
    return 0;
}
1552
D
Array Differentiation
You are given a sequence of $n$ integers $a_1, \, a_2, \, \dots, \, a_n$. Does there exist a sequence of $n$ integers $b_1, \, b_2, \, \dots, \, b_n$ such that the following property holds? - For each $1 \le i \le n$, there exist two (not necessarily distinct) indices $j$ and $k$ ($1 \le j, \, k \le n$) such that $a_i = b_j - b_k$.
Suppose that a solution $b_1, \, \dots, \, b_n$ exists. For each $1 \le i \le n$, let $j_i, \, k_i$ be the indices such that $a_i = b_{j_i} - b_{k_i}$. Consider the directed graph on vertices $1, \, \dots, \, n$, with the $n$ edges $j_i \rightarrow k_i$. If we ignore, for a moment, the orientations, we are left with an undirected graph with $n$ vertices and $n$ edges, which must contain a cycle (possibly a loop). Let $m$ be the length of one such cycle, and let $v_1, \, \dots, v_m$ be its vertices. Now, of course $(b_{v_1} - b_{v_2}) + (b_{v_2} - b_{v_3}) + \cdots + (b_{v_m} - b_{v_1}) = 0$, since all the terms cancel out. Notice that, for each $i$, there exists $1 \le t_i \le n$ such that (indices are taken modulo $m$, so that $v_{m + 1} = v_1$) $b_{v_i} - b_{v_{i + 1}} = \begin{cases} a_{t_i} & \text{if there is an edge } v_i \rightarrow v_{i + 1}, \\ -a_{t_i} & \text{if there is an edge } v_i \leftarrow v_{i + 1}. \end{cases}$ Thus, there must be a nonempty subset $\{t_1, \, \dots, \, t_m\} \subseteq \{1, \, \dots, \, n\}$ and a choice of signs $s_1, \, \dots, \, s_m$ ($s_i \in \{+1, \, -1\}$) such that $\tag{$\star$} s_1a_{t_1} + \cdots + s_ma_{t_m} = 0.$ Let us show that this condition is also sufficient for the existence of a valid sequence $b_1, \, \dots, \, b_n$. Suppose there exist $m$, $t_1, \, \dots, \, t_m$ and $s_1, \, \dots, \, s_m$ so that $(\star)$ holds. We construct $b_1, \, \dots, \, b_n$ as follows. Set $b_{t_1} = 0$. Then, inductively, for each $1 \le i < m$ set $b_{t_{i + 1}} = b_{t_i} - s_ia_{t_i}$. Finally, for $i \not\in \{t_1, \, \dots, \, t_m\}$, set $b_i = a_i$. It is easy to check that this construction works. The algorithm that iterates over all $3^n - 1$ choices of the subset and the signs (for each $i$ we decide whether $a_i$ is included in the subset and if it is included, whether its sign is positive or negative) and checks if for one of them $(\star)$ holds, is sufficient to solve the problem under the given constraints. 
Alternatively, one may treat the problem as a knapsack instance (the weights are $a_i$, but can be chosen with arbitrary sign, and we shall understand whether we can fill precisely a knapsack with $0$ capacity). With this approach, the complexity is $O(n^2\max|a_i|)$. Bonus: Solve the problem with complexity $O(n3^{\frac{n}{2}})$. Complexity: $O(n3^n)$.
[ "bitmasks", "brute force", "constructive algorithms", "dfs and similar", "dp", "graphs", "math" ]
1,800
#include <iostream>
#include <vector>
using namespace std;

// One test case: print YES iff some nonempty subset of a[1..n] can be given
// signs (+1 / -1) so that the signed sum is zero. Every element is assigned
// a ternary digit: 0 = excluded, 1 = added, 2 = subtracted, and all 3^n - 1
// nonempty assignments are enumerated. Complexity: O(n * 3^n) per test.
void solve() {
    int n;
    cin >> n;
    vector<int> a(n + 1);
    for (int i = 1; i <= n; ++i) cin >> a[i];

    // total = 3^n, the number of ternary digit strings of length n.
    int total = 1;
    for (int i = 1; i <= n; ++i) total *= 3;

    // mask = 0 is the empty selection, so start from 1.
    for (int mask = 1; mask < total; ++mask) {
        int digits = mask;
        int acc = 0;
        for (int i = 1; i <= n; ++i) {
            int d = digits % 3;
            digits /= 3;
            if (d == 2) d = -1;  // digit 2 encodes a minus sign
            acc += d * a[i];
        }
        if (acc == 0) {
            cout << "YES" << endl;
            return;
        }
    }
    cout << "NO" << endl;
}

int main() {
    int t;
    cin >> t;
    for (int i = 1; i <= t; ++i) solve();
    return 0;
}
1552
E
Colors and Intervals
The numbers $1, \, 2, \, \dots, \, n \cdot k$ are colored with $n$ colors. These colors are indexed by $1, \, 2, \, \dots, \, n$. For each $1 \le i \le n$, there are exactly $k$ numbers colored with color $i$. Let $[a, \, b]$ denote the interval of integers between $a$ and $b$ inclusive, that is, the set $\{a, \, a + 1, \, \dots, \, b\}$. You must choose $n$ intervals $[a_1, \, b_1], \, [a_2, \, b_2], \, \dots, [a_n, \, b_n]$ such that: - for each $1 \le i \le n$, it holds $1 \le a_i < b_i \le n \cdot k$; - for each $1 \le i \le n$, the numbers $a_i$ and $b_i$ are colored with color $i$; - each number $1 \le x \le n \cdot k$ belongs to at most $\left\lceil \frac{n}{k - 1} \right\rceil$ intervals. One can show that such a family of intervals always exists under the given constraints.
Solution 1 We describe the algorithm and later we explain why the construction works. Let $x_{i, j}$ ($1 \le i \le n$, $1 \le j \le k$) denote the position of the $j$-th occurrence of color $i$ (from the left). First, sort the colors according to $x_{i, 2}$. Take the first $\left\lceil \frac{n}{k - 1} \right\rceil$ colors, and to each of them assign the interval $[x_{i, 1}, \, x_{i, 2}]$. Then, sort the remaining colors according to $x_{i, 3}$, take the first $\left\lceil \frac{n}{k - 1} \right\rceil$ and to each of them assign the interval $[x_{i, 2}, \, x_{i, 3}]$. More generally, in the $t$-th step: sort the remaining colors according to $x_{i, t + 1}$; take the first $\left\lceil \frac{n}{k - 1} \right\rceil$ (possibly less in the last step) of these colors; assign to each color $i$ the interval $[x_{i, t}, \, x_{i, t + 1}]$. Let us show that this choice of intervals works. It is straightforward to see that the first two properties hold. It remains to check the third property. We prove that two intervals selected in different steps are disjoint. Since in each step we select at most $\left\lceil \frac{n}{k - 1} \right\rceil$ intervals, this is sufficient to conclude. Consider two colors $i, \, j$ selected in two different steps $s < t$ respectively. Then, we have $x_{i, s + 1} < x_{j, s + 1} \le x_{j, t}$ and thus $[x_{i, s}, \, x_{i, s + 1}] \cap [x_{j, t}, \, x_{j, t + 1}] = \varnothing$, which is exactly what we wanted to prove. Complexity: $O(nk)$. Solution 2 A greedy approach is also possible. Let $x_{i, j}$ be defined as in Solution 1. Consider all intervals of the form $[x_{i, j}, \, x_{i, j + 1}]$ and sort them increasingly according to their right endpoint. 
We now iterate over these intervals, and for each of them, we decide to select it if both these conditions are met (which are equivalent to "selecting the interval does not violate any requirement"): no interval of the same color has been chosen yet; among the numbers spanned by the interval, no one is contained in (at least) $\left\lceil \frac{n}{k - 1} \right\rceil$ already selected intervals. Let us prove that this algorithm works (i.e., it selects exactly one interval for each color). Suppose, by contradiction, that, for some color $i$, no interval of that color gets chosen. This means that, for each $1 \le j \le k - 1$, there exist $\left\lceil \frac{n}{k - 1} \right\rceil$ selected intervals that intersect interval $[x_{i, j}, \, x_{i, j + 1}]$. We can say more: the rightmost endpoints of these intervals must belong to $[x_{i, j}, \, x_{i, j + 1}]$; indeed, if it weren't the case for at least one interval $[a, \, b]$, the interval $[x_{i, j}, \, x_{i, j + 1}]$ would come before $[a, \, b]$ in the ordering, so it would actually have been selected. Since all these intervals must be distinct, they are $(k - 1)\left\lceil \frac{n}{k - 1} \right\rceil \ge n$. Yet this contradicts the fact that they must be at most $n - 1$, one for each color other than $i$. Complexity: $O(n^2k)$.
[ "constructive algorithms", "data structures", "greedy", "sortings" ]
2,300
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
using namespace std;

// A candidate interval [a, b] whose two endpoints are both occurrences of
// `color`. The greedy processes candidates in increasing order of right
// endpoint.
struct Interval {
    int a, b;
    int color;
    Interval(int _a, int _b, int _color): a(_a), b(_b), color(_color) {}
    // Order by right endpoint. Fixed to take a const reference and be
    // const-qualified: the original `bool operator <(const Interval other)`
    // copied the entire struct on every comparison made by std::sort and
    // was not const-correct.
    bool operator <(const Interval& other) const { return b < other.b; }
};

// Greedy solution (Solution 2 of the editorial): consider every interval
// between consecutive occurrences of the same color, sorted by right
// endpoint; select an interval iff its color is still unserved and none of
// the points it covers already lies in r = ceil(n / (k - 1)) selected
// intervals. The editorial proves each color receives exactly one interval.
// Complexity: O(n^2 * k).
void solve() {
    int n, k;
    cin >> n >> k;
    int r = (n + k - 2) / (k - 1);   // r = ceil(n / (k - 1))
    vector<int> c(n * k + 1);        // c[p] = color of position p (1-based)
    for (int i = 1; i <= n * k; i++) cin >> c[i];
    // x[i] = positions of color i, collected in increasing order.
    vector<vector<int>> x(n + 1);
    for (int i = 1; i <= n * k; i++) x[c[i]].push_back(i);
    // All k-1 consecutive-occurrence intervals of every color.
    vector<Interval> candidates;
    for (int i = 1; i <= n; i++)
        for (int j = 0; j < k - 1; j++)
            candidates.push_back(Interval(x[i][j], x[i][j + 1], i));
    sort(candidates.begin(), candidates.end());
    vector<pair<int, int>> intervals(n + 1);  // chosen interval per color
    vector<bool> taken(n + 1, false);         // color already served?
    vector<int> weights(n * k + 1, 0);        // how many chosen intervals cover each point
    for (const Interval& I : candidates) {    // const& avoids copying each candidate
        if (taken[I.color]) continue;
        // Selecting I must not push any covered point to r intervals.
        int max_w = 0;
        for (int i = I.a; i <= I.b; i++) max_w = max(max_w, weights[i]);
        if (max_w < r) {
            taken[I.color] = true;
            for (int i = I.a; i <= I.b; i++) ++weights[i];
            intervals[I.color] = make_pair(I.a, I.b);
        }
    }
    for (int i = 1; i <= n; i++)
        cout << intervals[i].first << " " << intervals[i].second << endl;
}

int main() {
    solve();
    return 0;
}
1552
F
Telepanting
An ant moves on the real line with constant speed of $1$ unit per second. It starts at $0$ and always moves to the right (so its position increases by $1$ each second). There are $n$ portals, the $i$-th of which is located at position $x_i$ and teleports to position $y_i < x_i$. Each portal can be either active or inactive. The initial state of the $i$-th portal is determined by $s_i$: if $s_i=0$ then the $i$-th portal is initially inactive, if $s_i=1$ then the $i$-th portal is initially active. When the ant travels through a portal (i.e., when its position coincides with the position of a portal): - if the portal is inactive, it becomes active (in this case the path of the ant is not affected); - if the portal is active, it becomes inactive and the ant is instantly teleported to the position $y_i$, where it keeps on moving as normal. How long (from the instant it starts moving) does it take for the ant to reach the position $x_n + 1$? It can be shown that this happens in a finite amount of time. Since the answer may be very large, compute it modulo $998\,244\,353$.
Solution 1 The key insight is realizing that, if at some point the ant is located at position $x$, then all the portals with $x_i < x$ are active. One can prove this by induction on the time $t$. Indeed, when $t = 0$, $x = 0$ and there are no portals with $x_i < x$. Now suppose this is true at time $t$, and let $x$ be the position of the ant at that time. There are three possible scenarios to consider. If there is no portal at position $x$, then the statement is trivially true at time $t + 1$. If there is an inactive portal at position $x$, then that portal will become active and the position of the ant at time $t + 1$ will be $x + 1$, so all the portals with $x_i < x + 1$ will be active. If there is an active portal at position $x$, the ant will be teleported to some position $y < x$, and thus at time $t + 1$ it will be at position $y + 1 \le x$. Since all the portals with $x_i < x$ were active in the first place, and $y + 1 \le x$, all the portals with $x_i < y + 1$ will be active as well. Let $q_i$ be the time the ant needs to go from the position $x_i$ to the position $x_i$ again assuming that all the portals $1, \, 2, \, \dots, \, i$ are active. In order to find a formula for $q_i$, let us describe the movement of the ant when it starts from position $x_i$ and the portals $1, 2, \dots, i$ are active. The ant gets instantly teleported to $y_i$. The ant walks from $y_i$ to $x_{j_i}$, where $j_i \le i$ is the smallest index so that $y_i < x_{j_i}$. The ant walks for $q_{j_i}$ seconds and it ends up in $x_{j_i}$ with the portal inactive. The ant walks from $x_{j_i}$ to $x_{j_i+1}$. The ant walks for $q_{j_i+1}$ seconds and it ends up in $x_{j_i+1}$ with the portal inactive. $\cdots$ The ant walks for $q_{i-1}$ seconds and it ends up in $x_{i-1}$ with the portal inactive. The ant walks from $x_{i-1}$ to $x_i$. 
Adding up the contributions of the steps described above, we obtain the following recurrence for $q_i$: $q_i = (x_i - y_i) + q_{j_i} + q_{j_i + 1} + \cdots + q_{i - 1}.$ Let $A$ be the set of portals that are initially active. The answer to the problem is given by the formula (which can be proven analyzing the movement of the ant as we have done to prove the recurrence relation for $q_i$) $x_n + 1 + \sum_{i \in A} q_i.$ Using a binary search to identify $j_i$ and keeping the prefix sums of $q_1, \, q_2, \, \dots, \, q_n$, one can implement the described solution in $O(n\log n)$. Complexity: $O(n\log n)$. Solution 2 Let $z_i$ be the index of the teleporter reached immediately after using teleporter $i$ (it can be computed by binary search). Let $\text{dp}_{i, 0}$ be the number of times $x_i$ is reached. Let $\text{dp}_{i, 1}$ be the number of times the teleporter $i$ is used. Then, the answer is easy to calculate: each time the teleporter $i$ is active you spend $x_{z_i} - y_i$ time, and each time the teleporter $i$ is inactive you spend $x_{i + 1} - x_i$ time. Summing up all the contributions, the answer turns out to be $\sum_{i = 1}^n \: [\text{dp}_{i, 1}(x_{z_i} - y_i) + (\text{dp}_{i, 0} - \text{dp}_{i, 1})(x_{i + 1} - x_i)].$ Now we find recurrences for $\text{dp}_{i, 0}$ and $\text{dp}_{i, 1}$. The crucial observation is that $\text{dp}_{i, 0}$ has the same parity as $s_i \oplus 1$, where $\oplus$ denotes bitwise XOR. Thus, it is not hard to see that, for $1 \le i \le n$, $\begin{align*} \text{dp}_{i + 1, 0} = \sum_{z_j = i + 1} \text{dp}_{j, 1} + \frac{\text{dp}_{i, 0} + (s_i \oplus 1)}{2} \implies \text{dp}_{i, 0} & = 2\left(\text{dp}_{i + 1, 0} - \sum_{z_j = i + 1} \text{dp}_{j, 1}\right) - (s_i \oplus 1), \\ \text{dp}_{i, 1} & = \frac{\text{dp}_{i, 0} - (s_i \oplus 1)}{2}. \end{align*}$ It now suffices to iterate over the indices $i$ in decreasing order. Complexity: $O(n\log n)$.
[ "binary search", "data structures", "dp", "sortings" ]
2,200
// CF 1552F "Telepanting" — Solution 2 of the editorial.
// dp[i][0] = number of times the ant stands on position x_i,
// dp[i][1] = number of times portal i actually teleports the ant.
// Both are computed right-to-left and combined into the total travel time.
#include <bits/stdc++.h>
using namespace std;
#define nl "\n"
#define nf endl
#define ll long long
#define pb push_back
#define _ << ' ' <<
#define INF (ll)1e18
// mod = 2 * hmod. dp values are stored modulo 2*998244353 so that the exact
// division by 2 in the recurrence (dp[i][1] = dp[i][0] / 2) remains correct
// modulo hmod — see fx below. The final answer is reduced modulo hmod.
#define mod 1996488706
#define hmod 998244353
#define maxn 200010
ll i, i1, j, k, k1, t, n, m, res, flag[10], a, b;
// x[i], y[i], s[i]: portal position, destination, initial state (1 = active).
// After preprocessing, y[i] is reused to hold z_i — the index of the first
// portal at position >= (original) y_i — and c[i] = x_{z_i} - y_i is the
// distance walked from the landing point to that portal.
ll x[maxn], y[maxn], s[maxn], dp[maxn][2], c[maxn];
vector<array<ll, 2>> v;  // (position, index) pairs; sorted because input positions increase
vector<ll> adj[maxn];    // adj[z] = all portals j with z_j == z
// Give the residue x the same parity as p without changing its value modulo
// hmod: hmod is odd, so adding it flips the parity. This relies on the
// invariant that the true value of x has parity p; afterwards the truncated
// division x / 2 is exact modulo hmod.
void fx(ll &x, ll p) {
  if ((x + p) % 2) x = (x + hmod) % mod;
}
int main() {
  ios::sync_with_stdio(0);
  cin.tie(0);
#if !ONLINE_JUDGE && !EVAL
  // Offline testing: shadow std::cin/std::cout with file streams.
  ifstream cin("input.txt");
  ofstream cout("output.txt");
#endif
  cin >> n;
  for (i = 1; i <= n; i++) {
    cin >> x[i] >> y[i] >> s[i];
    v.pb({x[i], i});
  }
  x[n + 1] = x[n] + 1;  // target position x_n + 1
  for (i = 1; i <= n; i++) {
    // Binary search for the first portal located at position >= y[i]
    // (it exists because y_i < x_i, so portal i itself is a candidate).
    array<ll, 2> o = {y[i], -INF};
    auto it = lower_bound(v.begin(), v.end(), o);
    c[i] = (*it)[0] - y[i];
    y[i] = (*it)[1];  // y[i] now stores the index z_i
    adj[y[i]].pb(i);
  }
  // Base case, last portal: per the code's invariant, dp[n][0] = s_n + 1
  // visits and dp[n][1] = s_n teleports.
  dp[n][0] = s[n] + 1;
  dp[n][1] = s[n];
  for (i = n - 1; i >= 1; i--) {
    // Editorial recurrence:
    // dp[i][0] = 2 * (dp[i+1][0] - sum of dp[j][1] over j with z_j = i+1)
    //            - (s_i XOR 1), all arithmetic kept modulo 2*hmod.
    dp[i][0] = (2 * dp[i + 1][0]) % mod;
    for (auto u : adj[i + 1]) dp[i][0] = (dp[i][0] - 2 * dp[u][1] + 2 * (ll)mod) % mod;
    dp[i][0] = (dp[i][0] - (s[i] ^ 1) + mod) % mod;
    // The true dp[i][0] has the parity of s_i XOR 1 (crucial observation in
    // the editorial); fix the residue's parity so the halving is exact.
    fx(dp[i][0], s[i] ^ 1);
    dp[i][1] = dp[i][0] / 2;  // equals (dp[i][0] - (s_i XOR 1)) / 2 mod hmod
  }
  /* for (i = 1; i <= n; i++) { cout << x[i + 1] - x[i] _ c[i] _ dp[i][0] _ dp[i][1] << nl; } */
  // Total time: walk 0 -> x_1, plus per portal i: c[i] for every teleport
  // and x_{i+1} - x_i for every plain pass-through.
  res = x[1] % mod;
  for (i = 1; i <= n; i++) {
    res += (dp[i][1] * c[i]);
    res %= mod;
    res += ((dp[i][0] - dp[i][1] + mod) * (x[i + 1] - x[i])) % mod;
  }
  res %= hmod;
  cout << res << nl;
  return 0;
}
1552
G
A Serious Referee
Andrea has come up with what he believes to be a novel sorting algorithm for arrays of length $n$. The algorithm works as follows. Initially there is an array of $n$ integers $a_1,\, a_2,\, \dots,\, a_n$. Then, $k$ steps are executed. For each $1\le i\le k$, during the $i$-th step the subsequence of the array $a$ with indexes $j_{i,1}< j_{i,2}< \dots< j_{i, q_i}$ is sorted, without changing the values with the remaining indexes. So, the subsequence $a_{j_{i,1}},\, a_{j_{i,2}},\, \dots,\, a_{j_{i,q_i}}$ is sorted and all other elements of $a$ are left untouched. Andrea, being eager to share his discovery with the academic community, sent a short paper describing his algorithm to the journal "Annals of Sorting Algorithms" and you are the referee of the paper (that is, the person who must judge the correctness of the paper). You must decide whether Andrea's algorithm is correct, that is, if it sorts any array $a$ of $n$ integers.
Let us say that an array of $n$ integers is good if it is sorted by Andrea's algorithm, and bad otherwise. First of all, we state and prove the following intuitive fact (which is well-known for sorting networks): Lemma. (Zero-One Principle) All arrays $a$ with values in $\{0, \, 1\}$ are good if and only if all arrays are good. Proof. The "if" part is trivial. To prove the converse, consider an array $a$ made up of arbitrary integers (for simplicity, we assume that they are distinct). Fix some $1 \le s \le n$ and construct the array $b$ such that $b_i = 0$ if $a_i$ is among the $s$ smallest elements of $a$, and $b_i = 1$ otherwise. Since we know that $b$ is good, it follows that Andrea's algorithm applied on $a$ will produce an array in which the $s$ smallest elements occupy the first $s$ positions. Since this is true for every $s \in \{1, \, 2, \, \dots, \, n\}$, we deduce that $a$ is good. If $n = 1$, then the answer is always ACCEPTED. From now on, we assume $n \ge 2$. Let $S_i = \{j_{i,1}, \, \dots, \, j_{i,q_i}\}$ be the set of indices considered in the $i$-th step. Let $T_i = S_1 \cup S_2 \cup \cdots \cup S_i$. Given a function $f: T_i \to \{0, \, 1\}$, we say that it is $i$-achievable if there is an initial array $a_1, \, a_2, \, \dots, \, a_n \in \{0, \, 1\}$ so that, after $i$ steps, $a_j = f(j)$ holds for each $j \in T_i$. Applying the Zero-One Principle, one can show that the answer to the problem is ACCEPTED if and only if $T_k = \{1, \, 2, \, \dots, \, n\}$ and all the $k$-achievable functions are nondecreasing (there are $n + 1$ nondecreasing functions, which are those like $(0,0,\dots,0,1,\dots,1,1)$). The idea, then, is to compute, for each $i = 1, \, 2, \, \dots, \, k$, the set of $i$-achievable functions. Let $f: T_i \to \{0, \, 1\}$ be an $i$-achievable function. 
Notice that, for each $g: S_{i+1} \setminus T_i \to \{0, \, 1\}$, we can find an initial configuration such that after $i$ steps it coincides with $f$ on $T_i$ and with $g$ on $S_{i + 1} \setminus T_i$. In particular, we can choose arbitrarily how many times the function $g$ takes the value $1$. The crucial observation is that if we know $f$ and we know how many times the function $g$ takes the value $1$, then we know unambiguously what happens on $T_{i + 1}$ after $i + 1$ steps: on $T_{i + 1} \setminus S_{i + 1}$ the values are exactly the values of $f$, on $S_{i + 1}$ the values are nondecreasing and thus only the number of ones is necessary to determine them. Thus, given an $i$-achievable function, we can construct the $(i + 1)$-achievable functions it can evolve into and there are exactly $|S_{i + 1} \setminus T_i| + 1$ of them. Let $d_i = |S_i \setminus T_{i - 1}|$. The complexity of the described algorithm is $O(n \cdot (d_1 + 1)(d_2 + 1) \cdots (d_k + 1))$. Since $(d_1 + 1) + (d_2 + 1) + \cdots + (d_k + 1) \le n + k$, we have (by the AM-GM inequality): $(d_1 + 1)(d_2 + 1) \cdots (d_k + 1) \le \left(\frac{n + k}{k}\right)^k.$ Here are three observations which reduce hugely the execution time (the first one is already sufficient to fit into the time-limit comfortably): It is possible to encode the $i$-achievable functions as bitmasks and all the steps of the solutions can be performed as bitwise operations. Let us show that if $T_{k - 1} \ne \{1, \, 2, \, \dots, \, n\}$ and $S_k \ne \{1, \, 2, \, \dots, \, n\}$, then the answer is REJECTED. Let $x \not\in T_{k - 1}$ and $y \not\in S_k$. If $x = y$, then $x \not\in T_k$ and we already know that the answer is REJECTED. Otherwise, let $a$ be a permutation of $1, \, 2, \, \dots, \, n$ with $a_x = y$. After $k - 1$ steps it still holds $a_x = y$ and after $k$ steps $y \in \{a_j: j \in S_k\}$ which implies that $a_y \ne y$ and therefore the array is not sorted by the algorithm. 
With this observation (together with the above described usage of bitmasks), the complexity of the algorithm becomes $O\left(\left(\frac{n + k - 1}{k - 1}\right)^{k - 1}\right)$. To save a lot of memory (and, since memory allocation is expensive, also execution time), one can implement the algorithm in a recursive fashion. Complexity: $O\left(n\left(\frac{n + k}{k}\right)^k\right)$.
[ "bitmasks", "brute force", "dfs and similar", "sortings" ]
3,000
// CF 1552G "A Serious Referee": decide whether Andrea's k-step subsequence
// sorting procedure sorts every array of n integers. By the Zero-One
// Principle it suffices to check all {0,1} arrays; the search below walks
// the reachable ("achievable") 0/1 configurations step by step, packed as
// 64-bit masks (n <= 40, k <= 10).
#define _USE_MATH_DEFINES
#include <bits/stdc++.h>
using namespace std;
typedef long long LL;
typedef unsigned long long ULL;
#define SZ(x) ((int)((x).size()))
// Returns the time elapsed in nanoseconds from 1 January 1970, at 00:00:00.
LL get_time() {
  return chrono::duration_cast<chrono::nanoseconds>(
             chrono::steady_clock::now().time_since_epoch())
      .count();
}
// Debug helper: renders at most `counter` elements of [begin_iter, end_iter)
// as "[a, b, ...]"; a negative counter means no limit.
template <typename T1, typename T2>
string print_iterable(T1 begin_iter, T2 end_iter, int counter) {
  bool done_something = false;
  stringstream res;
  res << "[";
  for (; begin_iter != end_iter and counter; ++begin_iter) {
    done_something = true;
    counter--;
    res << *begin_iter << ", ";
  }
  string str = res.str();
  if (done_something) {
    // Drop the trailing ", ".
    str.pop_back();
    str.pop_back();
  }
  str += "]";
  return str;
}
// Debug printing of pairs as "{first, second}".
template <typename S, typename T>
ostream& operator <<(ostream& out, const pair<S, T>& p) {
  out << "{" << p.first << ", " << p.second << "}";
  return out;
}
// Debug printing of vectors as "[v0, v1, ...]".
template <typename T>
ostream& operator <<(ostream& out, const vector<T>& v) {
  out << "[";
  for (int i = 0; i < (int)v.size(); i++) {
    out << v[i];
    if (i != (int)v.size()-1) out << ", ";
  }
  out << "]";
  return out;
}
// dbg(...) support: prints "name: value" pairs on clog.
template<class TH> void _dbg(const char* name, TH val){
  clog << name << ": " << val << endl;
}
template<class TH, class... TA> void _dbg(const char* names, TH curr_val, TA... vals) {
  while(*names != ',') clog << *names++;
  clog << ": " << curr_val << ", ";
  _dbg(names+1, vals...);
}
#if DEBUG && !ONLINE_JUDGE
ifstream input_from_file("input.txt");
#define cin input_from_file
#define dbg(...) _dbg(#__VA_ARGS__, __VA_ARGS__)
#define dbg_arr(x, len) clog << #x << ": " << print_iterable(x, x+len, -1) << endl;
#else
#define dbg(...)
#define dbg_arr(x, len)
#endif
///////////////////////////////////////////////////////////////////////////
//////////////////// DO NOT TOUCH BEFORE THIS LINE ////////////////////////
///////////////////////////////////////////////////////////////////////////
const int MAXN = 40;   // n <= 40 so a configuration fits in one 64-bit mask
const int MAXK = 10;
int n, k;
// active[i] = bitmask of T_i = S_1 | ... | S_i, the indices touched by the
// first i steps; bit j corresponds to array index j + 1.
LL active[MAXK+1];
// bb[it] = bitmask of S_{it+1}, the index set sorted in (0-based) step it.
LL bb[MAXK];
// pref[it][ones] = mask that clears the lowest q[it] - ones indices of step
// it (where the zeros land after sorting) and keeps every other bit.
// NOTE(review): this construction assumes each step's indices arrive in
// increasing order — TODO confirm against the problem statement.
LL pref[MAXK][MAXN+1];
LL q[MAXK];            // q[it] = |S_{it+1}|, size of the step's index set
int inactive[MAXK];    // |S_{it+1} \ T_it| = number of fresh indices in step it
// DFS over achievable configurations. S = the 0/1 values currently fixed on
// T_it, packed as a bitmask. For every possible count of ones among the
// step's fresh indices, apply the sort (zeros to the step's low positions,
// ones to the high ones) and recurse. Returns false iff some achievable
// final configuration is not sorted.
bool recur(LL S, int it) {
  if (it == k-1) {
    // Last step: the array ends up sorted iff every position where S
    // differs from the sorted pattern (all ones on top) lies inside the
    // final step's set, so the last sort can repair it.
    int ones = __builtin_popcountll(S);
    int zeros = n-ones;
    LL sortedS = ((1ll<<ones)-1)<<zeros;
    LL diff = S ^ sortedS;
    return (diff & bb[it]) == diff;
  }
  int min_ones = __builtin_popcountll(S & bb[it]);  // ones already fixed inside the step
  S |= bb[it];
  // Fresh indices may contribute 0 .. inactive[it] additional ones.
  for (int ones = min_ones; ones <= min_ones + inactive[it]; ones++) {
    if (recur(S & pref[it][ones], it+1) == false) return false;
  }
  return true;
}
int main() {
  ios::sync_with_stdio(false);
  cin.tie(0);  // Remove in problems with online queries!
  cin >> n;
  if (n == 1) {
    // A single element is always sorted.
    cout << "ACCEPTED\n";
    return 0;
  }
  cin >> k;
  for (int it = 0; it < k; it++) {
    cin >> q[it];
    pref[it][q[it]] = (1ll<<n)-1;  // all ones: with q[it] ones, nothing is cleared
    for (int i = 0; i < q[it]; i++) {
      int x;
      cin >> x;
      x--;
      bb[it] |= 1ll<<x;
      // After reading i+1 indices, their complement clears exactly those
      // i+1 lowest step positions.
      pref[it][q[it]-(i+1)] = ~bb[it];
    }
    inactive[it] = __builtin_popcountll(bb[it]&(~active[it]));
    active[it+1] = active[it] | bb[it];
  }
  // If the last step sorts the entire array, the algorithm is trivially correct.
  if (q[k-1] == n) {
    cout << "ACCEPTED\n";
    return 0;
  }
  // Otherwise, an index untouched before the last step yields a
  // counterexample (editorial, second observation).
  if (active[k-1] != (1ll<<n)-1) {
    cout << "REJECTED\n";
    return 0;
  }
  if (!recur(0, 0)) cout << "REJECTED\n";
  else cout << "ACCEPTED\n";
}