contest_id
stringlengths
1
4
index
stringclasses
43 values
title
stringlengths
2
63
statement
stringlengths
51
4.24k
tutorial
stringlengths
19
20.4k
tags
listlengths
0
11
rating
int64
800
3.5k
code
stringlengths
46
29.6k
923
E
Perpetual Subtraction
There is a number $x$ initially written on a blackboard. You repeat the following action a fixed amount of times: - take the number $x$ currently written on a blackboard and erase it - select an integer uniformly at random from the range $[0, x]$ inclusive, and write it on the blackboard Determine the distribution of final number given the distribution of initial number and the number of steps.
We can model this process as a Markov chain with $N + 1$ states with transition matrix $A$, where $A_{k,j}=\frac{1}{j+1}$ for $0 \le k \le j \le N$ and $A_{k,j}=0$ otherwise (a number $j$ on the blackboard is replaced by each $k \in [0, j]$ with probability $\frac{1}{j+1}$). The task is to find $R = A^{M} \cdot P$. A naive solution using Matrix exponentiation is obviously too slow, as it uses $O(N^{3}\log M)$ time. We need to improve upon it and we look for eigenvalue decomposition. This is a triangular matrix, thus its eigenvalues are the elements on the main diagonal. Hence $\Lambda=\operatorname{diag}\left(1, \frac{1}{2}, \frac{1}{3}, \ldots, \frac{1}{N+1}\right)$. We can show that the eigenvector corresponding to $\lambda_{i}=\frac{1}{i+1}$ is $v_{i}=\left((-1)^{i+j}\cdot{\binom{i}{j}}\right)_{j=0}^{N}$ (consult proof at the end). Thus, we have $Q=\left(\begin{array}{ccccc}{1}&{-1}&{1}&{\dots}&{(-1)^{N}}\\ {0}&{1}&{-2}&{\dots}&{(-1)^{N+1}\binom{N}{1}}\\ {\vdots}&{}&{\ddots}&{}&{\vdots}\\ {0}&{0}&{0}&{\dots}&{1}\end{array}\right),$ i.e. $Q_{i,j}=(-1)^{i+j}\binom{j}{i}$ — the upper-triangular matrix whose $i$-th column is the eigenvector $v_{i}$. To finalise the eigenvalue decomposition, we need to find the inverse of $Q$. 
It can be shown $Q$ and $Q^{ - 1}$ are the same up to the sign (consult proof at the end): $Q^{-1}=\left(\begin{array}{cccccc}{1}&{1}&{1}&{\cdots}&{1}\\ {0}&{1}&{2}&{\cdots}&{\binom{N}{1}}\\ {0}&{0}&{1}&{\cdots}&{\binom{N}{2}}\\ {\vdots}&{\vdots}&{\vdots}&{\ddots}&{\vdots}\\ {0}&{0}&{0}&{\cdots}&{1}\end{array}\right),$ i.e. $Q^{-1}_{i,j}=\binom{j}{i}$. The advantage of eigendecomposition is that $A^{M} = Q \cdot \Lambda ^{M} \cdot Q^{ - 1},$ where the diagonal matrix $ \Lambda $ can be exponentiated in $O(N\log M)$. We can calculate the result as $R = A^{M} \cdot P = Q \cdot ( \Lambda ^{M} \cdot (Q^{ - 1} \cdot P)) .$ Performed naively, this runs in $O(N^{2}+N\log M)$, which is still too slow. We obviously need to multiply with $Q$ (and its inverse) faster. Fortunately, both linear functions $Q$ and $Q^{ - 1}$ are a convolution and we can compute the multiplication in $O(N\log N)$ using FFT, which is a bit hinted by the modulus in which to compute the answer. Proof of the eigenvectors We show that $\forall i\in[0,N],k\in[0,N]$ it holds $\sum_{j=0}^{N}v_{i,j}\cdot A_{k,j}=\sum_{j=k}^{i}(-1)^{i+j}\cdot\binom{i}{j}\cdot\frac{1}{j+1}=\frac{1}{i+1}\sum_{j=k}^{i}(-1)^{i+j}\cdot\binom{i+1}{j+1}=\frac{1}{i+1}\cdot(-1)^{i+k}\cdot\binom{i}{k}=\lambda_{i}\cdot v_{i,k}\,,$ where we used $\frac{1}{j+1}\binom{i}{j}=\frac{1}{i+1}\binom{i+1}{j+1}$, and the last equality can be proven by induction on $i - k$. Proof of the inverse Denote $P = QQ^{ - 1}$. First, we show that the diagonal of the product only contains ones. This is easy, since the $i$-th row of $Q$ has zeroes until $i - 1$-th column, $i$-th column of $Q^{ - 1}$ has zeroes starting from $i + 1$-th row, and $Q_{i, i} = Q^{ - 1}_{i, i} = 1$. 
Thus, $P_{i, i} = 1$. Next we show that the $P_{i, j} = 0$ for $i \neq j$. When $i > j$, all the summands in the inner product are zero. It remains to show the claim for $i < j$. $P_{ij}=\sum_{k=0}^{N}Q_{ik}Q_{kj}^{-1}=\sum_{k=i}^{j}(-1)^{i+k}\binom{k}{i}\binom{j}{k}=\binom{j}{i}\sum_{k=i}^{j}(-1)^{i+k}\binom{j-i}{k-i}=\binom{j}{i}\sum_{k=0}^{j-i}(-1)^{k}\binom{j-i}{k}=0\,,$ where we used the identity $\binom{k}{i}\binom{j}{k}=\binom{j}{i}\binom{j-i}{k-i}$ and the fact that the alternating sum of a full row of binomial coefficients vanishes. Proof that $Q$ and $Q^{ - 1}$ are convolutions Put $y = Qx$. We want to show that $y$ can be computed by convolution. See that $y_{i}=\sum_{j=0}^{N}Q_{i,j}x_{j}=\sum_{j=i}^{N}(-1)^{i+j}\binom{j}{i}x_{j}=\frac{(-1)^{i}}{i!}\sum_{j=i}^{N}(-1)^{j}\cdot j!\cdot x_{j}\cdot\frac{1}{(j-i)!}=\frac{(-1)^{i}}{i!}\sum_{j=i}^{N}f(j)\cdot g(j-i)\,,$ where $f(j)=(-1)^{j}\cdot j!\cdot x_{j}$ and $g(j)=\frac{1}{j!}$, hence $y$ is a convolution of functions $f$ and $g$ up to some multiplicative factors. The proof for $Q^{-1}$ is similar, we can just remove all the $( - 1)^{ \alpha }$ terms.
[ "fft", "math", "matrices" ]
3,100
null
923
F
Public Service
There are $N$ cities in Bob's country connected by roads. Some pairs of cities are connected by public transport. There are two competing transport companies — \textbf{Boblines} operating buses and \textbf{Bobrail} running trains. When traveling from $A$ to $B$, a passenger always first selects the mode of transport (either bus or train), and then embarks on a journey. For every pair of cities, there are exactly two ways of how to travel between them without visiting any city more than once — one using only bus routes, and the second using only train routes. Furthermore, there is no pair of cities that is directly connected by both a bus route and a train route. You obtained the plans of each of the networks. Unfortunately, each of the companies uses different names for the same cities. More precisely, the bus company numbers the cities using integers from $1$ to $N$, while the train company uses integers between $N + 1$ and $2N$. Find one possible mapping between those two numbering schemes, such that no pair of cities is connected directly by both a bus route and a train route. Note that this mapping has to map different cities to different cities.
For $k \ge 0$, we call graph $G$ a $k$-star, if we can remove $k$ vertices to form a star, and we cannot remove $k - 1$ vertices to form a star. In this terminology, a $0$-star is a star. It should be rather obvious that if one of the graphs is a 0-star, then the answer is clearly No. This is because the minimum degree of a vertex in a tree is $1$, and star has a vertex of degree $N - 1$, and the corresponding vertex in the merged graph would have degree at least $N$, which is clearly impossible. Surprisingly, the answer is Yes in all other cases. We prove this by giving an explicit construction. There are three cases: Assume that one of the graphs is 1-star. Without loss of generality let it be $G$. Denote $v$ the vertex that can be removed to turn $G$ into star, $u$ its only neighbor, and $w$ be the vertex of degree $N - 2$. In graph $H$, find any leaf and denote it $w'$. Let its only neighbour be $u'$. Furthermore, pick $v'$ a vertex that is not adjacent to $u'$ (such vertex always exists as $H$ is not a star). Observe that mapping $u\rightarrow u^{\prime}$, $v\to v^{\prime}$ and $w\to w^{\prime}$ does not introduce multiedges in the merged graph. Furthermore, all other edges in $G$ are incident to $w$, but none of the unprocessed edges in $H$ are incident to $w'$. We can thus map the remaining $N - 3$ vertices arbitrarily. Mapping a 1-star $G$ (in red), to arbitrary tree $H$ (in blue). See that there are no multiedges between $u, v, w$, no multiedges from $u, v, w$ to the rest of the graph (since $u$, $v$ and $w'$ have no neighbours there), and no multiedges in the rest of the graph (since $G\setminus\{u,v,w\}$ is a graph with zero edges). Mapping a 1-star $G$ (in red), to arbitrary tree $H$ (in blue). See that there are no multiedges between $u, v, w$, no multiedges from $u, v, w$ to the rest of the graph (since $u$, $v$ and $w'$ have no neighbours there), and no multiedges in the rest of the graph (since $G\setminus\{u,v,w\}$ is a graph with zero edges). 
$N = 4$ or $N = 5$: There are only five non-isomorphic trees on this many vertices. Two of them are 0-stars (for which the answer is No), two of them are 1-stars (that we handled in previous case). The only remaining graph is a path on five vertices. Two such graphs can always be merged together. For simplicity of implementation, we can simply try all $5!$ possible mappings. All trees on 4 or 5 vertices. All trees on 4 or 5 vertices. Otherwise, we use induction. In $G$, find two leaves $u$ and $v$ such that $d(u, v) \ge 3$ and $G\setminus\{u,v\}$ is not a star. This is always possible: either $G$ is a 2-star, and then we can pick one of the neighbouring leaves of the vertex with highest degree and one other leaf, or we can pick any two leaves that are not adjacent to the same vertex. Do the same thing for $H$, finding $u'$ and $v'$. Remove these pairs of vertices from the respective graphs and use induction to merge those smaller graphs. Now we can either map $u\to u^{\prime}$, $v\to v^{\prime}$ or $u\to v^{\prime}$, $v\to u^{\prime}$ - as only one of these mappings may introduce a multiedge. The above is relatively simple to implement in $O(n^{2})\,$. To turn it into an $O(n\log n)$ algorithm, we need to maintain a few additional information about the graph. Furthemore, note that a graph $G$ is a $k$-star if and only if the maximum degree is $|G| - k - 1$. The list vertices sorted by their degree, for instance using set of pairs. This is so that we can find the vertex with maximum degree easily, which is useful for testing $k$-starness. the set of vertices with a leaf neighbour, and the set of leaves neighbouring a given vertex, so that we can find the leaves quickly Using the above, we can always find appropriate leaf and remove it in $O(\log n)$, which is sufficient.
[ "constructive algorithms", "graphs", "trees" ]
3,200
null
924
A
Mystical Mosaic
There is a rectangular grid of $n$ rows of $m$ initially-white cells each. Arkady performed a certain number (possibly zero) of operations on it. In the $i$-th operation, a non-empty subset of rows $R_{i}$ and a non-empty subset of columns $C_{i}$ are chosen. For each row $r$ in $R_{i}$ and each column $c$ in $C_{i}$, the intersection of row $r$ and column $c$ is coloured black. There's another constraint: a row or a column can only be chosen at most once among all operations. In other words, it means that no pair of $(i, j)$ ($i < j$) exists such that $R_{i}\cap R_{j}\neq\varnothing$ or $C_{i}\cap C_{j}\neq\varnothing$, where $\cap$ denotes intersection of sets, and $\varnothing$ denotes the empty set. You are to determine whether a valid sequence of operations exists that produces a given final grid.
No row or column can be selected more than once, hence whenever a row $r$ is selected in an operation, all cells in it uniquely determine the set of columns that need to be selected - let's call it $S_{r}$. Let's assume a valid set of operations exists. Take out any two rows, $i$ and $j$. If rows $i$ and $j$ are selected in the same operation, we can deduce that $S_{i} = S_{j}$; if they're in different operations, we get $S_{i}\cap S_{j}=\varnothing$. Therefore, if $S_{i} \neq S_{j}$ and $S_{i}\cap S_{j}\neq\varnothing$ hold for any pair of rows $(i, j)$, no valid operation sequence can be found. Otherwise (no pair violates the condition above), a valid sequence of operations can be constructed: group all rows with the same $S$'s and carry out an operation with each group. Thus, it's a necessary and sufficient condition for the answer to be "Yes", that for each pair of rows $(i, j)$, either $S_{i} = S_{j}$ or $S_{i}\cap S_{j}=\varnothing$ holds. The overall complexity is $O(n^{2}m)$. It can be divided by the system's word size if you're a bitset enthusiast, and a lot more if hashes and hash tables release their full power.
[ "greedy", "implementation" ]
1,300
#include <cstdio>
#include <algorithm>
#include <vector>

// Codeforces 924A — Mystical Mosaic.
// A colouring is achievable iff any two rows either share no black column
// or have exactly the same set of black columns; rows with identical sets
// form one operation's row group.

static const int LIMIT = 1004;
static int rows, cols;
static bool black[LIMIT][LIMIT];          // black[r][col] — cell is '#'
static std::vector<int> rowsInCol[LIMIT]; // rows that have a black cell in each column
static bool seen[LIMIT] = { false };      // row already placed in some group

int main() {
    scanf("%d%d", &rows, &cols);
    getchar(); // swallow the newline after the dimensions
    for (int r = 0; r < rows; ++r) {
        // read cols grid characters plus the trailing newline (index == cols)
        for (int col = 0; col <= cols; ++col) {
            black[r][col] = (getchar() == '#');
            if (black[r][col]) rowsInCol[col].push_back(r);
        }
    }
    for (int r = 0; r < rows; ++r) {
        if (seen[r]) continue;
        // gather every row sharing at least one black column with row r
        std::vector<int> group;
        for (int col = 0; col < cols; ++col) {
            if (!black[r][col]) continue;
            for (int other : rowsInCol[col]) group.push_back(other);
        }
        std::sort(group.begin(), group.end());
        group.erase(std::unique(group.begin(), group.end()), group.end());
        for (int other : group) {
            // a row may belong to only one group
            if (seen[other]) { puts("No"); return 0; }
            seen[other] = true;
            // every row of the group must match row r exactly
            for (int col = 0; col < cols; ++col) {
                if (black[r][col] != black[other][col]) { puts("No"); return 0; }
            }
        }
    }
    puts("Yes");
    return 0;
}
924
B
Three-level Laser
An atom of element X can exist in $n$ distinct states with energies $E_{1} < E_{2} < ... < E_{n}$. Arkady wants to build a laser on this element, using a three-level scheme. Here is a simplified description of the scheme. Three distinct states $i$, $j$ and $k$ are selected, where $i < j < k$. After that the following process happens: - initially the atom is in the state $i$, - we spend $E_{k} - E_{i}$ energy to put the atom in the state $k$, - the atom emits a photon with useful energy $E_{k} - E_{j}$ and changes its state to the state $j$, - the atom spontaneously changes its state to the state $i$, losing energy $E_{j} - E_{i}$, - the process repeats from step 1. Let's define the energy conversion efficiency as $\eta={\frac{E_{k}-E_{j}}{E_{k}-E_{i}}}$, i. e. the ratio between the useful energy of the photon and spent energy. Due to some limitations, Arkady can only choose such three states that $E_{k} - E_{i} \le U$. Help Arkady to find the maximum possible energy conversion efficiency within the above constraints.
First of all, you can note that for fixed $i$ and $k$ setting $j = i + 1$ is always the best choice. Indeed, if $X > Y$, then ${\frac{A-X}{B}}<{\frac{A-Y}{B}}$ for positive $B$. Then, let's fix $i$. Then $j = i + 1$, and what is the optimal $k$? We can define the energy loss as $l=1-\eta={\frac{E_{j}-E_{i}}{E_{k}-E_{i}}}$. As we need to minimize the loss, it's obvious that we should maximize $E_{k}$, so we should choose as large $k$ as possible satisfying $E_{k} - E_{i} \le U$. This is a classic problem that can be solved with two pointers approach, this leads to $O(n)$ solution, or with binary search approach, this leads to $O(n\log n)$ solution. Both are acceptable.
[ "binary search", "greedy", "two pointers" ]
1,600
#include <iostream> #include <sstream> #include <cstdio> #include <vector> #include <cmath> #include <queue> #include <string> #include <cstring> #include <cassert> #include <iomanip> #include <algorithm> #include <set> #include <map> #include <ctime> #include <cmath> #define forn(i, n) for(int i=0;i<n;++i) #define fore(i, l, r) for(int i = int(l); i <= int(r); ++i) #define sz(v) int(v.size()) #define all(v) v.begin(), v.end() #define pb push_back #define mp make_pair #define x first #define y1 ________y1 #define y second #define ft first #define sc second #define pt pair<int, int> template<typename X> inline X abs(const X& a) { return a < 0? -a: a; } template<typename X> inline X sqr(const X& a) { return a * a; } typedef long long li; typedef long double ld; using namespace std; const int INF = 1000 * 1000 * 1000; const ld EPS = 1e-9; const ld PI = acos(-1.0); const int N = 100 * 1000 + 13; int n, u; int a[N]; int idx1 = -1, idx2, idx3; inline void read() { scanf("%d%d", &n, &u); for (int i = 0; i < n; i++) { scanf("%d", &a[i]); } } inline void solve() { int k = 0; for (int i = 0; i < n; i++) { k = max(k, i); while (k + 1 < n && a[k + 1] - a[i] <= u) { k++; } if (k - i - 1 <= 0) { continue; } int j = i + 1; if (idx1 == -1 || (a[k] - a[j]) * 1ll * (a[idx3] - a[idx1]) > (a[idx3] - a[idx2]) * 1ll * (a[k] - a[i])) { idx1 = i, idx2 = j, idx3 = k; } } if (idx1 == -1) { cout << -1 << endl; return; } cout.precision(20); cout << (double)(a[idx3] - a[idx2]) / (a[idx3] - a[idx1]) << endl; } int main () { #ifdef fcspartakm freopen("input.txt", "r", stdin); //freopen("output.txt", "w", stdout); #endif srand(time(NULL)); cerr << setprecision(10) << fixed; read(); solve(); //cerr << "TIME: " << clock() << endl; }
924
C
Riverside Curio
Arkady decides to observe a river for $n$ consecutive days. The river's water level on each day is equal to some real value. Arkady goes to the riverside each day and makes a mark on the side of the channel at the height of the water level, but if it coincides with a mark made before, no new mark is created. The water does not wash the marks away. Arkady writes down the number of marks strictly above the water level each day, on the $i$-th day this value is equal to $m_{i}$. Define $d_{i}$ as the number of marks strictly under the water level on the $i$-th day. You are to find out the minimum possible sum of $d_{i}$ over all days. There are no marks on the channel before the first day.
Define $t_{i}$ as the total number of marks (above or at or under the water level) on the $i$-th day. As $t_{i} = m_{i} + 1 + d_{i}$, minimizing $\textstyle\sum d_{i}$ is equivalent to minimizing $\textstyle\sum t_{i}$. For the $i$-th day we would like to find the minimum value of $t_{i}$. Needless to say $t_{i} \ge \max\{t_{i - 1}, m_{i} + 1\}$ should hold. On each day we can increase $t$ by at most one, thus $t_{i} \ge t_{i + 1} - 1$, which is equivalent to the condition that $t_{i} \ge t_{j} - (j - i)$ holds for all $j > i$. The first condition is straightforward - just go over from left to right and keep a record; but how to ensure that the second condition hold? One of the approaches is going backwards. Go from right to left and keep a counter which, on each day, decreases by $1$ and then is taken maximum with the $m_{i} + 1$ currently at hand. This counter always records the minimum required $t_{i}$ value that satisfies the second condition. Assign this counter to $t_{i}$ along the way. Based on such minimum decisions, raising any $t_{i}$ by any positive value does not allow other $t_{i}$'s to be reduced. Hence summing this value over all days provides us with an optimal answer in $O(n)$ time.
[ "data structures", "dp", "greedy" ]
1,700
#include <cstdio>
#include <algorithm>

// Codeforces 924C — Riverside Curio.
// need[i] is the minimum total number of marks that must exist on day i;
// the answer is sum(total marks) - sum(m[i] + 1).

typedef long long int64;

static const int SIZE = 1e5 + 4;
static int days;
static int above[SIZE]; // marks strictly above the water level on each day
static int need[SIZE];  // minimal admissible total mark count per day

int main() {
    scanf("%d", &days);
    for (int i = 0; i < days; ++i) scanf("%d", &above[i]);

    // Right-to-left sweep: the total can grow by at most one per day, so a
    // lower bound propagates backwards, decaying by one each step and being
    // lifted to above[i] + 1 (marks above plus the one at water level).
    int lower = 0;
    for (int i = days - 1; i >= 0; --i) {
        lower = std::max(0, lower - 1);
        lower = std::max(lower, above[i] + 1);
        need[i] = lower;
    }

    // Left-to-right sweep: totals never decrease over time. Accumulate the
    // marks strictly below water: running total minus (above[i] + 1).
    int64 answer = 0;
    int running = 0;
    for (int i = 0; i < days; ++i) {
        running = std::max(running, need[i]);
        answer += running - (above[i] + 1);
    }
    printf("%lld\n", answer);
    return 0;
}
924
D
Contact ATC
Arkady the air traffic controller is now working with $n$ planes in the air. All planes move along a straight coordinate axis with Arkady's station being at point $0$ on it. The $i$-th plane, small enough to be represented by a point, currently has a coordinate of $x_{i}$ and is moving with speed $v_{i}$. It's guaranteed that $x_{i}·v_{i} < 0$, i.e., all planes are moving towards the station. Occasionally, the planes are affected by winds. With a wind of speed $v_{wind}$ (not necessarily positive or integral), the speed of the $i$-th plane becomes $v_{i} + v_{wind}$. According to weather report, the current wind has a steady speed falling inside the range $[ - w, w]$ (inclusive), but the exact value cannot be measured accurately since this value is rather small — smaller than the absolute value of speed of any plane. Each plane should contact Arkady at the exact moment it passes above his station. And you are to help Arkady count the number of pairs of planes $(i, j)$ ($i < j$) there are such that there is a possible value of wind speed, under which planes $i$ and $j$ contact Arkady at the same moment. This value needn't be the same across different pairs. The wind speed is the same for all planes. You may assume that the wind has a steady speed and lasts arbitrarily long.
Stuck in tedious fractions produced by kinematic formulae? No, that's not the way to go. Forgetting about all physics, how does wind speed affect the time when a plane flies over the origin? Consider a plane flying from left to right, passing through the origin. Initially it has speed $v_{i} - w$ and meets the origin at time $t$. As the wind speed goes from $- w$ to $+ w$, the plane's speed continually rises to $v_{i} + w$, with $t$ becoming smaller and smaller (this is true because $w < |v_{i}|$). The similar holds for planes going from right to left, with the exception that $t$ becomes greater and greater. Then, how does wind speed affect the order in which a pair of planes pass the origin? Imagine two planes, $A$ and $B$. With wind speed $- w$, they arrive at the origin at moments $t_{A}$ and $t_{B}$, respectively. As the wind speed goes from $- w$ to $+ w$, $t_{A}$ moves smoothly and so does $t_{B}$ and suddenly... Uh! They become the same! That makes them a valid pair. From this perspective we can conclude that for such a pair of planes $A$ and $B$, if $A$ arrives at the origin at moment $t_{A}$ and $t'_{A}$ with wind speed $- w$ and $+ w$, and $B$ at $t_{B}$ and $t'_{B}$ respectively, they possibly meet at the origin iff $(t_{A} - t_{B}) \cdot (t'_{A} - t'_{B}) \le 0$. Oh, what's this? Inversion pairs, of course! Apply wind $- w$ and $+ w$ to all the planes, find out the orders in which they arrive at the origin under the two winds, and count the pairs $(A, B)$ where $A$ goes before $B$ in the first permutation, and after $B$ in the second one. One detail to note is that in case of ties, pairs should be sorted by descending values of speed in the first pass and ascending in the second (the speeds cannot be the same since all planes are distinct). That leaves us only with a binary indexed tree to be implemented. The overall time complexity is $O(n\log n)$. 
Please note that it's recommended to use an integer pair to represent a fraction, since the difference between arrival times can be as little as $10^{ - 10}$ - though a floating point error tolerance of $5 \times 10^{ - 12}$ passes all tests, its robustness is not so easy to control.
[]
2,500
// Codeforces 924D — Contact ATC.
// For each plane, compute its arrival time at the origin under the two extreme
// winds -w and +w as exact rationals. Two planes can meet at the origin iff
// the order of their arrival times differs (or ties) between the two extremes,
// so the answer is the number of inversions between the two orderings,
// counted with a binary indexed tree.
#include <cstdio>
#include <algorithm>
#include <utility>

typedef long long int64;

static const int MAXN = 1e5 + 4;
static const int MAXX = 1e8 + 4; // coordinate/speed magnitude bound (unused below)

static int n, w;             // number of planes, max wind magnitude
static int x[MAXN], v[MAXN]; // positions and speeds; x[i]*v[i] < 0 per the statement

// Exact rational num/deno, normalized to deno > 0 and lowest terms.
struct fraction {
    template <typename T> static inline T gcd(const T a, const T b) { return (b == 0) ? a : gcd(b, a % b); }
    int64 num, deno;
    // Normalize: make the denominator positive, divide out the gcd.
    inline void simplify() {
        if (deno < 0) { num *= -1; deno *= -1; }
        int64 g = gcd(num < 0 ? -num : num, deno);
        num /= g; deno /= g;
    }
    fraction() { }
    fraction(int64 num, int64 deno) : num(num), deno(deno) { simplify(); }
    // Cross-multiplied comparisons; magnitudes here keep the products within int64.
    inline bool operator < (const fraction &rhs) const { return num * rhs.deno < deno * rhs.num; }
    inline bool operator != (const fraction &rhs) const { return num * rhs.deno != deno * rhs.num; }
};

// Time bounds at which planes can arrive at the origin:
// .first = arrival time under wind -w, .second = under wind +w.
static std::pair<fraction, fraction> t[MAXN];
// Used at discretization of the +w arrival times.
static std::pair<fraction, int> d[MAXN];
static int p[MAXN]; // p[i] = rank of the +w arrival time of the plane that is i-th in -w order

// Binary indexed tree (Fenwick) over ranks, for inversion counting.
struct bit {
    static const int MAXN = ::MAXN;
    int f[MAXN];
    bit() { std::fill(f, f + MAXN, 0); }
    inline void add(int pos, int inc) { for (++pos; pos < MAXN; pos += (pos & -pos)) f[pos] += inc; }
    inline int sum(int rg) { int ans = 0; for (++rg; rg; rg -= (rg & -rg)) ans += f[rg]; return ans; }
    inline int sum(int lf, int rg) { return sum(rg) - sum(lf - 1); }
} arkady;

int main() {
    scanf("%d%d", &n, &w);
    for (int i = 0; i < n; ++i) scanf("%d%d", &x[i], &v[i]);
    // Arrival time = -x / (v + wind); wind = -w gives .first, wind = +w gives .second.
    for (int i = 0; i < n; ++i) {
        int64 v1 = v[i] - w, v2 = v[i] + w;
        t[i] = {fraction(-x[i], v1), fraction(-x[i], v2)};
    }
    // Sort by the -w arrival time. Negating the second component first makes
    // ties in .first break by DESCENDING +w time (deno > 0, so flipping num
    // flips the fraction's sign), which is the tie order required for correct
    // inversion counting; the negation is undone right after.
    for (int i = 0; i < n; ++i) t[i].second.num *= -1;
    std::sort(t, t + n);
    for (int i = 0; i < n; ++i) t[i].second.num *= -1;
    // Discretize the +w arrival times into ranks; equal fractions share a rank.
    for (int i = 0; i < n; ++i) d[i] = {t[i].second, i};
    std::sort(d, d + n);
    for (int i = 0, rk = -1; i < n; ++i) {
        if (i == 0 || d[i].first != d[i - 1].first) ++rk;
        p[d[i].second] = rk;
    }
    /*for (int i = 0; i < n; ++i) printf("%.4lf %.4lf | %d\n", (double)t[i].first.num / t[i].first.deno, (double)t[i].second.num / t[i].second.deno, p[i]);*/
    // Scan in -w order; each earlier plane whose +w rank is >= the current one
    // forms a meeting pair (>= includes exact ties at wind +w).
    int64 ans = 0;
    for (int i = 0; i < n; ++i) { ans += arkady.sum(p[i], MAXN - 1); arkady.add(p[i], 1); }
    printf("%lld\n", ans);
    return 0;
}
924
E
Wardrobe
Olya wants to buy a custom wardrobe. It should have $n$ boxes with heights $a_{1}, a_{2}, ..., a_{n}$, stacked one on another in some order. In other words, we can represent each box as a vertical segment of length $a_{i}$, and all these segments should form a single segment from $0$ to $\textstyle\sum_{i=1}^{n}a_{i}$ without any overlaps. Some of the boxes are important (in this case $b_{i} = 1$), others are not (then $b_{i} = 0$). Olya defines the convenience of the wardrobe as the number of important boxes such that their bottom edge is located between the heights $l$ and $r$, inclusive. You are given information about heights of the boxes and their importance. Compute the maximum possible convenience of the wardrobe if you can reorder the boxes arbitrarily.
The first idea of the author's solution is to reverse the problem: change $l$ to $H - r$ and $r$ to $H - l$, where $H$ is the total height of the wardrobe. Now an important box is counted in the answer if and only if its top edge is within the segment $[l, r]$. We'll see later the profit of this operation. Now, we'll build a wardrobe of arbitrary height using only a subset of the boxes, and choose the maximum possible answer. Why can we remove the constraint to use all boxes? We can always assume that we add all the boxes we don't take at the top of the boxes we take, and the answer won't decrease. So, we can do some kind of knapsack, where not taking a box means putting it on the top after considering all boxes. What we don't know is how to compute the answer and in which order to consider the boxes in the knapsack. Ok, if we consider them in such an order that there is an optimal answer in which the boxes we "take" in the knapsack always come in this order, then computing the answer is easy: we can always assume that we put a new box on the top of already taken ones, and add $1$ to the current answer if it is an important box and its top edge falls in the range $[l, r]$. Now we should find such an order. Note that in an optimal answer we can always arrange boxes in this order: some number of unimportant boxes, then some number of important boxes that don't increase the answer, then some number of important boxes that increase the answer, and after that a mix of important and unimportant boxes which don't count in the answer and that we consider as "not taken" in the knapsack. This means that we can first consider all unimportant boxes in the knapsack, and then all important ones. It's also easy to see that the order of unimportant boxes does not matter. However, it turns out that the order of important boxes matters. To choose the order of important boxes, we can use an old, but good trick. 
Suppose two important boxes with heights $a_{i}$ and $a_{j}$ stand one on the other. Answer the question: "What is the condition such that if it is satisfied, then it is always better to put $a_{j}$ on the top of $a_{i}$, and not vice versa?" Here we consider only the boxes that count in the answer and those under them, because other we simply "don't take" in the knapsack. It turns out that the condition is simple: $a_{j} \le a_{i}$, no matter do these boxes count in the answer or not. Here we used the fact that we inversed the problem, and the position of the top edge matters, not the bottom one. So, as we now know that it is always optimal to put the important boxes from largest to smallest (in the inversed problem), we can sort them in that order and perform the knapsack. The complexity is $O(n\cdot\sum a_{i})$. This can also be reduced to $O(H{\sqrt{H}})$ where $H$ is the height of the wardrobe, using a standard optimization for the knapsack.
[ "dp", "greedy" ]
2,700
/**
 * This is a solution for problem wardrobe
 * This is nk_ok.cpp
 *
 * @author: Nikolay Kalinin
 * @date: Tue, 20 Mar 2018 21:48:33 +0300
 */
// Codeforces 924E — Wardrobe. Mirror the window to [sumh - r, sumh - l] so a
// box counts when its TOP edge lands in the window; then a 0/1 knapsack over
// stack heights, with unimportant boxes processed first and important boxes
// from tallest to shortest (optimal order in the mirrored problem).
#include <bits/stdc++.h>

using namespace std;

using ll = long long;
using ld = long double;
using D = double;
using uint = unsigned int;
template<typename T> using pair2 = pair<T, T>;

#ifdef WIN32
#define LLD "%I64d"
#else
#define LLD "%lld"
#endif

#define pb push_back
#define mp make_pair
#define all(x) (x).begin(),(x).end()
#define fi first
#define se second

// A box: h = height, t = importance flag (1 if it may count in the answer).
struct tbox {
    int h, t;
};

// Knapsack processing order: unimportant boxes (t = 0) first, then important
// ones by decreasing height.
inline bool operator<(const tbox &a, const tbox &b) {
    if (a.t != b.t) return a.t < b.t;
    return a.h > b.h;
}

const int maxn = 10005;
const int inf = 1e9;

tbox b[maxn];
int ans[maxn]; // ans[h] = best convenience among stacks of total height exactly h (-inf if unreachable)
int n, l, r;

int main() {
    scanf("%d%d%d", &n, &l, &r);
    int sumh = 0;
    for (int i = 0; i < n; i++) {
        scanf("%d", &b[i].h);
        sumh += b[i].h;
    }
    // Mirror the window: in the reversed problem a box counts when its top
    // edge lies in [sumh - r, sumh - l].
    tie(l, r) = pair{sumh - r, sumh - l};
    for (int i = 0; i < n; i++) scanf("%d", &b[i].t);
    sort(b, b + n);
    for (int i = 0; i <= sumh; i++) ans[i] = -inf;
    ans[0] = 0; // empty stack is reachable with convenience 0
    for (int i = 0; i < n; i++) {
        // Backward 0/1 knapsack update; an important box contributes 1 when
        // its top edge h + b[i].h falls inside [l, r].
        for (int h = sumh - b[i].h; h >= 0; h--)
            ans[h + b[i].h] = max(ans[h + b[i].h], ans[h] + b[i].t * (h + b[i].h >= l && h + b[i].h <= r));
    }
    // Untaken boxes are implicitly stacked on top, so any reachable height is a valid answer.
    cout << *max_element(ans, ans + sumh + 1) << endl;
    return 0;
}
924
F
Minimal Subset Difference
We call a positive integer $x$ a $k$-beautiful integer if and only if it is possible to split the multiset of its digits in the decimal representation into two subsets such that the difference between the sum of digits in one subset and the sum of digits in the other subset is \textbf{less than or equal to} $k$. Each digit should belong to exactly one subset after the split. There are $n$ queries for you. Each query is described with three integers $l$, $r$ and $k$, which mean that you are asked how many integers $x$ between $l$ and $r$ (inclusive) are $k$-beautiful.
Let $f(x)$ be the minimal subset difference mentioned in the problem statement. The problem seems like a regular digit DP problem. However, it's a bit hard to reduce the number of DP states. Let's take a careful consideration. For a given integer $x$, we can use knapsack DP to determine $f(x)$. Denote the sum of digits of $x$ as $s(x)$. You can just calculate whether there is a subset such that the sum of the subset is a fixed number $y$, and then find the maximal $y$ $\left(0\leq y\leq{\frac{s(x)}{2}}\right)$ which concludes $f(x) = s(x) - 2y$. By the way, this type of knapsack DP could be implemented by bitwise operation. If we defined $dp(len, sum, state)$ as the number of integers $x$ such that the length of $x$ is $len$, the digit sum is $sum$, and the knapsack DP array is $state$ (an array only consisting $0$ and $1$, which could be represented as bit vector), the problem would be difficult to solve. For example, to represent $x = 88888888899999999$, a case of $f(x) = 0$, the length of $state$ might be $73$, which is a bit long vector. Although most of $state$s satisfy $dp(len, sum, state) = 0$, there are still many $state$s which might be used. You may notice the order of digits is unnecessary for the knapsack DP. If we defined $state$ as the number of appearance of digits $1, 2, ..., 9$ (digit $0$ is unnecessary), the number of $state$s would be ${\binom{18+9}{9}}\approx5\cdot10^{6}$. Hence, we can apply knapsack DP for each $state$ first and then calculate for digit DP. Here are more details. Let's redefine $dp(len, k, state)$ as the number of ways to arrange the lowest $len$ digits of $x$ such that $f(x) = k$ and the other digits (higher than the lowest $len$ digits) form the $state$ (i. e. the number of appearance of digits $1, 2, ..., 9$). We firstly search all the $state$s such that the total number of appearance $ \le 18$ and $f(x) = k$, and then set $dp(0, k, state) = 1$. 
After searching, we calculate $dp(len, k, newstate)$ from $dp(len - 1, k, state) > 0$ by enumerating the $len$-th digit. However, the number of $dp(len, k, state) > 0$ is still too large to calculate for digit DP. You should notice that in decimal representation it always has $0 \le f(x) \le 9$ for any integer $x$. Furthermore, because of the distribution of digits, most of $state$s are in cases of $f(x) = 0, 1$ (you can make a knapsack DP to prove). In addition, it is easy to show $f(x)$ and $s(x)$ always have the same parity, so we can apply inclusion-exclusion principle to solve the problem only in the cases of $f(x) \ge 2$ and another counting problem with fixed parity of $s(x)$. The total time complexity in above is $O\left(\binom{L+D}{D}+LDS\log S+n(9-k)LD\log S\right)$, where $L$ $( = 18)$ is the maximal length of $x$, $D$ $( = 9)$ is the maximal digit, and $S$ $( \approx 3 \cdot 10^{4})$ is the number of distinct $state$s such that there exists $dp(len, k, state) > 0$ $(0 \le len \le L, 2 \le k \le 9)$. However, it can hardly pass the tests with $n = 5 \cdot 10^{4}$, because we have some fairly worse tests to maximize the times of $dp$ access (e. g. $l$ and $r$ have a lot of $9$ as digits and $k = 1$). We could make a tradeoff between pretreatment and queries by several ways. For example, define $dp(len, k, state, upp)$ as the similar but it memorizes that the $len$-th digit is less than $upp$. If we did that, the total time complexity would be $O\left(\binom{L+D}{D}+LDS\log S+n(9-k)L\log S\right)$, which is acceptable. The aforementioned solution is not easy to code; you can use some advanced approach to get accepted, though.
[ "dp" ]
3,200
// Solution for the "minimal subset difference of digits" counting problem.
// For each query (L, R, k) it prints how many x in [L, R] satisfy f(x) <= k,
// where f(x) is the minimal |sum(A) - sum(B)| over all ways to split the
// decimal digits of x into two multisets A and B (see the editorial above).
#include <bits/stdc++.h>
using namespace std;
typedef long long LL;
// maxd: digits are 0..9; maxl: x has at most 18 digits; maxs: only subset
// sums up to floor(9*18/2) = 81 ever need to be tracked; BLEN/BMSK: each of
// the digits 1..9 gets a 5-bit counter packed into one 64-bit "state" mask
// (digit 0 never affects f(x), so it is not stored in the mask).
const int maxd = 10, maxl = 18, maxs = 81, BLEN = 5, BMSK = 31;
// Sparse map from state mask -> counts, with one value array per upper bound
// on the leading digit of the suffix being counted (vals[upp]).  After
// summation(), vals[upp] holds prefix sums over upp, so find(upp, key)
// answers "number of ways with leading suffix digit <= upp".
struct States {
  vector<LL> keys, vals[maxd + 1];
  // Register a key; duplicates are merged later by initialize().
  void insert(LL key) { keys.push_back(key); }
  // Sort + dedupe the registered keys and reset every value array to val.
  void initialize(LL val = 0) {
    sort(keys.begin(), keys.end());
    keys.erase(unique(keys.begin(), keys.end()), keys.end());
    for(int i = 0; i < maxd; ++i) vals[i].assign(keys.size(), val);
  }
  // Convert vals[0..maxd-1] into prefix sums over the digit upper bound.
  void summation() {
    for(int i = 1; i < maxd; ++i)
      for(vector<LL>::iterator it = vals[i - 1].begin(), jt = vals[i].begin(); jt != vals[i].end(); ++it, ++jt)
        *jt += *it;
  }
  size_t size() const { return keys.size(); }
  // Add adt ways to an existing key under digit-upper-bound bucket upp.
  void update(int upp, LL key, LL adt) { vals[upp][lower_bound(keys.begin(), keys.end(), key) - keys.begin()] += adt; }
  // Look up a key; returns 0 for unknown keys (mask not reachable).
  LL find(int upp, LL key) {
    vector<LL>::iterator it = lower_bound(keys.begin(), keys.end(), key);
    if(it == keys.end() || *it != key) return 0;
    return vals[upp][it - keys.begin()];
  }
// f[dif][m]: key = mask of the already-fixed high-order digits; value
// (bucket upp, after summation) = number of m-digit low-order suffixes whose
// leading digit is <= upp and which complete the key to a full digit
// multiset with minimal difference exactly dif.
} f[maxd + 1][maxl + 1];
// c[p][m]: number of m-digit strings (digits 0..9) whose digit sum has
// parity p.  bit[d]: the packed-mask increment for one occurrence of digit d.
LL c[2][maxl + 1], bit[maxd + 1];
// vis[dep]: achievable subset sums (<= maxs) of the digits chosen so far.
bitset<maxs + 1> vis[maxd + 1];
// Enumerate every multiset of digits 1..9 of total size <= maxl (dep = digit
// currently being added, cnt = digits used, sum = digit sum, msk = packed
// mask).  At dep == maxd the best split is read off vis: the largest
// achievable subset sum i <= sum/2 gives dif = sum - 2i; masks with dif > 1
// seed f[dif][0] (dif <= 1 cases are handled separately via parity, per the
// editorial).
void dfs(int dep, int cnt, int sum, LL msk) {
  if(dep == maxd) {
    for(int i = sum >> 1; i >= 0; --i)
      if(vis[dep - 1].test(i)) {
        int dif = sum - i - i;
        if(dif > 1) f[dif][0].insert(msk);
        break;
      }
    return;
  }
  vis[dep] = vis[dep - 1];
  dfs(dep + 1, cnt, sum, msk);
  // Add one more copy of digit `dep` per iteration; shifted-out subset sums
  // (> maxs) can never be the chosen half, so dropping them is safe.
  while(cnt < maxl) {
    vis[dep] |= vis[dep] << dep;
    dfs(dep + 1, ++cnt, sum += dep, msk += bit[dep]);
  }
}
// Count integers x in [0, lim) with f(x) == dif, by walking the digits of
// lim and, at each position, adding the ways to finish with a strictly
// smaller digit (standard digit-DP "tight prefix" accounting).
LL solve(int dif, LL lim) {
  char dig[maxl + 3];
  int len = sprintf(dig, "%lld", lim);
  LL ret = 0, msk = 0;
  States *F = f[dif];
  for(int i = 0; i < len; ++i) {
    dig[i] -= '0';
    if(len - i > maxl) {
      // Limit longer than maxl digits (e.g. 10^18 + 1): in practice the
      // leading digit here is 1, so only j = 0 occurs and msk stays valid
      // -- presumably relies on the problem's bound R <= 10^18; TODO confirm.
      for(int j = 0; j < dig[i]; ++j) ret += F[len - i - 1].find(maxd - 1, msk);
    } else if(dig[i]) {
      ret += F[len - i].find(dig[i] - 1, msk);
      msk += bit[dig[i]];
    }
  }
  return ret;
}
// Count integers x in [0, lim) with odd digit sum.  Since f(x) and the digit
// sum s(x) share parity (see editorial), these are exactly the x with odd
// f(x), used for the k = 0 inclusion-exclusion.  `part` tracks the parity of
// the fixed prefix; the c[][] pairing always selects suffixes making the
// total digit sum odd.
LL solve2(LL lim) {
  char dig[maxl + 3];
  int len = sprintf(dig, "%lld", lim), part = 0;
  LL ret = 0;
  for(int i = 0; i < len; ++i) {
    dig[i] -= '0';
    // odd/even = how many choices for this digit (< dig[i]) are odd/even.
    int odd = dig[i] >> 1, even = (dig[i] + 1) >> 1;
    ret += c[part][len - 1 - i] * odd + c[part ^ 1][len - 1 - i] * even;
    part ^= dig[i] & 1;
  }
  return ret;
}
int main() {
  // Parity table: 5 even digits (0,2,4,6,8) and 5 odd digits per position.
  c[0][0] = 1;
  for(int i = 0; i < maxl; ++i) {
    int odd = maxd >> 1, even = (maxd + 1) >> 1;
    c[0][i + 1] = c[0][i] * even + c[1][i] * odd;
    c[1][i + 1] = c[0][i] * odd + c[1][i] * even;
  }
  // Packed-mask increments: digit d occupies bits [ (d-1)*BLEN, d*BLEN ).
  bit[1] = 1;
  for(int i = 2; i < maxd; ++i) bit[i] = bit[i - 1] << BLEN;
  // Seed f[dif][0] with every digit multiset whose minimal difference is dif.
  vis[0].set(0);
  dfs(1, 0, 0, 0);
  // Build f[i][j+1] from f[i][j]: peel one digit k off the remaining
  // multiset (key msk -> msk - bit[k], bucket upp = k), or place a 0 digit
  // (key unchanged, bucket 0) while the total length stays < maxl.  Pass 1
  // registers keys, pass 2 accumulates counts, then summation() makes the
  // upp buckets prefix sums.
  for(int i = 2; i < maxd; ++i) {
    f[i][0].initialize(1);
    for(int j = 0; j < maxl; ++j) {
      States &cur = f[i][j], &nxt = f[i][j + 1];
      for(int idx = 0, sz = cur.size(); idx < sz; ++idx) {
        int cnt = j;
        LL msk = cur.keys[idx], tmp = msk;
        for(int k = 1; k < maxd; ++k, tmp >>= BLEN) {
          int rem = tmp & BMSK;
          if(!rem) continue;
          cnt += rem;
          nxt.insert(msk - bit[k]);
        }
        if(cnt < maxl) nxt.insert(msk);
      }
      nxt.initialize(0);
      for(int idx = 0, sz = cur.size(); idx < sz; ++idx) {
        int cnt = j;
        LL msk = cur.keys[idx], ways = cur.vals[maxd - 1][idx], tmp = msk;
        for(int k = 1; k < maxd; ++k, tmp >>= BLEN) {
          int rem = tmp & BMSK;
          if(!rem) continue;
          cnt += rem;
          nxt.update(k, msk - bit[k], ways);
        }
        if(cnt < maxl) nxt.update(0, msk, ways);
      }
      nxt.summation();
    }
  }
  int t;
  scanf("%d", &t);
  for(int Case = 1; Case <= t; ++Case) {
    LL L, R;
    int k;
    scanf("%lld%lld%d", &L, &R, &k);
    // Count f(x) <= k by complement: subtract the x in [L, R] with f(x) > k.
    LL ans = R + 1 - L;
    if(!k) {
      // f(x) > 0 means either f odd (same parity as digit sum, counted by
      // solve2) or f even and >= 2 (counted per exact value by solve).
      ans -= solve2(R + 1) - solve2(L);
      for(int i = 2; i < maxd; i += 2) ans -= solve(i, R + 1) - solve(i, L);
    } else {
      for(int i = k + 1; i < maxd; ++i) ans -= solve(i, R + 1) - solve(i, L);
    }
    printf("%lld\n", ans);
  }
  return 0;
}
925
A
Stairs and Elevators
In the year of $30XX$ participants of some world programming championship live in a single large hotel. The hotel has $n$ floors. Each floor has $m$ sections with a single corridor connecting all of them. The sections are enumerated from $1$ to $m$ along the corridor, and all sections with equal numbers on different floors are located exactly one above the other. Thus, the hotel can be represented as a rectangle of height $n$ and width $m$. We can denote sections with pairs of integers $(i, j)$, where $i$ is the floor, and $j$ is the section number on the floor. The guests can walk along the corridor on each floor, use stairs and elevators. Each stairs or elevator occupies all sections $(1, x)$, $(2, x)$, $\ldots$, $(n, x)$ for some $x$ between $1$ and $m$. All sections not occupied with stairs or elevators contain guest rooms. It takes one time unit to move between neighboring sections on the same floor or to move one floor up or down using stairs. It takes one time unit to move up to $v$ floors in any direction using an elevator. You can assume you don't have to wait for an elevator, and the time needed to enter or exit an elevator is negligible. You are to process $q$ queries. Each query is a question "what is the minimum time needed to go from a room in section $(x_1, y_1)$ to a room in section $(x_2, y_2)$?"
First thing to mention is that we can use no more than one stairs or elevator per query. Indeed, optimal path is always a few sections horizontally, then a stair of elevator, then a few sections horizontally. Then, we can note that we can always use one of the nearest stairs/elevators to start/finish. Using this fact, we can binary search in the sequence of stairs/elevators to find the optimal one, and choose the optimum between using a stairs and an elevator. Don't forget about the case where you don't have to reach any stairs/elevators. The complexity is $O(q \log{n})$.
[ "binary search" ]
1,600
null
925
B
Resource Distribution
One department of some software company has $n$ servers of different specifications. Servers are indexed with consecutive integers from $1$ to $n$. Suppose that the specifications of the $j$-th server may be expressed with a single integer number $c_j$ of artificial resource units. In order for production to work, it is needed to deploy two services $S_1$ and $S_2$ to process incoming requests using the servers of the department. Processing of incoming requests of service $S_i$ takes $x_i$ resource units. The described situation happens in an advanced company, that is why each service may be deployed using not only one server, but several servers simultaneously. If service $S_i$ is deployed using $k_i$ servers, then the load is divided equally between these servers and each server requires only $x_i / k_i$ (that may be a fractional number) resource units. Each server may be left unused at all, or be used for deploying exactly one of the services (but not for two of them simultaneously). The service should not use more resources than the server provides. Determine if it is possible to deploy both services using the given servers, and if yes, determine which servers should be used for deploying each of the services.
Suppose that the load of the first service was divided among $k_1$ servers and the load of the second service was divided among $k_2$ servers. In such case first service will be running on $k_1$ servers of resource at least $p_1 = \lceil x_1 / k_1 \rceil$ and second service will be run on $k_2$ servers of resource at least $p_2 = \lceil x_2 / k_2 \rceil$. Suppose that $p_1 \leq p_2$, the remaining case will be dealt in the similar way. Remove all servers that have less than $p_1$ resources, we are not going to use them. We may consider only assignments in which any server assigned to the first service has at most as many resources as any server assigned to the second service (otherwise we may swap them and the answer will still be correct). In such manner we may show that the first service may be assigned to the first $k_1$ servers having at least $p_1$ resource units and the second service may be assigned to the last $k_2$ servers in ascending order of available resources. Finally notice that if we fix $k_1$, the optimal value of $k_2$ is minimum such that the last $k_2$ servers have at least $p_2$ resource units. Calculate the minimum possible $k_2$ in linear time, after that try each possible value of $k_1$ and check if the first $k_1$ servers having at least $p_1$ resource units do not intersect with the last $k_2$ servers (it may be checked in a single binary search). We got a solution with running time of $O(n \log n)$.
[ "binary search", "implementation", "sortings" ]
1,700
null
925
C
Big Secret
Vitya has learned that the answer for The Ultimate Question of Life, the Universe, and Everything is not the integer \sout{54} 42, but an increasing integer sequence $a_1, \ldots, a_n$. In order to not reveal the secret earlier than needed, Vitya encrypted the answer and obtained the sequence $b_1, \ldots, b_n$ using the following rules: - $b_1 = a_1$; - $b_i = a_i \oplus a_{i - 1}$ for all $i$ from 2 to $n$, where $x \oplus y$ is the bitwise XOR of $x$ and $y$. It is easy to see that the original sequence can be obtained using the rule $a_i = b_1 \oplus \ldots \oplus b_i$. However, some time later Vitya discovered that the integers $b_i$ in the cypher got shuffled, and it can happen that when decrypted using the rule mentioned above, it can produce a sequence that is not increasing. In order to save his reputation in the scientific community, Vasya decided to find some permutation of integers $b_i$ so that the sequence $a_i = b_1 \oplus \ldots \oplus b_i$ is strictly increasing. Help him find such a permutation or determine that it is impossible.
Let's assume that we've found a suitable permutation of all numbers, except all occurences of the number $1$. When can we insert the $1$'s so that the new arrangement of numbers is again good? We can see that the XOR of all numbers before any occurence of the number $1$ must be even, so there should an even number of odd numbers before it. Suppose that there are $x$ $1$'s in the input, and $y$ odd numbers greater than $1$. If $x > y + 1$, then in any arrangement there is going to be a pair of $1$'s such that there are no odd numbers between them, hence the condition above cannot hold for both of them simultaneously. On the other hand, if $x \leq y + 1$, then it is possible to insert the $1$'s into any permutation of greater numbers. Indeed, we can place one instance of $1$ at the start, and then place remaining $1$'s immediately after greater odd numbers. Note that this argument works just as well if we consider numbers in the range $[2^k, 2^{k + 1})$ as "$1$'s", and numbers in $[2^{k + 1}, \infty)$ as "numbers greater than $1$". Note further that it doesn't matter how exactly we insert the "$1$'s" since number of available gaps doesn't depend on that. Hence, we can go as follows: group the numbers by their leading bits. Make an empty list for the answer, and process the numbers in groups by decreasing of their leading bits. Suppose there are $x$ numbers with leading bit $k$, and $y$ greater numbers that have $1$ in the $k$-th bit. If $x > y + 1$, then there is no answer. Otherwise, insert the numbers from the current group as described above. The complexity of this solution is $O(n \log A)$, where $A$ is the largest value among the numbers in the input.
[ "constructive algorithms", "math" ]
2,200
null
925
D
Aztec Catacombs
Indiana Jones found ancient Aztec catacombs containing a golden idol. The catacombs consists of $n$ caves. Each pair of caves is connected with a two-way corridor that can be opened or closed. The entrance to the catacombs is in the cave $1$, the idol and the exit are in the cave $n$. When Indiana goes from a cave $x$ to a cave $y$ using an open corridor, all corridors connected to the cave $x$ change their state: all open corridors become closed, all closed corridors become open. Indiana wants to go from cave $1$ to cave $n$ going through as small number of corridors as possible. Help him find the optimal path, or determine that it is impossible to get out of catacombs.
Let us formulate the problem in terms of graph theory and make some observations. Denote the start and the finish vertices as $s$ and $t$. Observation 1. If vertices $s$ and $t$ are connected with a simple path consisting of $k$ edges, then by the statement of the problem Indiana Jones may use it leading us to the answer of length $k$. Thus, the answer does not exceed $d$ where $d$ is the length of the shortest path between $s$ and $t$ if $s$ and $t$ are connected and $\infty$ otherwise. Let us call paths consisting only of the original edges of the graph trivial and the paths including originally non-existent edges non-trivial. Observation 2. The length of any non-trivial path is at least 4. Indeed, let $s = v_0 \xrightarrow{e_1} v_1 \xrightarrow{e_2} \cdots \xrightarrow{e_k} v_k = t$ be the valid path in which some edge $e_i$ is missing in the original graph. Notice that the edge $e_1$ may not be missing as by the moment we follow it, nothing was flipped yet, and $e_2$ also may not be missing as that would require $e_2$ to coincide with $e_1$, which was just flipped. Also note that $e_3$ may not be the last edge in our path because otherwise it must be missing in the original graph (since the path is non-trivial), and we did not visit vertex $v_2$ yet as $v_2 \neq v_1$ and $v_2 \neq v_0 = s$. Thus, $k \geq 4$. Observation 3. If $d \leq 4$, then the answer is $d$. It immediately follows from two previous observations: shortest trivial path has the length of $d$ and shortest non-trivial has the length of at least $4$. Observation 4. If $d \geq 4$ and there exists a vertex $v_2$ at the distance of $2$ from $v_0 = s$, then there exists a non-trivial path of length $4$. Indeed, $v_0 \rightarrow v_1 \rightarrow v_2 \rightarrow v_0 \rightarrow t$ is such path where $v_1$ is a vertex through which the path of length $2$ between $v_0$ and $v_2$ passes. 
Finally, note that $v_2$ and $v_0$ are not initially connected (otherwise the distance between $v_0$ and $v_2$ would be 1), hence when we visit $v_2$, the edge $v_2 \rightarrow v_0$ is present. Similarly, by the moment of second visit of vertex $v_0$ originally missing edge $v_0 \rightarrow t$ appears. Observation 5. Any non-trivial path of length $4$ looks exactly as described in an observation 4, first two initially existent edges, then newly appeared edge leading to $s$ and finally newly appeared edge leading to $t$. It immediately follows from the explanation of the observation 2. Observation 6. If $d \geq 4$ and there no vertex $v_2$ located at the distance $2$ from $s$, then $s$ is connected with all vertices in its connected component and this component does not contain $t$. Observation 7. If, under conditions of the previous observation, it we remove vertex $s$, then all the vertices initially adjacent to it will be distributed into several connected components. If all connected components are cliques, there are no valid paths. Indeed, after first transition we get into some clique and then we may only move inside it and it keeps shrinking until we get to an isolated vertex from which we can not go anywhere. Observation 8. If any of the connected components adjacent with $s$ is not a clique, then the shortest valid non-trivial path has a length of $5$. Indeed, consider a connected component $C$ initially connected with $s$ that is not a clique. It is not a clique, hence it contains a vertex $v_1$ of degree less than $|C| - 1$. This vertex is not connected with a whole component $C$, thus there are vertices $v_2, v_3 \in C$ such that $v_3$ is not connected with $v_1$, while $v_1$ and $v_2$ are connected and $v_2$ and $v_3$ are also connected with an edge. It means that there is a non-trivial path $v_0 \rightarrow v_1 \rightarrow v_2 \rightarrow v_3 \rightarrow v_1 \rightarrow t$. 
The observations above cover all possible cases in this problem and also yield a solution working in linear time in terms of a size of the original graph.
[ "constructive algorithms" ]
2,600
null
925
E
May Holidays
It's May in Flatland, and there are $m$ days in this month. Despite the fact that May Holidays are canceled long time ago, employees of some software company still have a habit of taking short or long vacations in May. Of course, not all managers of the company like this. There are $n$ employees in the company that form a tree-like structure of subordination: each employee has a unique integer id $i$ between $1$ and $n$, and each employee with id $i$ (except the head manager whose id is 1) has exactly one direct manager with id $p_i$. The structure of subordination is not cyclic, i.e. if we start moving from any employee to his direct manager, then we will eventually reach the head manager. We define that an employee $u$ is a subordinate of an employee $v$, if $v$ is a direct manager of $u$, or the direct manager of $u$ is a subordinate of $v$. Let $s_i$ be the number of subordinates the $i$-th employee has (for example, $s_1 = n - 1$, because all employees except himself are subordinates of the head manager). Each employee $i$ has a bearing limit of $t_i$, which is an integer between $0$ and $s_i$. It denotes the maximum number of the subordinates of the $i$-th employee being on vacation at the same moment that he can bear. If at some moment strictly more than $t_i$ subordinates of the $i$-th employee are on vacation, and the $i$-th employee himself is not on a vacation, he becomes displeased. In each of the $m$ days of May exactly one event of the following two types happens: either one employee leaves on a vacation at the beginning of the day, or one employee returns from a vacation in the beginning of the day. You know the sequence of events in the following $m$ days. Your task is to compute for each of the $m$ days the number of displeased employees on that day.
In terms of trees we have a rooted tree whose vertices may be activated and deactivated, and each vertex has a limit for the number of deactivated vertices among its descendants. We are required to switch the state of some vertex, and after each query we report the number of activated unsatisfied vertices. Let the balance of a vertex be equal to the difference between its limit of deactivated descendants and the actual number of deactivated vertices among its descendants. In such terms we are interested in the number of activated vertices with negative balance. Let's utilize the idea of sqrt-optimization. Consider a block of $k$ consecutive queries, let us answer all of them. Suppose these queries affect the state of vertices $v_1, v_2, \ldots, v_l$ ($l \leq k$), let us call such vertices interesting. Then, during the current query block, the balance will change only for the vertices that have at least one interesting vertex in their subtree. Let's perform a classical trick of building the condensed tree containing the given interesting vertices. Namely, sort all the interesting vertices in order of their visit when doing DFS, and add all vertices of form $lca(v_i, v_{i+1})$ for all $1 \leq i < l$ to the set of interesting vertices. After such procedure all vertices whose balance may change can be split into $O(k)$ vertical paths each of which ends in an interesting vertex. Now we are going to consider separately the interesting vertices and the interior vertices of all paths between interesting vertices. In each of the paths the balance of all vertices is changed simultaneously, thus we may sort all the vertices in each path by balance and then group all vertices having the same balance together. Introduce a pointer that initially stands at the first satisfied group (with non-negative balance). 
When the balance of all groups is changed by 1, instead of actually changing the value of balance we may just shift the pointer by at most one position to the left or to the right (artificially changing the origin) and accounting at most one group the pointer has passed in the answer. On each query we have to perform such an operation with every path and interesting vertex that is located above the queried vertex. Since each vertex and each path is processed in $O(1)$, processing a single query takes $O(k)$ time and processing all queries inside a block takes $O(k^2)$ time. It is possible to build all paths and groups in running time of a single DFS plus sort time (std::sort or counting sort) for grouping vertices of equal balance. This part of solution takes $O(n)$ per each query block or $O(n \log n)$ depending on used sorting algorithm. If we use count sort, the resulting complexity will be $O(\frac{m}{k} \left(k^2 + n\right))$, finally we can take $k = \Theta(\sqrt{n})$ and get $O(m \sqrt{n})$ running time.
[ "data structures", "trees" ]
2,900
null
925
F
Parametric Circulation
Vova has recently learned what a circulaton in a graph is. Recall the definition: let $G = (V, E)$ be a directed graph. A circulation $f$ is such a collection of non-negative real numbers $f_e$ ($e \in E$), that for each vertex $v \in V$ the following conservation condition holds: $$\sum\limits_{e \in \delta^{-}(v)} f_e = \sum\limits_{e \in \delta^{+}(v)} f_e$$ where $\delta^{+}(v)$ is the set of edges that end in the vertex $v$, and $\delta^{-}(v)$ is the set of edges that start in the vertex $v$. In other words, for each vertex the total incoming flow should be equal to the total outcoming flow. Let a $lr$-circulation be such a circulation $f$ that for each edge the condition $l_e \leq f_e \leq r_e$ holds, where $l_e$ and $r_e$ for each edge $e \in E$ are two non-negative real numbers denoting the lower and upper bounds on the value of the circulation on this edge $e$. Vova can't stop thinking about applications of a new topic. Right now he thinks about the following natural question: let the graph be fixed, and each value $l_e$ and $r_e$ be a linear function of a real variable $t$: $$l_e(t) = a_e t + b_e$$ $$r_e(t) = c_e t + d_e$$ Note that $t$ is the \textbf{same} for all edges. Let $t$ be chosen at random from uniform distribution on a segment $[0, 1]$. What is the probability of existence of $lr$-circulation in the graph?
First, let's use the classical reduction of $lr$-circulation problem to the maximum flow problem. Consider a network $G' = (V \cup \{s, t\}, E')$ where for each $e = uv \in E$ there are three edges: $e_0 = uv$ with capacity $c_{e_0} = r_e - l_e$ $e_1 = sv$ with capacity $c_{e_1} = l_e$ $e_2 = ut$ with capacity $c_{e_2} = l_e$ Statement: it is possible to provide a bijection between $s-t$ flows of value $\sum \limits_{e \in E} l_e$ in $G'$ and $lr$-circulations in $G$. Indeed, consider a flow $f'$ in $G'$, that saturates all edges going from $s$ (and all the edges leading into $t$ at the same time). Let $f_e = f'_{e_0} + l_e$. Notice that it is a correct circulation: for any vertex $v \in V$ $$\sum\limits_{e \in \delta^{-}(v)} f_e = \sum\limits_{e \in \delta^{-}(v)} \left(f'_{e_0} + l_e\right) = \sum\limits_{e \in \delta^{+}(v)} \left(f'_{e_0} + l_e\right) = \sum\limits_{e \in \delta^{+}(v)} f_e,$$ where the middle equality immediately follows from the conservation condition for the flow $f'$ at the vertex $v$. On the other hand, the obtained circulation is indeed an $lr$-circulation because of how we got values of $f'_e$. By performing all the steps in the reverse direction, we may recover a maximum flow in $G'$ by any $lr$-circulation that finishes our proof. Now we are going to answer the following question: we have a parametric network $G'(t)$ in which all capacities linearly depend on $t$, we have to find the probability that $G'$ allows a flow that saturates all edges from the source under condition that $t$ is sampled from $U[0, 1]$. Let us show that the set of $t$ that allow existence of a sought flow is a segment. It follows from the fact that the value $maxflow(t)$ of a maximum flow in $G'(t)$ is concave: suppose $f'(t_1)$ is an admissible flow in $G'(t_1)$ and $f'(t_2)$ is an admissible flow in $G'(t_2)$. 
Then it is easy to see that $\lambda f'(t_1) + (1 - \lambda) f'(t_2)$ is an admissible flow in $G'\left(\lambda t_1 + (1 - \lambda) t_2\right)$ for any $\lambda \in [0, 1]$ (as all the constraints on variables $f'_{e}$ are linear inequalities), from which immediately follows that $maxflow(\lambda t_1 + (1 - \lambda) t_2) \geq \lambda maxflow(t_1) + (1 - \lambda) maxflow(t_2)$. Denote $suml(t) = \sum\limits_{e \in E} l_e(t)$. Let us notice that $gap(t) = suml(t) - maxflow(t) \geq 0$ for any $t$ and we are interested in precisely those values of $t$, such that $gap(t) = 0$. Thus, the sought values of $t$ form a segment as the function $gap(t)$ is convex. The remaining part of the solution is very simple: find the minimum of the convex function $gap(t)$ over the segment $[0, 1]$. If it is non-zero, then the answer is 0. Otherwise, we can locate the boundaries of an answer segment using two binary searches and print the difference between them. While implementing such a solution, one may face several difficulties arising from the precision issues, so we will provide two observations that may help you deal with them. One may notice that $maxflow(t)$ is actually a piecewise linear function, all pieces of which have integer slope. Actually, $maxflow(t) = mincut(t) = \min\limits_{\text{cut }C} cost(C, t)$, and the cost of any fixed cut in $G'(t)$ is a linear function of $t$ with an integer slope. Thus, $maxflow(t)$ is a lower envelope of a family of linear functions with integer slopes. The similar fact holds for the function $gap(t)$ also. And we are interested in a horizontal segment in $gap(t)$ which may be found using the binary search over a sign of a derivative $gap'(t)$. 
Finally notice that calculating a derivative $gap'(t)$ may be done by finding a maximum flow and adding up all slopes of capacities of the edges defining a minimum cut restricting given maximum flow (since exactly this cut provides a linear constraint defining a segment of a function $gap(t)$, which a point $t$ belongs to). An alternative observation - consider only the points $t$ such that $t = \frac{k}{10^7}$ where $k$ is integer. If we keep only such points on the sought segment, its length will decrease by no more than $2 \cdot 10^{-7}$ which is allowed by a required answer precision. Finally, we can multiply all $b_e$ and $d_e$ by $10^7$ and consider $t$ to be an integer between $0$ and $10^7$ which allows you to implement a solution that only uses integer data types. We get a solution with a running time of $O(maxflow \cdot \log prec^{-1})$ where $prec$ is a required precision equal to $10^{-6}$ under conditions of a given problem and $maxflow$ is a running time of your favourite maximum flow algorithm. Practically you could use Dinic algorithm or Edmonds-Karp algorithm with capacity scaling.
[ "binary search", "flows" ]
3,100
null
928
A
Login Verification
When registering in a social network, users are allowed to create their own convenient login to make it easier to share contacts, print it on business cards, etc. Login is an arbitrary sequence of lower and uppercase latin letters, digits and underline symbols («_»). However, in order to decrease the number of frauds and user-inattention related issues, it is prohibited to register a login if it is similar with an already existing login. More precisely, two logins $s$ and $t$ are considered similar if we can transform $s$ to $t$ via a sequence of operations of the following types: - transform lowercase letters to uppercase and vice versa; - change letter «O» (uppercase latin letter) to digit «0» and vice versa; - change digit «1» (one) to any letter among «l» (lowercase latin «L»), «I» (uppercase latin «i») and vice versa, or change one of these letters to other. For example, logins «Codeforces» and «codef0rces» as well as «OO0OOO00O0OOO0O00OOO0OO_lol» and «OO0OOO0O00OOO0O00OO0OOO_1oI» are considered similar whereas «Codeforces» and «Code_forces» are not. You're given a list of existing logins with no two similar amonst and a newly created user login. Check whether this new login is similar with any of the existing ones.
At first let's format all existing logins. For each login we will do the following: change all uppercase letters on lowercase letters; change all letters o on the digit 0; change all letters i and l on the digit 1. If new login is equal to some of the formatted login, the user can not register new login. In the other case, he will be able to do this.
[ "*special", "strings" ]
1,200
null
928
B
Chat
There are times you recall a good old friend and everything you've come through together. Luckily there are social networks — they store all your message history making it easy to know what you argued over 10 years ago. More formal, your message history is a sequence of messages ordered by time sent numbered from $1$ to $n$ where $n$ is the total number of messages in the chat. Each message might contain a link to an earlier message which it is a reply to. When opening a message $x$ or getting a link to it, the dialogue is shown in such a way that $k$ previous messages, message $x$ and $k$ next messages are visible (with respect to message $x$). In case there are less than $k$ messages somewhere, they are yet all shown. Digging deep into your message history, you always read all visible messages and then go by the link in the current message $x$ (if there is one) and continue reading in the same manner. Determine the number of messages you'll read if your start from message number $t$ for all $t$ from $1$ to $n$. Calculate these numbers independently. If you start with message $x$, the initial configuration is $x$ itself, $k$ previous and $k$ next messages. Messages read multiple times are considered as one.
Let's use dynamic programming. We will calculate the answer for the messages in the order of increasing their numbers. Suppose the current message has number $y$; then the answer has already been calculated for each message from $1$ to $(y - 1)$ and equals $cnt_{z}$ for message $z$. If there is no link in the message $y$ then the answer for this message is the length of the segment $[y - k, y + k]$ (it is necessary not to forget that the left boundary of the segment should be positive, and the right boundary should not exceed $n$). In the other case, the message $y$ contains the link to the message $z$. We know that $z < y$, so $cnt_{z}$ has been already calculated. All messages which were counted for message $z$ should be counted in the answer for message $y$. Also new messages should be added — these are the messages from the segment $[y - k, y + k]$ which are not included in the segment $[z - k, z + k]$. Also we remember that the left boundary of the segment should be positive, and the right boundary should not exceed $n$. After we considered all messages — simply print the array $cnt$.
[ "*special", "dp" ]
1,400
null
930
A
Peculiar apple-tree
In Arcady's garden there grows a peculiar apple-tree that fruits one time per year. Its peculiarity can be explained in following way: there are $n$ inflorescences, numbered from $1$ to $n$. Inflorescence number $1$ is situated near base of tree and any other inflorescence with number $i$ ($i > 1$) is situated at the top of branch, which bottom is $p_{i}$-th inflorescence and $p_{i} < i$. Once tree starts fruiting, there appears exactly one apple in each inflorescence. The same moment as apples appear, they start to roll down along branches to the very base of tree. Each second all apples, except ones in first inflorescence simultaneously roll down one branch closer to tree base, e.g. apple in $a$-th inflorescence gets to $p_{a}$-th inflorescence. Apples that end up in first inflorescence are gathered by Arcady in exactly the same moment. Second peculiarity of this tree is that once two apples are in same inflorescence they \textbf{annihilate}. This happens with each pair of apples, e.g. if there are $5$ apples in same inflorescence in same time, only one will not be annihilated and if there are $8$ apples, all apples will be annihilated. Thus, there can be no more than one apple in each inflorescence in each moment of time. Help Arcady with counting number of apples he will be able to collect from first inflorescence during one harvest.
Firstly, let's formalize the problem: we have a tree with the root in the first inflorescence. Let's examine apples that can roll down to the base of the tree at the $t$-th moment of time. It is obvious these are the apples initially situated in nodes at distance $t$ from the root. The key idea of the solution is that we can suppose that apples in non-root nodes don't annihilate but roll down to the very root and annihilate in it. This assumption is correct because the number of apples in the root at the $t$-th moment depends only on the parity of the number of apples that got there at that moment. Thus let's calculate $cnt_{t}$ — the number of apples that will appear in the root at the $t$-th moment of time, for each $t$. This can be performed by BFS or DFS. The answer for this problem is the sum of all $cnt_{t} \bmod 2$ ($a \bmod b$ denotes the remainder of $a$ modulo $b$) for each $t$ from $0$ up to $d$, where $d$ is the maximal distance from the root to a node of the tree.
[ "dfs and similar", "graphs", "trees" ]
1,500
null
930
B
Game with String
Vasya and Kolya play a game with a string, using the following rules. Initially, Kolya creates a string $s$, consisting of small English letters, and uniformly at random chooses an integer $k$ from a segment $[0, len(s) - 1]$. He tells Vasya this string $s$, and then shifts it $k$ letters to the left, i. e. creates a new string $t = s_{k + 1}s_{k + 2}... s_{n}s_{1}s_{2}... s_{k}$. Vasya does not know the integer $k$ nor the string $t$, but he wants to guess the integer $k$. To do this, he asks Kolya to tell him the first letter of the new string, and then, after he sees it, open one more letter on some position, which Vasya can choose. Vasya understands, that he can't guarantee that he will win, but he wants to know the probability of winning, if he plays optimally. He wants you to compute this probability. Note that Vasya wants to know the value of $k$ uniquely, it means, that if there are at least two cyclic shifts of $s$ that fit the information Vasya knowns, Vasya loses. Of course, at any moment of the game Vasya wants to maximize the probability of his win.
Idea. Let's consider all possible $c_{1}$ that will be first in $t$. Then, let's consider all possible positions of the second letter that Vasya will ask about — call this distance $d$. If the pair of letters ($c_{1}$, $c_{2}$) occurs only once at distance $d$, then if $c_{2}$ is opened second, Vasya will be able to determine the shift. Solution. Let's loop through all letters at distance $d$ from all $c_{1}$ letters, and for each symbol $c_{2}$ we will calculate the number of such letters. This can be done in $O(cnt(c_{1}))$, where $cnt(c_{1})$ is the number of letters $c_{1}$ in the initial string. Now, if we fix such $d$ after opening $c_{1}$ that maximizes the number of unique pairs ($c_{1}$, $c_{2}$) at distance $d$ (we will name this number $p$), this will be the optimal $d$, and the conditional probability of victory given fixed $c_{1}$ equals $p / cnt(c_{1})$. Now we only need to sum up the conditional probabilities for different $c_{1}$. The probability of $c_{1}$ equals $cnt(c_{1}) / n$, thus the answer is $\sum \frac{p}{cnt(c_{1})} \cdot \frac{cnt(c_{1})}{n} = \frac{\sum p}{n}$.
[ "implementation", "probabilities", "strings" ]
1,600
null
930
C
Teodor is not a liar!
Young Teodor enjoys drawing. His favourite hobby is drawing segments with integer borders inside his huge $[1;m]$ segment. One day Teodor noticed that picture he just drawn has one interesting feature: there doesn't exist an integer point, that belongs each of segments in the picture. Having discovered this fact, Teodor decided to share it with Sasha. Sasha knows that Teodor likes to show off so he never trusts him. Teodor wants to prove that he can be trusted sometimes, so he decided to convince Sasha that there is no such integer point in his picture, which belongs to each segment. However Teodor is lazy person and neither wills to tell Sasha all coordinates of segments' ends nor wills to tell him their amount, so he suggested Sasha to ask him series of questions 'Given the integer point $x_{i}$, how many segments in Fedya's picture contain that point?', promising to tell correct answers for this questions. Both boys are very busy studying and don't have much time, so they ask you to find out how many questions can Sasha ask Teodor, that having only answers on his questions, Sasha can't be sure that Teodor isn't lying to him. Note that Sasha doesn't know amount of segments in Teodor's picture. Sure, Sasha is smart person and never asks about same point twice.
Idea. The main idea is that a set of points $x_{i}$ is bad (meaning that Sasha can't be sure that Teodor hasn't lied, relying only on this information) $\iff$ $cnt(x_{i})$ satisfies the following property: $\exists i: 1 \le i \le n$ such that $cnt(x_{1}) \le cnt(x_{2}) \le \dots \le cnt(x_{i}) \ge cnt(x_{i+1}) \ge \dots \ge cnt(x_{n})$. Solution. Firstly let's calculate $cnt(x_{i})$ for each integer point in $[1;m]$. One way to do this is a scanning line, whose asymptotics is $O(m + n)$. Another approach uses a segment tree supporting segment addition queries. In this case the asymptotics is $O(n \cdot log(m))$. Now we only need to find the longest sequence satisfying this property. Let's consider all possible $x_{i}$ in the previous inequality (the element that has the peak $cnt(x_{i})$). Now the answer is the length of the longest nondecreasing sequence ending in $x_{i}$ plus the length of the longest nonincreasing sequence starting in $x_{i+1}$. Both lengths can be found in $O(1)$ if one precalculates these lengths for each $1 \le i \le m$ using dynamic programming. Note that you should use the $O(m \cdot log(m))$ algorithm for calculating this dp, not $O(m^{2})$, otherwise you will end up with a TL verdict. The total asymptotics of this solution is $O(m \cdot log(m))$ for the solution using a scanning line, or $O((n + m) \cdot log(m))$ for the solution using a segment tree.
[ "data structures", "dp" ]
1,900
null
930
D
Game with Tokens
Consider the following game for two players. There is one white token and some number of black tokens. Each token is placed on a plane in a point with integer coordinates $x$ and $y$. The players take turn making moves, white starts. On each turn, a player moves \textbf{all} tokens of their color by $1$ to up, down, left or right. Black player can choose directions for each token independently. After a turn of the white player the white token can not be in a point where a black token is located. There are no other constraints on locations of the tokens: positions of black tokens can coincide, after a turn of the black player and initially the white token can be in the same point with some black point. If at some moment the white player can't make a move, he loses. If the white player makes $10^{100500}$ moves, he wins. You are to solve the following problem. You are given initial positions of all black tokens. It is guaranteed that initially all these positions are distinct. In how many places can the white token be located initially so that if both players play optimally, the black player wins?
Note that if black and white chip are placed in the beginning in points ${x, y}$ with the same parity of $x + y$ then black chip can't be on the manhattan distance $1$ from white chip before white's move. So black chip can't block white chip and can't somehow affect the game. We can solve the problem independently for black chips with odd $x + y$, white chip with even $x + y$ and for black chips with even $x + y$, white chip with odd $x + y$. Note that we can solve the problem for black chips with even $x + y$ if we move all of them on $1$ upward and then solve the problem for odd $x + y$. Let's now consider only black chips with odd $x + y$. Look at the image. If black chip is placed in black point then it can stop white chip placed in red, blue, yellow, green points if it will move up, down, left, right, respectively (i.e. white point can't make infinite number of move in these directions whatever moves it will make). Note that one black chip can stop white chip only in one or zero directions. If there are four black chips that can stop white chip in different directions then black will win. Else white chip can move in some direction infinitely and white will win. So, every black chip generates four angles of different types. If point ${x, y}$ is contained in intersection of four angles of different types and $x + y$ is even then we should count this point in answer. Let's substitute every point ${x, y}$ to point ${x + y, x - y}$. There are still four types of angles but now every coordinate of white chip must be even number. In particular, the first image will look like this: Let's leave only points with even coordinates and divide every coordinate by two. Still every black chip generates four angles, white chip must be in intersection of four angles of different types but now there are no restrictions about parity of anything. How to count points in intersection of four angles of different types effectively? 
Find for each type of angles and for each $x$-coordinate half-interval of $y$-coordinates such that every point in this half-interval will be in some angle of current type. If we can find these half-intervals then we can find for every $x$-coordinate length of intersections of four half-intervals and answer will be equal to sum of these lengths. Let's consider angles of only one type because for other types we can do something symmetric. Let's these angles will have sides directed upward and rightward. Then for each x-coordinate half-interval is $[L_{x}, \infty )$ where $L_{x}$ is minimal y-coordinate of vertices of angles which aren't placed to the right from $x$. So we can sort all vertices by $x$ and then write some easy scanline.
[ "data structures", "games", "implementation" ]
2,500
null
930
E
Coins Exhibition
Arkady and Kirill visited an exhibition of rare coins. The coins were located in a row and enumerated from left to right from $1$ to $k$, each coin either was laid with its obverse (front) side up, or with its reverse (back) side up. Arkady and Kirill made some photos of the coins, each photo contained a segment of neighboring coins. Akrady is interested in obverses, so on each photo made by him there is at least one coin with obverse side up. On the contrary, Kirill is interested in reverses, so on each photo made by him there is at least one coin with its reverse side up. The photos are lost now, but Arkady and Kirill still remember the bounds of the segments of coins each photo contained. Given this information, compute the remainder of division by $10^{9} + 7$ of the number of ways to choose the upper side of each coin in such a way, that on each Arkady's photo there is at least one coin with obverse side up, and on each Kirill's photo there is at least one coin with reverse side up.
Denote obverse-up coin as $0$ and reverse-up coin as $1$. Then we are to compute the number of binary strings of length $k$ such that $n$ of the given segments have atleast one $0$ and other $m$ ones - atleast one $1$. Let $dp[i][l_{0}][l_{1}]$ be the number of binary strings of length $i$ such that the last zero is at position $l_{0}$, the last one is at $l_{1}$ and all restrictions are satisfied for all segments with right borders not exceeding $i$. The transitions then are straighforward: check all possible values for position $i + 1$ and relax $l_{0}$ and $l_{1}$ accordingly. Let the new values be $l_{0}'$ and $l_{1}'$. Now consider all segments ending at $i + 1$. If there are such $[l, r]$ among them demanding zero while $l > l_{0}'$ or demanding one while $l > l_{1}'$, this continuation doesn't suit. Otherwise add $dp[i][l_{0}][l_{1}]$ to $dp[i + 1][l_{0}'][l_{1}']$. This works in $O(k^{3} + n + m)$ if we precompute all segments ending in $r$ for all $r$. Anyway, this is too slow. The first thing to enhance is to notice that either $l_{0} = i$ or $l_{1} = i$. Then we have to deal with $dp_{0}[i][l_{1}]$ (implying $l_{0} = i$) and $dp_{1}[i][l_{0}]$ (implying $l_{1} = i$). The transitions are the same, and the complexity becomes $O(k^{2} + n + m)$. Still too slow :( The further improvement is that all positions with no segments endings can be treated similarly since the transitions are equal. At the same time it doesn't matter whether the last zero is at $l_{0}$ or $l_{0} + 1$ if there are no segments beginning at $l_{0} + 1$. Same applies to $l_{1}$. Let's compress coordinates then, i.e. find all $x_{i}$ such that $x_{i}$ and $x_{i} + 1$ are covered by different sets of segments. 
Now it's time to slightly change the dp definition: let $dp_{0}[i][l_{1}]$be the number of binary strings of length $x_{i}$ such that the last digit is $0$, the last $1$ is somewhere between $x_{l1 - 1} + 1$ and $x_{l1}$ and all restrictions are satisfied for all segments with endings not exceeding $x_{i}$. $dp_{1}[i][l_{0}]$ is denoted in a similar fashion. Consider the possible transitions. Without loss of generality we'll account for transitions from $dp_{0}[i][l_{1}]$ to some $dp_{?}[i + 1][?]$. The goal is to somehow append a binary string of length $(x_{i + 1} - x_{i})$ to the existing one. There are three major cases: All additional digits are $0$, then we jump to $dp_{0}[i + 1][l_{1}]$ with coefficient $1$ (there's only one way to construct a string of zeros). All additional digits are $1$, then we jump to $dp_{1}[i + 1][i]$ with coefficient $1$. Note that the last zero remains at $x_{i}$. There are some $0$ and some $1$. Then the jump is to $dp_{0}[i + 1][i + 1]$ and $dp_{1}[i + 1][i + 1]$ with coefficients equal to $2^{xi + 1 - xi - 1} - 1$ since only the last digit if fixed. This is possible iff $x_{i + 1} - x_{i} > 1$. Moreover, we have to consider all segments ending at $x_{i + 1}$ and discard those not satisfying the restrictions. This works in $O((n+m)^{2}\cdot\log\left(k\right))$ (extra logarithm is for fast powers). There's only one step left to a full solution. Note that in the dp above you can separate digit-adding from constraint-accounting transitions and treat them one after another. That means that you can first apply all transitions from $d p_{*}[i][*]$ to $d p_{*}[i+1][*]$ disregarding segments endings at $x_{i + 1}$ and then null $dp_{0}[i + 1][l_{1}]$, where $l_{1} < l$, where $[l, x_{i}]$ is an arbitrary segment applying $1$-constraint and null $dp_{1}[i + 1][l_{0}]$, where $l_{0} < l$, where $[l, x_{i}]$ is an arbitrary segment applying $0$-constraint. 
Furthermore, note that transitions with coefficients not equal to $1$ are applied only to $dp_{*}[i+1][i]$ and $dp_{*}[i+1][i+1]$, while the values of $dp_{*}[i+1][j]$ where $j < i$ are either $dp_{*}[i][j]$ or $0$. That means we can store two arrays $dp_{0}[l_{1}]$ and $dp_{1}[l_{0}]$, implying $i$ is equal to the current value. Now when jumping from $i$ to $i + 1$ we have to relax $dp_{*}[i]$ and $dp_{*}[i+1]$, and null some prefix of $dp_{0}$ and some prefix of $dp_{1}$ depending on the beginnings of segments ending at $x_{i}$. The new values of $dp_{*}[i]$ and $dp_{*}[i+1]$ are easy to obtain via the sum of elements in these arrays and $x_{i + 1} - x_{i}$. With a properly chosen data structure the complexity becomes $O((n+m)\log{(n+m)}\log{(k)})$ with $O(n + m)$ memory. This is now enough to get AC. There's an alternative approach: you can just keep track of the first non-nulled element, since it can only increase. This also helps maintain the current sum of values without using specific data structures. This works in $O((n+m)\cdot(\log(n+m)+\log(k)))$ (including sort).
[ "data structures", "dp", "math" ]
2,900
null
931
A
Friends Meeting
Two friends are on the coordinate axis $Ox$ in points with integer coordinates. One of them is in the point $x_{1} = a$, another one is in the point $x_{2} = b$. Each of the friends can move by one along the line in any direction unlimited number of times. When a friend moves, the tiredness of a friend changes according to the following rules: the first move increases the tiredness by $1$, the second move increases the tiredness by $2$, the third — by $3$ and so on. For example, if a friend moves first to the left, then to the right (returning to the same point), and then again to the left his tiredness becomes equal to $1 + 2 + 3 = 6$. The friends want to meet in a integer point. Determine the minimum total tiredness they should gain, if they meet in the same point.
At first, understand the fact that the friends should make their moves one by one, and the friend who was initially on the left should move to the right while the other friend should move to the left. Let $len = |a - b|$. Then the first friend will make $cntA = len / 2$ moves, and the second friend — $cntB = len - len / 2$ moves. So the answer is the sum of two arithmetic progressions, $cntA \cdot (cntA + 1) / 2$ and $cntB \cdot (cntB + 1) / 2$. The given constraints allow calculating these sums in linear time — simply iterate from $1$ to $cntA$ for the first sum and from $1$ to $cntB$ for the second.
[ "brute force", "greedy", "implementation", "math" ]
800
null
931
B
World Cup
The last stage of Football World Cup is played using the play-off system. There are $n$ teams left in this stage, they are enumerated from $1$ to $n$. Several rounds are held, in each round the remaining teams are sorted in the order of their ids, then the first in this order plays with the second, the third — with the fourth, the fifth — with the sixth, and so on. It is guaranteed that in each round there is even number of teams. The winner of each game advances to the next round, the loser is eliminated from the tournament, there are no draws. In the last round there is the only game with two remaining teams: the round is called the Final, the winner is called the champion, and the tournament is over. Arkady wants his two favorite teams to play in the Final. Unfortunately, the team ids are already determined, and it may happen that it is impossible for teams to meet in the Final, because they are to meet in some earlier stage, if they are strong enough. Determine, in which round the teams with ids $a$ and $b$ can meet.
Initially, we need to understand the following fact. Since the number of teams in each round is even, $n$ should be a power of two. We will solve the problem for the 0-indexing commands, so we decrease the given $a$ and $b$ on one. For each round we will determine the number of the match, in which the teams with initial numbers $a$ and $b$ will play. The command $a$ will play in the match number $a / 2$, and the command $b$ will play in the match number $b / 2$. If $a / 2 = b / 2$, then these teams will play in the same match, and we need to print the number of the current round as an answer. If the number of remaining teams equals to two - this will be the final match of the tournament. If the match numbers not equal we consider the next round. In this case, the number of command $a$ becomes $a / 2$ and the number of number $b$ becomes $b / 2$. The number of teams which will go to the next round is $n = n / 2$. This process is always finite, because sooner or later will remain only $2$ teams and in this round will be only one match - the final match of the tournament.
[ "constructive algorithms", "implementation" ]
1,200
null
931
C
Laboratory Work
Anya and Kirill are doing a physics laboratory work. In one of the tasks they have to measure some value $n$ times, and then compute the average value to lower the error. Kirill has already made his measurements, and has got the following integer values: $x_{1}$, $x_{2}$, ..., $x_{n}$. It is important that the values are close to each other, namely, the difference between the maximum value and the minimum value is \textbf{at most $2$}. Anya does not want to make the measurements, however, she can't just copy the values from Kirill's work, because the error of each measurement is a random value, and this coincidence will be noted by the teacher. Anya wants to write such integer values $y_{1}$, $y_{2}$, ..., $y_{n}$ in her work, that the following conditions are met: - the average value of $x_{1}, x_{2}, ..., x_{n}$ is equal to the average value of $y_{1}, y_{2}, ..., y_{n}$; - all Anya's measurements are in the same bounds as all Kirill's measurements, that is, the maximum value among Anya's values is not greater than the maximum value among Kirill's values, and the minimum value among Anya's values is not less than the minimum value among Kirill's values; - the number of equal measurements in Anya's work and Kirill's work is as small as possible among options with the previous conditions met. Formally, the teacher goes through all Anya's values one by one, if there is equal value in Kirill's work and it is not strike off yet, he strikes off this Anya's value and one of equal values in Kirill's work. The number of equal measurements is then the total number of \textbf{strike off} values in Anya's work. Help Anya to write such a set of measurements that the conditions above are met.
The average value of Anya's measurements should be equal to the average value of Kirill's measurements, so the sum of all Anya's measurements should be equal to the sum of all Kirill's measurements. Let the minimum number in Kirill's measurements be $min$ and the maximum — $max$. Then, if $(max - min)$ is less than or equal to one, Anya will not be able to write down any measurements that Kirill does not have, so all her measurements will coincide with his measurements. There remains the case when $(max - min) = 2$. Each of Anya's measurements should be at least $min$ and not more than $max$. We need to brute-force how many measurements equal to $min$ Anya will write down, from $0$ to $n$. Then the numbers of measurements equal to $(min + 1)$ and $max$ can be uniquely determined. Let $sum$ be the necessary sum of all measurements, which is equal to the sum of all Kirill's measurements, and $cntMin$ be the current number of minimal measurements that Anya will write down. Then Anya needs to write the remaining number of measurements such that their sum equals $leftSum = sum - cntMin \cdot min$. The minimum sum that Anya can get with the remaining measurements is $minSum = (n - cntMin) \cdot (min + 1)$, and the maximum is $maxSum = (n - cntMin) \cdot max$. Then, if $leftSum < minSum$ or $leftSum > maxSum$, Anya can not take $cntMin$ minimum values and get the desired sum. Otherwise, Anya should write the measurements equal to $(min + 1)$ in the amount of $(leftSum - minSum)$, and all remaining measurements will be equal to $max$. After that, we need to update the answer with the number of coinciding values of $min$, $(min + 1)$ and $max$ in Anya's and Kirill's measurements. After we have updated the answer, move on to the next value of $cntMin$.
[ "implementation", "math" ]
1,700
null
932
A
Palindromic Supersequence
You are given a string $A$. Find a string $B$, where $B$ is a palindrome and $A$ is a subsequence of $B$. A subsequence of a string is a string that can be derived from it by deleting some (not necessarily consecutive) characters without changing the order of the remaining characters. For example, "cotst" is a subsequence of "contest". A palindrome is a string that reads the same forward or backward. The length of string $B$ should be at most $10^{4}$. It is guaranteed that there always exists such string. You do not need to find the shortest answer, the only restriction is that the length of string $B$ should not exceed $10^{4}$.
Let $reverse(s)$ be the reverse of string $s$. Now, $s + reverse(s)$ will always have $s$ as a subsequence (as first half) and it is a palindrome with size less than $10^{4}$. So, it may be one of the possible solutions.
[ "constructive algorithms" ]
800
#include <iostream>
#include <string>

// 932A Palindromic Supersequence.
// Print a palindrome containing the input string as a subsequence:
// the input followed by its own reverse is always such a palindrome,
// and its length (at most 2000) stays well within the 10^4 limit.
int main() {
    std::string a;
    std::cin >> a;
    // Emit the string itself, then the same characters in reverse order.
    std::cout << a;
    std::cout << std::string(a.rbegin(), a.rend());
    return 0;
}
932
B
Recursive Queries
Let us define two functions $f$ and $g$ on positive integer numbers. \begin{center} $f(n) = \text{product of non-zero digits of } n$, $g(n) = \begin{cases} n & \text{if } n < 10 \\ g(f(n)) & \text{otherwise} \end{cases}$ \end{center} You need to process $Q$ queries. In each query, you will be given three integers $l$, $r$ and $k$. You need to print the number of integers $x$ between $l$ and $r$ inclusive, such that $g(x) = k$.
If we can show that for all integers $n \ge 10$, we have $n > f(n)$ then we can use bottom up dp for calculating $g(n)$ for all the integers $1 \le n \le 10^{6}$ in $O(n)$. And as $1 \le g(n) \le 9$, using partial sum arrays for each possible value of $g(n)$, we can answer the queries in $O(1)$. For the proof that for all integers $n \ge 10$, we have $n > f(n)$, let us assume an integer $n \ge 10$ of length $k \ge 2$ which can be represented as $n_{k}n_{k - 1}... n_{2}n_{1}$ where $n_{i} \neq 0$ for all $1 \le i \le k$. We have assumed that $n_{i} \neq 0$, as even if any of the $n_{i} = 0$, it will neither affect $n$ nor $f(n)$ in our proof given below. Given, $f(n) = n_{k} \times n_{k - 1} \times ... \times n_{2} \times n_{1}$ and $n = n_{1} + 10 \times n_{2} + ... + 10^{k - 1} \times n_{k}$. As $f(n) \le 9^{k - 1} \times n_{k}$, $9^{k - 1} \times n_{k} < 10^{k - 1} \times n_{k}$ and $10^{k - 1} \times n_{k} \le n$. So, $f(n) < n$ and hence we can use bottom up dp for calculating $g(n)$ for all values of $n$. Also, we can observe that the integer $n$ reduces pretty much quickly to a single digit while calculating $g(n)$, so we can directly calculate $g(n)$ for all $1 \le n \le 10^{6}$ without using dp as well.
[ "binary search", "data structures", "dfs and similar" ]
1,300
#include <iostream>

// 932B Recursive Queries.
// pre[d][i] = number of integers x in [1..i] with g(x) == d, where g
// repeatedly replaces a number by the product of its non-zero digits
// until a single digit remains.
static int pre[10][1000005];

// Reduce v to one digit by repeatedly taking the product of its
// non-zero digits (g from the statement, computed iteratively).
static int reduceDigit(int v) {
    while (v >= 10) {
        int prod = 1;
        for (int t = v; t > 0; t /= 10) {
            int d = t % 10;
            if (d != 0) prod *= d;
        }
        v = prod;
    }
    return v;
}

int main() {
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(0);
    std::cout.tie(0);
    // Bucket every integer by its g-value, then turn each bucket row
    // into prefix counts so a query is a constant-time subtraction.
    for (int i = 1; i <= 1000000; ++i) pre[reduceDigit(i)][i]++;
    for (int d = 1; d < 10; ++d)
        for (int i = 1; i <= 1000000; ++i) pre[d][i] += pre[d][i - 1];
    int q;
    std::cin >> q;
    while (q--) {
        int l, r, k;
        std::cin >> l >> r >> k;
        std::cout << pre[k][r] - pre[k][l - 1] << '\n';
    }
    return 0;
}
932
C
Permutation Cycle
For a permutation $P[1... N]$ of integers from $1$ to $N$, function $f$ is defined as follows: \begin{center} $f(i,j) = \begin{cases} P[i] & \text{if } j = 1 \\ f(P[i], j - 1) & \text{otherwise} \end{cases}$ \end{center} Let $g(i)$ be the minimum positive integer $j$ such that $f(i, j) = i$. We can show such $j$ always exists. For given $N, A, B$, find a permutation $P$ of integers from $1$ to $N$ such that for $1 ≤ i ≤ N$, $g(i)$ equals either $A$ or $B$.
For $f(i, j) = i$ and $g(i) = k$, there must exist a cycle of length $k$ beginning from index $i$ and ending at the same index $i$ of permutation $P$. While generating a permutation $P$, we are constrained to generate cycles of length either $A$ or $B$ as $g(i)$ for all $1 \le i \le N$ must be equal to either of them. Let us try to generate a cycle of length $k$ for indices $i$ till $i + k - 1$ using only the integers $i$ till $i + k - 1$, each once. If $P[i] = i + k - 1$ and $P[j] = j - 1$ for all $i < j \le i + k - 1$, we in turn get a cycle of length $k$ for each of the indices $i$ till $i + k - 1$, that is $f(j, k) = j$ for all $i \le j \le i + k - 1$. So, if there exists a solution $(x, y)$ where $x \ge 0$ and $y \ge 0$, for $Ax + By = N$, we can in turn generate a permutation $P$ satisfying our needs. Otherwise, no such permutation is possible. So, now for any one of the solution $(x, y)$, generate $x$ cycles of length $A$, beginning from indices $1$, $A + 1$, $A * 2 + 1$ ... $A * (x - 1) + 1$ and then beginning from indices $A * x + 1$, $A * x + 1 + B$, ... $A * x + 1 + B * (y - 1)$, generate $y$ cycles of length $B$.
[ "brute force", "constructive algorithms" ]
1,600
#include <iostream>
#include <vector>

// 932C Permutation Cycle.
// Build a permutation consisting only of cycles of length a or b by
// carving [0, n) into consecutive segments, each segment forming one
// cycle over its own 1-based values.
int main() {
    std::ios_base::sync_with_stdio(false);
    std::cin.tie(0);
    std::cout.tie(0);
    int n, a, b;
    std::cin >> n >> a >> b;
    std::vector<int> p(n);
    int pos = 0;
    // Greedy choice: take a length-b cycle whenever the remaining count
    // is divisible by b, otherwise a length-a cycle; report -1 if a
    // length-a cycle no longer fits.
    while (pos < n) {
        int len;
        if ((n - pos) % b == 0) {
            len = b;
        } else {
            if (pos + a > n) {
                std::cout << -1;
                return 0;
            }
            len = a;
        }
        // Write the cycle pos+2, pos+3, ..., pos+len, pos+1 (1-based
        // values): each position maps to the next one and the last
        // wraps back to the first. For len == 1 this is a fixed point.
        for (int j = 0; j + 1 < len; ++j) p[pos + j] = pos + j + 2;
        p[pos + len - 1] = pos + 1;
        pos += len;
    }
    for (int v : p) std::cout << v << " ";
    return 0;
}
932
D
Tree
You are given a node of the tree with index $1$ and with weight $0$. Let $cnt$ be the number of nodes in the tree at any instant (initially, $cnt$ is set to $1$). Support $Q$ queries of following two types: - $\mathrm{i}\ \mathrm{R}\ \mathrm{W}:$ Add a new node (index $cnt + 1$) with weight $W$ and add edge between node $R$ and this node. - $2\ \mathrm{R}\ \mathrm{X}:$ Output the maximum length of sequence of nodes which - starts with $R$. - Every node in the sequence is an ancestor of its predecessor. - Sum of weight of nodes in sequence does not exceed $X$. - For some nodes $i, j$ that are consecutive in the sequence if $i$ is an ancestor of $j$ then $w[i] ≥ w[j]$ and there should not exist a node $k$ on simple path from $i$ to $j$ such that $w[k] ≥ w[j]$ The tree is rooted at node $1$ at any instant. \textbf{Note that the queries are given in a modified way.}
The main idea is that we will use binary lifting. Twice. Let's consider the following $O(Q \times N)$ algorithm - for every vertex $u$ (when inserted) find the closest vertex $v$ above it with $w[v] \ge w[u]$. Lets have an array $nxt[]$, such that $nxt[u] = v$. Then the query will be done by simply jumping to the vertex in $nxt[]$, until our sum becomes larger than $X$. Obviously this is $O(Q \times Depth) = O(Q \times N)$. To speed it up, we will have $2$ binary liftings. The first one will be for finding the $nxt[]$ and the second one will be for answering the queries. For the first one we will store the $2^{i}$-th parent and the maximum weight on the path and for the second one, we will store the $2^{i}$-th $nxt[]$ vertex and the sum of the weights on the path. Well that's all and in such a way you can achieve $O(Q\log N)$. Thanks to radoslav11 for nice and short editorial in comments.
[ "binary search", "dp", "trees" ]
2,200
#include "bits/stdc++.h" #ifdef PRINTERS #include "printers.hpp" using namespace printers; #define tr(a) cerr<<#a<<" : "<<a<<endl #else #define tr(a) #endif #define ll long long #define pb push_back #define mp make_pair #define pii pair<int,int> #define vi vector<int> #define all(a) (a).begin(),(a).end() #define F first #define S second #define sz(x) (int)x.size() #define hell 1000000007 #define endl '\n' #define rep(i,a,b) for(int i=a;i<b;i++) using namespace std; int par[20][400000]; ll par_sum[20][400000]; int w[400000]; void solve(){ int Q; cin>>Q; w[0]=INT_MAX; int last=0; int cur=1; for(int i=0;i<20;i++)par_sum[i][1]=1e16; while(Q--){ int ch; cin>>ch; if(ch==1){ ll p,q; cin>>p>>q; p^=last; q^=last; cur++; w[cur]=q; if(w[p]>=w[cur]){ par[0][cur]=p; } else{ int from=p; for(int i=19;i>=0;i--){ if(w[par[i][from]]<w[cur]) from=par[i][from]; } par[0][cur]=par[0][from]; } par_sum[0][cur]=(par[0][cur]==0?1e16:w[par[0][cur]]); for(int i=1;i<20;i++){ par[i][cur]=par[i-1][par[i-1][cur]]; par_sum[i][cur]=(par[i][cur]==0?1e16:par_sum[i-1][cur]+par_sum[i-1][par[i-1][cur]]); } } else{ ll p,q; cin>>p>>q; p^=last; q^=last; if(w[p]>q){ cout<<0<<endl; last=0; } else{ q-=w[p]; int ans=1; for(int i=19;i>=0;i--){ if(par_sum[i][p]<=q){ ans+=(1<<i); q-=par_sum[i][p]; p=par[i][p]; } } cout<<ans<<endl; last=ans; } } } } int main(){ ios_base::sync_with_stdio(false); cin.tie(0); cout.tie(0); int t=1; // cin>>t; while(t--){ solve(); } return 0; }
932
E
Team Work
You have a team of $N$ people. For a particular task, you can pick any non-empty subset of people. The cost of having $x$ people for the task is $x^{k}$. Output the sum of costs over all non-empty subsets of people.
The required sum can be expressed as $\sum_{r=1}^{n}{\binom{n}{r}}r^{k}$. $f(x)=(1+x)^{n}=\sum_{r=0}^{n}{\binom{n}{r}}x^{r}$. Differentiating the above equation and multiplying by $x$, we get $n x(1+x)^{n-1}=\sum_{r=1}^{n}{\binom{n}{r}}r x^{r}$. Differentiating and multiplying by $x$ again, we get $x{\frac{\mathrm{d}}{\mathrm{d}x}}\left(n x(1+x)^{n-1}\right)=\sum_{r=1}^{n}{\binom{n}{r}}r^{2}x^{r}$. Repeating the process (multiplying by $x$ and differentiating) $k$ times, and substituting $x = 1$, we get the desired sum. This can be done using dynamic programming. Consider $dp[a][b][c]$ to be the value of the function $x^{b}(1 + x)^{c}$ after performing the operation of differentiating and multiplying by $x$, $a$ times, evaluated at $x = 1$. So, our final answer will be $dp[k][0][n]$. After one operation, the function $x^{b}(1+x)^{c}$ becomes $x(b x^{b-1}(1+x)^{c}+c x^{b}(1+x)^{c-1})$, or $b x^{b}(1+x)^{c}+c x^{b+1}(1+x)^{c-1}$. So, $dp[a][b][c] = b \cdot dp[a - 1][b][c] + c \cdot dp[a - 1][b + 1][c - 1]$. Take care of the special cases when $a = 0$ or $b = 0$. The above $dp$ seems to be $3$-dimensional but actually has $O(k^{2})$ states, since $b + c$ is constant, reducing one dimension.
[ "combinatorics", "dp", "math" ]
2,400
#include "bits/stdc++.h" #define ll long long #define pb push_back #define mp make_pair #define pii pair<int,int> #define vi vector<int> #define all(a) (a).begin(),(a).end() #define F first #define S second #define sz(x) (int)x.size() #define hell 1000000007 #define endl '\n' #define rep(i,a,b) for(int i=a;i<b;i++) using namespace std; int dp[5001][5001]; ll expo(ll base, ll exponent, ll mod) { ll ans = 1; while(exponent !=0 ) { if((exponent&1) == 1) { ans = ans*base ; ans = ans%mod; } base = base*base; base %= mod; exponent>>= 1; } return ans%mod; } int fill(int diffs,int a,int tot){ if(dp[diffs][a]>=0)return dp[diffs][a]; int b=tot-a; if(diffs==0){ return dp[diffs][a]=expo(2,b,hell); } return dp[diffs][a]=((b?1LL*b*fill(diffs-1,a+1,tot):0LL)+(a?1LL*a*fill(diffs-1,a,tot):0LL))%hell; } void solve(){ int N,k; cin>>N>>k; memset(dp,-1,sizeof dp); cout<<fill(k,0,N)<<endl; } int main(){ ios_base::sync_with_stdio(false); cin.tie(0); cout.tie(0); int t=1; // cin>>t; while(t--){ solve(); } return 0; }
932
F
Escape Through Leaf
You are given a tree with $n$ nodes (numbered from $1$ to $n$) rooted at node $1$. Also, each node has two values associated with it. The values for $i$-th node are $a_{i}$ and $b_{i}$. You can jump from a node to any node in its subtree. The cost of one jump from node $x$ to node $y$ is the product of $a_{x}$ and $b_{y}$. The total cost of a path formed by one or more jumps is sum of costs of individual jumps. For every node, calculate the minimum total cost to reach any leaf from that node. Pay attention, that root can never be leaf, even if it has degree $1$. Note that you cannot jump from a node to itself.
The problem can be solved by dynamic programming. Let $dp[i]$ be the minimum cost to reach a leaf from $i^{th}$ node. Then, $d p[i]=\operatorname*{min}_{\forall j\in s u b t r e e(i)}(a[i]\ast b[j]+d p[j])$ where $j \neq i$. This is equivalent to finding the minimum $y$ at $x = a[i]$ among lines $y = b[j] * x + dp[j]$. Thus, convex hull trick of dp optimization can be used to find the minimum value. Once the value is calculated for a node, a line with slope $b[i]$ and y-intercept $dp[i]$ is added to the hull. However,at all times, we need to maintain a lower convex hull of only the nodes in the current subtree. The trick of merging small to large can be used here. While we are at a node $i$, we form a convex hull for the subtree rooted at each child. The convex hulls of the light children can then be merged into the convex hull of the heavy child. Once the convex hull for the entire subtree is formed, a query for the minimum value at x-coordinate $a[i]$ in the lower hull gives the value of $dp[i]$. The merging of small to large has time complexity of $O(nlogn)$ while the addition of a line into the hull requires time $O(logn)$. Complexity: $O(nlog^{2}n)$
[ "data structures", "dp", "geometry" ]
2,700
#include "bits/stdc++.h" #define ll long long #define pb push_back #define mp make_pair #define pii pair<int,int> #define vi vector<int> #define all(a) (a).begin(),(a).end() #define F first #define S second #define sz(x) (int)x.size() #define hell 1000000007 #define endl '\n' #define rep(i,a,b) for(int i=a;i<b;i++) using namespace std; bool Q; struct Line { mutable ll k, m, p; bool operator<(const Line& o) const { return Q ? p < o.p : k < o.k; } }; struct LineContainer : multiset<Line> { const ll inf = LLONG_MAX; ll div(ll a, ll b){ return a / b - ((a ^ b) < 0 && a % b); } bool isect(iterator x, iterator y) { if (y == end()) { x->p = inf; return false; } if (x->k == y->k) x->p = x->m > y->m ? inf : -inf; else x->p = div(y->m - x->m, x->k - y->k); return x->p >= y->p; } void add(ll k, ll m) { auto z = insert({k, m, 0}), y = z++, x = y; while (isect(y, z)) z = erase(z); if (x != begin() && isect(--x, y)) isect(x, y = erase(y)); while ((y = x) != begin() && (--x)->p >= y->p) isect(x, erase(y)); } ll query(ll x) { assert(!empty()); Q = 1; auto l = *lower_bound({0,0,x}); Q = 0; return l.k * x + l.m; } }; vector<int> x,y; vector<vi> adj; vector<ll> ans; vector<int> subsize; void dfs1(int u,int v){ subsize[u]=1; for(auto i:adj[u]){ if(i==v)continue; dfs1(i,u); subsize[u]+=subsize[i]; } } void dfs2(int v, int p,LineContainer& cur){ int mx=-1,bigChild=-1; bool leaf=1; for(auto u:adj[v]){ if(u!=p and subsize[u]>mx){ mx=subsize[u]; bigChild=u; leaf=0; } } if(bigChild!=-1){ dfs2(bigChild,v,cur); } for(auto u:adj[v]){ if(u!=p and u!=bigChild){ LineContainer temp; dfs2(u,v,temp); for(auto i:temp){ cur.add(i.k,i.m); } } } if(!leaf)ans[v]=-cur.query(x[v]); else ans[v]=0; cur.add(-y[v],-ans[v]); } void solve(){ int n; cin>>n; x.resize(n+1); y.resize(n+1); ans.resize(n+1); subsize.resize(n+1); adj.resize(n+1); rep(i,1,n+1)cin>>x[i]; rep(i,1,n+1)cin>>y[i]; rep(i,1,n){ int u,v; cin>>u>>v; adj[u].pb(v); adj[v].pb(u); } dfs1(1,0); LineContainer lc; dfs2(1,0,lc); 
rep(i,1,n+1)cout<<ans[i]<<" "; } int main(){ ios_base::sync_with_stdio(false); cin.tie(0); cout.tie(0); int t=1; // cin>>t; while(t--){ solve(); } return 0; }
932
G
Palindrome Partition
Given a string $s$, find the number of ways to split $s$ to substrings such that if there are $k$ substrings $(p_{1}, p_{2}, p_{3}, ..., p_{k})$ in partition, then $p_{i} = p_{k - i + 1}$ for all $i$ $(1 ≤ i ≤ k)$ and $k$ is even. Since the number of ways can be large, print it modulo $10^{9} + 7$.
Let $n$ be the length of the string $s$. Consider the string $t = s[0]s[n - 1]s[1]s[n - 2]s[2]s[n - 3]...s[n / 2 - 1]s[n / 2]$. The problem can be reduced to finding the number of ways to partition string $t$ into palindromic substrings of even length. Proof: Let $k$ be the total number of partitions. Let $p_{i} = p_{k - i + 1} = x_{1}x_{2}x_{3}...x_{m}$ where $m$ denotes the length of $p_{i}$ and $x_{j}$ denotes the $j^{th}$ character of $p_{i}$. The part of string $t$ corresponding to these two partitions is $x_{1}x_{m}x_{2}x_{m - 1}...x_{m - 1}x_{2}x_{m}x_{1}$ which is an even length palindrome. Similarly, the converse is also true. Dynamic programming can be used to solve the problem. Let $dp[i]$ be the number of ways to partition $t[1...i]$ into even length palindromes. Then, $dp[i]=\sum dp[j]$ where $t[j + 1...i]$ is an even length palindrome. Of course for odd $i$, $dp[i] = 0$. As discussed in this blog, we can use an eertree to implement the solution. On the other hand, we can avoid the use of any suffix structure by following the algorithm described in this paper. Complexity: $O(|s|\log|s|)$
[ "dp", "string suffix structures", "strings" ]
2,900
#define NDEBUG NDEBUG #include <algorithm> #include <array> #include <bitset> #include <cassert> #include <cstring> #include <cmath> #include <functional> #include <iomanip> #include <iostream> #include <map> #include <set> #include <sstream> #include <string> #include <tuple> #include <unordered_map> #include <unordered_set> #include <vector> #include <memory> #include <queue> #include <random> #define forn(t, i, n) for (t i = 0; i < (n); ++i) using namespace std; // TC_REMOVE_BEGIN /// caide keep bool __hack = std::ios::sync_with_stdio(false); /// caide keep auto __hack1 = cin.tie(nullptr); // TC_REMOVE_END // Section with adoption of array and vector algorithms. #define ENABLE_IF(e) typename enable_if<e>::type* = nullptr namespace template_util { constexpr int bytecount(uint64_t x) { return x ? 1 + bytecount(x >> 8) : 0; } template<int N> struct bytetype { }; template<> struct bytetype<4> { typedef uint32_t type; }; template<> struct bytetype<1> { typedef uint8_t type; }; /// caide keep template<uint64_t N> struct minimal_uint : bytetype<bytecount(N)> { }; } template<class T> T next(istream& in) { T ret; in >> ret; return ret; } /* TODOs: primitive root discrete log tests!!! */ namespace mod_impl { /// caide keep template <class T> constexpr inline T mod(T MOD) { return MOD; } /// caide keep template <class T> constexpr inline T mod(T* MOD) { return *MOD; } /// caide keep template <class T> constexpr inline T max_mod(T MOD) { return MOD - 1; } /// caide keep template <class T> constexpr inline T max_mod(T*) { return numeric_limits<T>::max() - 1; } constexpr inline uint64_t combine_max_sum(uint64_t a, uint64_t b) { return a > ~b ? 0 : a + b; } /// caide keep template <class T> constexpr inline uint64_t next_divisible(T mod, uint64_t max) { return max % mod == 0 ? 
max : combine_max_sum(max, mod - max % mod); } /// caide keep template <class T> constexpr inline uint64_t next_divisible(T*, uint64_t) { return 0; } //caide keep constexpr int IF_THRESHOLD = 2; template <class T, T MOD_VALUE, uint64_t MAX, class RET = typename template_util::minimal_uint<max_mod(MOD_VALUE)>::type, ENABLE_IF(MAX <= max_mod(MOD_VALUE) && !is_pointer<T>::value)> inline RET smart_mod(typename template_util::minimal_uint<MAX>::type value) { return value; } template <class T, T MOD_VALUE, uint64_t MAX, class RET = typename template_util::minimal_uint<max_mod(MOD_VALUE)>::type, ENABLE_IF(max_mod(MOD_VALUE) < MAX && MAX <= IF_THRESHOLD * max_mod(MOD_VALUE) && !is_pointer<T>::value)> inline RET smart_mod(typename template_util::minimal_uint<MAX>::type value) { while (value >= mod(MOD_VALUE)) { value -= mod(MOD_VALUE); } return (RET)value; } } #define MAX_MOD mod_impl::max_mod(MOD_VALUE) struct DenormTag {}; template <class T, T MOD_VALUE, uint64_t MAX = MAX_MOD, ENABLE_IF(MAX_MOD >= 2)> struct ModVal { typedef typename template_util::minimal_uint<MAX>::type storage; storage value; /// caide keep inline ModVal(): value(0) { assert(MOD >= 2); } inline ModVal(storage v, DenormTag): value(v) { assert(MOD >= 2); assert(v <= MAX); }; inline operator ModVal<T, MOD_VALUE>() { return {v(), DenormTag()}; }; typename template_util::minimal_uint<mod_impl::max_mod(MOD_VALUE)>::type v() const { return mod_impl::smart_mod<T, MOD_VALUE, MAX>(value); } }; template <class T, T MOD_VALUE, uint64_t MAX1, uint64_t MAX2, uint64_t NEW_MAX = mod_impl::combine_max_sum(MAX1, MAX2), ENABLE_IF(NEW_MAX != 0), class Ret = ModVal<T, MOD_VALUE, NEW_MAX>> inline Ret operator+(ModVal<T, MOD_VALUE, MAX1> o1, ModVal<T, MOD_VALUE, MAX2> o2) { return {typename Ret::storage(typename Ret::storage() + o1.value + o2.value), DenormTag()}; } template <class T, T MOD_VALUE, uint64_t MAX> inline ModVal<T, MOD_VALUE>& operator+=(ModVal<T, MOD_VALUE>& lhs, const ModVal<T, MOD_VALUE, MAX>& rhs) { lhs = 
lhs + rhs; return lhs; } template <class T, T MOD_VALUE, class MOD_TYPE> struct ModCompanion { typedef MOD_TYPE mod_type; typedef ModVal<mod_type, MOD_VALUE> type; template <uint64_t C> inline static constexpr ModVal<mod_type, MOD_VALUE, C> c() { return {C, DenormTag()}; }; }; #undef MAX_MOD template <uint64_t MOD_VALUE> struct Mod : ModCompanion<uint64_t, MOD_VALUE, typename template_util::minimal_uint<MOD_VALUE>::type> { template<uint64_t VAL> static constexpr uint64_t literal_builder() { return VAL; } template<uint64_t VAL, char DIGIT, char... REST> static constexpr uint64_t literal_builder() { return literal_builder<(10 * VAL + DIGIT - '0') % MOD_VALUE, REST...>(); } }; #define REGISTER_MOD_LITERAL(mod, suffix) \ template <char... DIGITS> mod::type operator "" _##suffix() { \ return mod::c<mod::literal_builder<0, DIGITS...>()>(); \ } template <class T, T MOD_VALUE, uint64_t MAX> inline ostream& operator<<(ostream& s, ModVal<T, MOD_VALUE, MAX> val) { s << val.v(); return s; } using md = Mod<1000000007>; using mt = md::type; REGISTER_MOD_LITERAL(md, mod) struct Triple { int start, delta, count; int end() { return start + delta * (count - 1); } }; // ostream& operator<<(ostream& out, const Triple& t) { // out << "(" << t.start << ", " << t.delta << ", " << t.count << ")"; // return out; // } void solve(istream& in, ostream& out) { auto s = next<string>(in); string s1; s1.reserve(2 * s.length()); forn (int, i, s.length() / 2) { s1.push_back(s[i]); s1.push_back(s[s.length() - i - 1]); } int n = s1.length(); vector<Triple> g; vector<mt> d(n + 1), cache(n + 1); d[0] = 1_mod; forn (int, i, n) { vector<Triple> g1; int prev = -i - 1; auto push = [&](Triple t) { if (g1.empty() || t.delta != g1.back().delta) { g1.push_back(t); } else { g1.back().count += t.count; } }; for (auto t : g) { if (t.start > 0 && s1[t.start - 1] == s1[i]) { t.start--; if (prev != t.start - t.delta) { push(Triple{t.start, t.start - prev, 1}); t.start += t.delta; t.count--; } if (t.count > 0) { 
push(t); } prev = t.end(); } } if (i >= 1 && s1[i - 1] == s1[i]) { push(Triple{i - 1, i - 1 - prev, 1}); } g = move(g1); for (auto& t : g) { mt add = d[t.end()]; if (t.count > 1) { add += cache[t.start - t.delta]; } if (t.start - t.delta >= 0) { cache[t.start - t.delta] = add; } d[i + 1] += add; } } out << d[n] << endl; } int main() { solve(cin, cout); return 0; }
933
A
A Twisty Movement
A dragon symbolizes wisdom, power and wealth. On Lunar New Year's Day, people model a dragon with bamboo strips and clothes, raise them with rods, and hold the rods high and low to resemble a flying dragon. A performer holding the rod low is represented by a $1$, while one holding it high is represented by a $2$. Thus, the line of performers can be represented by a sequence $a_{1}, a_{2}, ..., a_{n}$. Little Tommy is among them. He would like to choose an interval $[l, r]$ ($1 ≤ l ≤ r ≤ n$), then reverse $a_{l}, a_{l + 1}, ..., a_{r}$ so that the length of the longest non-decreasing subsequence of the new sequence is maximum. A non-decreasing subsequence is a sequence of indices $p_{1}, p_{2}, ..., p_{k}$, such that $p_{1} < p_{2} < ... < p_{k}$ and $a_{p1} ≤ a_{p2} ≤ ... ≤ a_{pk}$. The length of the subsequence is $k$.
Since $1 \le a_{i} \le 2$, it's equivalent to finding a longest subsequence of the form $1^{*}2^{*}1^{*}2^{*}$. By an easy dynamic programming we can find it in $O(n)$ or $O(n^{2})$ time. You can see the $O(n^{2})$ solution in the model solution below. Here we introduce an $O(n)$ approach: Since the subsequence can be split into 4 parts ($11...22...11...22...$), we can let $dp[i][j]$ $(i = 1...n, j = 0..3)$ be the length of the longest such subsequence of $a[1...i]$ using only the first $j$ parts.
[ "dp" ]
1,800
#include <bits/stdc++.h> #define rep(i, x, y) for (int i = (x), _ = (y); i < _; ++i) #define down(i, x, y) for (int i = (x) - 1, _ = (y); i >= _; --i) #define fi first #define se second #define mp(x, y) make_pair(x, y) #define pb(x) push_back(x) #define bin(x) (1 << (x)) #define SZ(x) int((x).size()) using namespace std; typedef pair<int, int> pii; typedef vector<int> Vi; typedef long long ll; template<typename T> inline bool upmax(T &x, T y) { return x < y ? (x = y, 1) : 0; } template<typename T> inline bool upmin(T &x, T y) { return x > y ? (x = y, 1) : 0; } const int MAX_N = 2005; int pre[MAX_N][2], suf[MAX_N][2]; int g[MAX_N][MAX_N][2][2]; int w[MAX_N], N; int main() { scanf("%d", &N); rep (i, 0, N) { scanf("%d", &w[i]); w[i]--; } rep (i, 0, N) { if (i) memcpy(pre[i], pre[i - 1], sizeof pre[i]); down (j, 2, w[i]) upmax(pre[i][j], pre[i][w[i]] + 1); } down (i, N, 0) { if (i < N - 1) memcpy(suf[i], suf[i + 1], sizeof suf[i]); rep (j, 0, w[i] + 1) upmax(suf[i][j], suf[i][w[i]] + 1); } int ans = pre[N - 1][1]; rep (i, 0, N) { rep (a, 0, w[i] + 1) rep (b, w[i], 2) { g[i][i][a][b] = 1; } } rep (l, 1, N) rep (i, 0, N - l) { int j = i + l; rep (l, 0, 2) rep (a, 0, 2 - l) { int b = a + l; g[i][j][a][b] = max((a < 1 ? g[i][j][a + 1][b] : 0), (b ? g[i][j][a][b - 1] : 0)); upmax(g[i][j][a][b], g[i + 1][j][a][b] + (b == w[i])); upmax(g[i][j][a][b], g[i][j - 1][a][b] + (a == w[j])); upmax(ans, (i ? pre[i - 1][a] : 0) + g[i][j][a][b] + (j < N - 1 ? suf[j + 1][b] : 0)); } } printf("%d\n", ans); return 0; }
933
B
A Determined Cleanup
In order to put away old things and welcome a fresh new year, a thorough cleaning of the house is a must. Little Tommy finds an old polynomial and cleaned it up by taking it modulo another. But now he regrets doing this... Given two integers $p$ and $k$, find a polynomial $f(x)$ with non-negative integer coefficients strictly less than $k$, whose remainder is $p$ when divided by $(x + k)$. That is, $f(x) = q(x)·(x + k) + p$, where $q(x)$ is a polynomial (not necessarily with integer coefficients).
For simplicity's sake, we present a rather intuitive approach rather than a rigorous proof (one can be obtained by induction). For a given polynomial $f(x)$, what's its remainder taken modulo $(x + k)$? Let $f(x) = q(x) \cdot (x + k) + p$. Simulate the process of polynomial long division of $f(x)$ by $(x + k)$, and observe the coefficients that appear. Try it yourself! A simple pattern emerges from the results. Let's take a closer look! $p = a_{0} + ( - k) \cdot a_{1} + ... + ( - k)^{d} \cdot a_{d}$ And there's another constraint: $0 \le a_{i} < k$. Base negative $k$, that's it! The coefficients $a_{0}, a_{1}, ..., a_{d}$ form the base $-k$ representation of $p$. It surely exists, and is unique! We can also deduce that $d=O(\log p)$, which is why there is no constraint on the output $d$. If you aren't familiar with negative bases, please refer to Wikipedia. But that doesn't matter! You may as well come up with an algorithm for converting to negative bases on your own. For an example, refer to the "Calculation" section in the aforementioned page.
[ "math" ]
2,000
#include <bits/stdc++.h> using namespace std; long long p; int k; int cnt=0; int ans[107]; int get() { int x=p%k; if (x<0) x+=k; return x%k; } int main() { scanf("%lld%d",&p,&k); while (p!=0) { ++cnt; ans[cnt]=get(); p-=get(); p/=(-k); } printf("%d\n",cnt); for (int i=1;i<=cnt;i++) printf("%d ",ans[i]); printf("\n"); return 0; }
933
C
A Colourful Prospect
Firecrackers scare Nian the monster, but they're wayyyyy too noisy! Maybe fireworks make a nice complement. Little Tommy is watching a firework show. As circular shapes spread across the sky, a splendid view unfolds on the night of Lunar New Year's eve. A wonder strikes Tommy. How many regions are formed by the circles on the sky? We consider the sky as a flat plane. A region is a connected part of the plane with positive area, whose bound consists of parts of bounds of the circles and is a curve or several curves without self-intersections, and that does not contain any curve other than its boundaries. Note that exactly one of the regions extends infinitely.
It seems the problem can be solved with case analysis at first sight. Okay let's try to do so... For $n = 1$, it's trivial and the answer is, of course, $2$. For $n = 2$, there are two cases: If the two circles are intersect, the answer is $4$; Otherwise, the answer is $3$. For $n = 3$... well I think it's really a tough job, so think about general case will probably make our lives better. The main solution is based on Euler's formula for planar graph. This formula tells us that if we denote the number of vertices in a connected graph by $v$, the number of edges by $e$ and the number of faces (or regions) by $f$, we have $f = e - v + 2$. Since the circles can form several components, denoted the number of which by $c$, the formula for general planar graph should be $f = e - v + c + 1$. So what we need to do is to calculate $v$, $e$, and $c$. It's easy to see that $v$ is the number of unique intersection between circles. As for $e$, we can calculate the number of edges on each circle, which is equal to the unique intersection on each circle. The only special case is a single circle, and we can consider it as a graph without vertices and edges but forms one component, or a vertex with an edge to itself. Anyway it doesn't matter when $v = e$. The last one is $c$, which can be obtained easily with the help of dsu or dfs/bfs. The total complexity is $O(n^{2}\log{n})$, but why I leave this special case as a problem instead of a general case? Reread the first sentence of this tutorial and you will get the answer :) Here I want to show you the test #113, which is made by hands. Guess how many regions in this graph?
[ "geometry", "graphs" ]
2,700
#include <cmath> #include <cstdio> #include <algorithm> static const int MAXN = 3; static const double EPS = 1e-9; static int n; static int x[MAXN], y[MAXN], r[MAXN]; // -2: internally separate // -1: internally tangent // 0: intersecting // +1: externally tangent // +2: externally separate static int g[MAXN][MAXN]; // Number of points passed by all three circles static int conc; inline int rel(int a, int b) { int dssq = (x[a] - x[b]) * (x[a] - x[b]) + (y[a] - y[b]) * (y[a] - y[b]), dfsq = (r[a] - r[b]) * (r[a] - r[b]), smsq = (r[a] + r[b]) * (r[a] + r[b]); if (dssq < dfsq) return -2; else if (dssq == dfsq) return -1; else if (dssq < smsq) return 0; else if (dssq == smsq) return +1; else return +2; } inline void get_intersections(int a, int b, double ix[2], double iy[2]) { double angle = atan2(y[b] - y[a], x[b] - x[a]); double ds = sqrt((x[a] - x[b]) * (x[a] - x[b]) + (y[a] - y[b]) * (y[a] - y[b])); double delta = acos((ds * ds + r[a] * r[a] - r[b] * r[b]) / (2.0 * r[a] * ds)); ix[0] = x[a] + r[a] * cos(angle + delta); iy[0] = y[a] + r[a] * sin(angle + delta); ix[1] = x[a] + r[a] * cos(angle - delta); iy[1] = y[a] + r[a] * sin(angle - delta); } inline bool on_circle(int a, double x0, double y0) { return fabs((x[a] - x0) * (x[a] - x0) + (y[a] - y0) * (y[a] - y0) - r[a] * r[a]) <= EPS; } int main() { scanf("%d", &n); for (int i = 0; i < n; ++i) scanf("%d%d%d", &x[i], &y[i], &r[i]); for (int i = 0; i < n - 1; ++i) for (int j = i + 1; j < n; ++j) g[i][j] = g[j][i] = rel(i, j); conc = 0; if (n == 3) { for (int i = 0; i < 2; ++i) { for (int j = i + 1; j < 3; ++j) if (g[i][j] >= -1 && g[i][j] <= +1) { int k = 3 - i - j; double ix[2], iy[2]; get_intersections(i, j, ix, iy); if (on_circle(k, ix[0], iy[0])) ++conc; if (on_circle(k, ix[1], iy[1]) && g[i][j] == 0) ++conc; break; } if (conc != 0) break; } } if (n == 1) { puts("2"); } else if (n == 2) { puts(g[0][1] == 0 ? 
"4" : "3"); } else if (n == 3) { int x[3] = { g[0][1], g[0][2], g[1][2] }; std::sort(x, x + 3); if (x[0] == -2) { printf("%d\n", 4 + (x[1] == 0) + (x[2] == 0)); } else if (x[0] == -1) { if (x[1] == -1) { printf("%d\n", x[2] == -1 ? 4 : (6 - x[2])); } else { switch (x[1] * 10 + x[2]) { case 00: printf("%d\n", 7 - conc); break; case 01: puts("6"); break; case 02: puts("5"); break; case 11: case 12: case 22: puts("4"); break; default: puts("> <"); } } } else if (x[0] >= +1) { puts(x[0] == +1 && x[2] == +1 ? "5" : "4"); } else { // x[0] == 0 switch (x[1] * 10 + x[2]) { case 00: printf("%d\n", 8 - conc); break; case 01: printf("%d\n", 7 - conc); break; case 02: puts("6"); break; case 11: puts("6"); break; case 12: puts("5"); break; case 22: puts("5"); break; default: puts("> <"); } } } else puts("> <"); return 0; }
933
D
A Creative Cutout
Everything red frightens Nian the monster. So do red paper and... you, red on Codeforces, potential or real. Big Banban has got a piece of paper with endless lattice points, where lattice points form squares with the same area. His most favorite closed shape is the circle because of its beauty and simplicity. Once he had obtained this piece of paper, he prepares it for paper-cutting. He drew $n$ concentric circles on it and numbered these circles from $1$ to $n$ such that the center of each circle is the same lattice point and the radius of the $k$-th circle is $\sqrt{k}$ times the length of a lattice edge. Define the degree of beauty of a lattice point as the summation of the \textbf{indices} of circles such that this lattice point is inside them, or on their bounds. Banban wanted to ask you the total degree of beauty of all the lattice points, but changed his mind. Defining the total degree of beauty of all the lattice points on a piece of paper with $n$ circles as $f(n)$, you are asked to figure out $({\sum_{k=1}^{m}}f(k)){\mathrm{~mod~}}(10^{9}+7)$.
For the sake of explanation, let's use ${\binom{n}{k}}$ to represent the binomial coefficient $\frac{n!}{k!(n-k)!}$ and construct a coordinate system such that each coordinate axis is parallel to one of the lattice edges, the origin is the center of the concentric circles and each unit of length in this system is as long as the length of a lattice edge. For $f(n)$, we can figure out that the contribution of each lattice point $(x, y)$ is $\sum_{k=x^{2}+y^{2}}^{n}k={\binom{n+1}{2}}-{\binom{x^{2}+y^{2}}{2}}$. Defining $L$ as $x^{2} + y^{2}$, we can conclude that the contribution of each lattice point $(x, y)$ to the answer is $\sum_{k=L}^{m}\left({\binom{k+1}{2}}-{\binom{L}{2}}\right)=\frac{1}{6}\left(2L^{3}-3L^{2}(m+2)+L(3m+4)+m(m+1)(m+2)\right)$, a polynomial of degree $3$ in $L$. By using $(x^{2}+y^{2})^{n}=\sum_{k=0}^{n}{\binom{n}{k}}x^{2k}y^{2(n-k)}$, we can form the answer as $\sum_{x^{2}+y^{2}\leq m}\sum_{p+q\leq3}\mathrm{coef}_{p,q}\,x^{2p}y^{2q}$, where the constants $\mathrm{coef}_{p,q}$ come from expanding the cubic polynomial in $L = x^{2}+y^{2}$. The remaining part to solve this problem is just to enumerate all the possible integers $x$ and $q$ and then calculate $\sum_{p=0}^{3-q}\mathrm{coef}_{p,q}\,x^{2p}$ and $\sum_{y\in\mathbb{Z},\,|y|\leq{\sqrt{m-x^{2}}}}y^{2q}$ in constant time. The total complexity is $O({\sqrt{m}})$. By the way, the standard solution has hardcoded some closed forms to calculate the partial sum of small powers fast, but you can precalculate $s_{q}(k)=\sum_{y\in\mathbb{Z},\,|y|\leq k}y^{2q}$ and then enumerate $x$. Please be careful with 64-bit integer overflow, for example, $10^{12} \cdot 10^{9} \ge 2^{64}$. Although there is a pretest in case of $m = 2^{32}$ to reject brute force and some solutions with obvious overflow, it is highly probable to fail in the case of large input such as $m = 10^{12}$. The probability of failure is increasing when the number increases. Take care.
[ "brute force", "combinatorics", "math" ]
2,900
#include <bits/stdc++.h> using namespace std; #define re return #define sz(a) (int)a.size() #define mp(a, b) make_pair(a, b) #define fi first #define se second #define re return #define forn(i, n) for (int i = 0; i < int(n); i++) #define bend(a) a.begin(),a.end() typedef long long ll; typedef vector<int> vi; typedef pair<int, int> pii; typedef pair<long long, long long> pll; typedef long double ld; typedef unsigned long long ull; const ll mod = int(1e9) + 7; ll n, sum1, sum2, sum3, sum4, ans; vector<ll> cp; ll mdd(ll c) { c %= mod; if (c < 0) c += mod; re c; } ll pow1(ll k, ll st) { ll ans = 1; while (st) { if (st & 1) ans = mdd(ans * k); k = mdd(k * k); st >>= 1; } re ans; } ll get_sum(ll n) { n %= mod; n += mod; n %= mod; re (n * (n + 1) / 2LL) % mod; } ll get_cnt(ll a) { a = mdd(a); re mdd(mdd(2LL * mdd(a * a) * a) - mdd(3LL * mdd(a * a) * mdd(n + 2)) + mdd(mdd(a) * mdd(3LL * n + 4)) + mdd(n) * mdd(mdd(mdd(n) * mdd(n)) + 3LL * n + 2LL)); } int main() { iostream::sync_with_stdio(0), cin.tie(0); cin >> n; for (ll k = 1; k * k <= n; k++) { ll c = mdd(k * k); cp.push_back(k * k); sum1 = mdd(sum1 + c); sum2 = mdd(sum2 + mdd(c * c)); sum3 = mdd(sum3 + mdd(mdd(c * c) * c)); sum4 = mdd(sum4 + get_cnt(c)); //cout << sum4 << "\n"; } ans = mdd(get_cnt(1) + 4LL * sum4); ll k1 = 2, k2 = mdd(-3LL * (n + 2)), k3 = mdd(3LL * n + 4); for (ll a = 0; ; a++){ while (sz(cp) && cp.back() + (a+1LL) * (a + 1LL) > n) { ll c = mdd(cp.back() + a * a); sum1 = mdd(sum1 - c); sum2 = mdd(sum2 - mdd(c * c)); sum3 = mdd(sum3 - mdd(mdd(c * c) * c)); sum4 = mdd(sum4 - mdd(get_cnt(c))); cp.pop_back(); continue; } if (sz(cp) == 0) break; ll k = mdd(2LL * a + 1), ksq = mdd(k * k), ktr = mdd(ksq * k); //cout << sum4 << "\n"; sum4 = mdd(sum4 + mdd(ll(sz(cp)) * mdd(k1 * ktr + k2 * ksq + k3 * k)) + mdd(sum1 * mdd(2LL * k * k2 + 3LL * k1 * ksq)) + mdd(sum2 * mdd(3LL * k * k1))); //cout << sum4 << "\n"; ans = mdd(ans + 4LL * sum4); sum3 = mdd(sum3 + mdd(ll(sz(cp)) * ktr) + mdd(sum2 * 3LL * k) + mdd(ksq * 
3LL * sum1)); sum2 = mdd(sum2 + mdd(ll(sz(cp)) * ksq) + mdd(2LL * sum1 * k)); sum1 = mdd(sum1 + mdd(ll(sz(cp)) * k)); //cout << sum3 << " " << sum2 << " " << sum1 << "\n"; } //cout << ans /6<< "\n"; ans = mdd(ans * pow1(6, mod - 2)); cout << ans; }
933
E
A Preponderant Reunion
East or west, home is best. That's why family reunion, the indispensable necessity of Lunar New Year celebration, is put in such a position. After the reunion dinner, Little Tommy plays a game with the family. Here is a concise introduction to this game: - There is a sequence of $n$ non-negative integers $p_{1}, p_{2}, ..., p_{n}$ in the beginning. It is ruled that each integer in this sequence should be non-negative \textbf{at any time}. - You can select two \textbf{consecutive positive} integers in this sequence, $p_{i}$ and $p_{i + 1}$ $(1 ≤ i < n)$, and then decrease them by their minimum (i. e. $min(p_{i}, p_{i + 1})$), the cost of this operation is equal to $min(p_{i}, p_{i + 1})$. We call such operation as a descension. - The game immediately ends when there are no two consecutive positive integers. Your task is to end the game so that the total cost of your operations is as small as possible. Obviously, every game ends after at most $n - 1$ descensions. Please share your solution of this game with the lowest cost.
The author was inspired by 100705B1 - Rasta-making and then made this problem. Noticing that there are no two consecutive positive integers after the game ends, the final sequence can be divided into some intervals which consist of only zero elements, such that the gap between every two adjacent intervals is at most one element (which may be positive). Let's try to solve a general version of this problem first. In this version, we don't need to decrease two consecutive positive integers by their minimum. We can decrease any two consecutive integers by $1$ many times (even if integers are negative) and our task is to eliminate all two consecutive positive integers such that the cost is as small as possible. We can prove by contradiction or adjustment that there are no negative elements in the best solution for the general version (because the original elements are non-negative). Furthermore, the cost of the best solution for the general version is less than or equal to that of the best solution for the original version. Let's consider the cost for the general version to make such an interval $[l, r]$ (i. e. $p_{l}, p_{l + 1}, ..., p_{r}$) become all non-positive $(1 \le l \le r \le n)$. Before concluding the formula, you may assume $p_{0} = p_{n + 1} = 0$. Let $c_{l} = p_{l}$, $c_{i} = \max(p_{i} - c_{i - 1}, 0)$ $(i = l + 1, l + 2, ..., r)$. We can construct a series of operations to make them become non-positive such that the cost can be represented as $\textstyle\sum_{i=l}^{r}c_{i}$. Let's call the cost of such an interval $[l, r]$ as $f(l, r)$. Similarly, we know the actual minimal cost is less than or equal to $f(l, r)$. 
If the length of an interval $[l, r]$ is greater than $2$, there is an observation that $f(l,r)=\left(\sum_{i=l}^{r-2}c_{i}\right)+c_{r-1}+\max(p_{r}-c_{r-1},0)=f(l,r-2)+\max(p_{r},c_{r-1})=f(l,r-2)+p_{r}=f(l,r-2)+f(r,r)$ We can easily prove that $f(l, r)$ is the actual minimal cost in the cases of length $1$ and $2$. In addition, if we get any of the best solutions for the general version, we can construct a series of operations which is valid both in the general version and the original version. So we can conclude that the cost of the best solution for the general version is greater than or equal to that for the original version. With one conclusion we mentioned above, we know that the minimal costs for the original version and the general version are equivalent. Denote $dp(i)$ as the minimum cost the first $i$ elements have used if the $i$-th element is going to be the right endpoint of such an interval. It's easy to compute $dp(n)$ in ${\mathcal{O}}(n)$. After picking up all the intervals for the best solution, construction can be implemented greedily. For example, utilize the descensions at the interior of intervals first, and then make use of the descensions at the edges of intervals. Please note that there may be at most one element that belongs to no interval at the head or the tail of the final sequence. Also, the descensions should operate on positive integers.
[ "constructive algorithms", "dp" ]
3,200
#include <bits/stdc++.h> using namespace std; typedef long long LL; const int maxn = (int)3e5 + 3; int n, a[maxn], cnt, p[maxn], m, out[maxn]; LL f[maxn]; bool v[maxn]; int descension(int pos) { int dt = min(a[pos], a[pos + 1]); if(dt) out[++m] = pos; a[pos] -= dt; a[pos + 1] -= dt; return dt; } int main() { scanf("%d", &n); for(int i = 1; i <= n; ++i) { scanf("%d", a + i); LL odd = f[max(i - 2, 0)] + a[i], even = f[max(i - 3, 0)] + max(a[i - 1], a[i]); f[i] = min(odd, even); v[i] = f[i] != odd; } // a[n + 1] = 0; LL ans = min(f[n - 1], f[n]); // printf("%lld\n", ans); for(int i = n - (ans == f[n - 1]); i > 0; i -= 2 + v[i]) p[++cnt] = i; reverse(p + 1, p + cnt + 1); for(int i = 1; i <= cnt; ++i) { int pre = p[i - 1], cur = p[i], ctr = 0; if(v[cur]) ctr += descension(cur - 1); ctr += descension(pre + 1); ctr += descension(cur); assert(ctr == f[cur] - f[pre]); } printf("%d\n", m); for(int i = 1; i <= m; ++i) printf("%d\n", out[i]); return 0; }
934
A
A Compatible Pair
Nian is a monster which lives deep in the oceans. Once a year, it shows up on the land, devouring livestock and even people. In order to keep the monster away, people fill their villages with red colour, light, and cracking noise, all of which frighten the monster out of coming. Little Tommy has $n$ lanterns and Big Banban has $m$ lanterns. Tommy's lanterns have brightness $a_{1}, a_{2}, ..., a_{n}$, and Banban's have brightness $b_{1}, b_{2}, ..., b_{m}$ respectively. Tommy intends to hide one of his lanterns, then Banban picks one of Tommy's non-hidden lanterns and one of his own lanterns to form a pair. The pair's brightness will be the product of the brightness of two lanterns. Tommy wants to make the product as small as possible, while Banban tries to make it as large as possible. You are asked to find the brightness of the chosen pair if both of them choose optimally.
We can do as what we are supposed to do - hide one of the Tommy's lantern, and then take one non-hidden lantern from Tommy and one lantern from Banban so that the product of their brightness is maximized and the minimum between all cases becomes our answer. This is a straightforward $O(n^{2}m)$ solution. Also, there are many other ways to solve the problem but needs overall consideration. By the way, there were 10 pretests at first where most of contestants failed on the last one. However, considering not to make the judger running with heavy loads, I took away 3 pretests and the pretest 10 was taken by mistake. I must apologize for the extremely weak pretests that make tons of hacks now. But it looks not so bad from the result...
[ "brute force", "games" ]
1,400
#include<bits/stdc++.h> using namespace std; typedef long long ll; const ll INF=(1LL<<60)-1; ll a[55],b[55]; int main() { int n,m; scanf("%d%d",&n,&m); for(int i=1;i<=n;i++) scanf("%lld",&a[i]); for(int i=1;i<=m;i++) scanf("%lld",&b[i]); ll res=INF; for(int i=1;i<=n;i++) { ll now=-INF; for(int j=1;j<=n;j++)if(j!=i) for(int k=1;k<=m;k++) now=max(now,a[j]*b[k]); res=min(res,now); } printf("%lld\n",res); return 0; }
934
B
A Prosperous Lot
Apart from Nian, there is a daemon named Sui, which terrifies children and causes them to become sick. Parents give their children money wrapped in red packets and put them under the pillow, so that when Sui tries to approach them, it will be driven away by the fairies inside. Big Banban is hesitating over the amount of money to give out. He considers loops to be lucky since it symbolizes unity and harmony. He would like to find a positive integer $n$ not greater than $10^{18}$, such that there are exactly $k$ \underline{loops} in the decimal representation of $n$, or determine that such $n$ does not exist. A \underline{loop} is a planar area enclosed by lines in the digits' decimal representation written in Arabic numerals. For example, there is one loop in digit $4$, two loops in $8$ and no loops in $5$. Refer to the figure below for all exact forms.
What's the maximum number of loops in an integer no greater than $10^{18}$? Since $8$ is the only digit with two loops, we tend to use as many eights as possible. It can be seen that the answer is $36$, achieved by $888 888 888 888 888 888$. Thus if $k > 36$, the answer does not exist under the constraints. There are tons of approaches to the following part. Share yours in the comments! The author considers $8$ and $9$ as lucky numbers and uses only $8$ and $9$ to construct a valid answer. In particular, the output consists of $\textstyle{\left\lfloor{\frac{k}{2}}\right\rfloor}$ eight(s) and $(k{\mathrm{~mod~}}2)$ nine(s).
[ "constructive algorithms", "implementation" ]
1,200
#include <bits/stdc++.h> using namespace std; int n,k; int main() { scanf("%d",&k); if (k>36) printf("%d\n",-1); else { while (k>0) { if (k>=2) { printf("%d",8); k-=2; } else { printf("%d",9); k-=1; } } printf("\n"); } return 0; }
935
A
Fafa and his Company
Fafa owns a company that works on huge projects. There are $n$ employees in Fafa's company. Whenever the company has a new project to start working on, Fafa has to divide the tasks of this project among all the employees. Fafa finds doing this every time is very tiring for him. So, he decided to choose the best $l$ employees in his company as team leaders. Whenever there is a new project, Fafa will divide the tasks among only the team leaders and each team leader will be responsible of some positive number of employees to give them the tasks. To make this process fair for the team leaders, each one of them should be responsible for the same number of employees. Moreover, every employee, who is not a team leader, has to be under the responsibility of exactly one team leader, and no team leader is responsible for another team leader. Given the number of employees $n$, find in how many ways Fafa could choose the number of team leaders $l$ in such a way that it is possible to divide employees between them evenly.
Let's try all values of $l$ from 1 to $n - 1$ and check whether the remaining people could be distributed equally over team leaders (that is, $l$ divides $n - l$). The number of valid values of $l$ is our answer. Complexity: $O(n)$. The problem is also equivalent to finding the number of ways to divide the $n$ employees into equal teams where each team contains more than one employee. It can also be solved in $O({\sqrt{n}})$ by finding the number of divisors of $n$.
[ "brute force", "implementation" ]
800
null
935
B
Fafa and the Gates
Two neighboring kingdoms decided to build a wall between them with some gates to enable the citizens to go from one kingdom to another. Each time a citizen passes through a gate, he has to pay one silver coin. The world can be represented by the first quadrant of a plane and the wall is built along the identity line (i.e. the line with the equation $x = y$). Any point below the wall belongs to the first kingdom while any point above the wall belongs to the second kingdom. There is a gate at any integer point on the line (i.e. at points $(0, 0)$, $(1, 1)$, $(2, 2)$, ...). The wall and the gates do not belong to any of the kingdoms. Fafa is at the gate at position $(0, 0)$ and he wants to walk around in the two kingdoms. He knows the sequence $S$ of moves he will do. This sequence is a string where each character represents a move. The two possible moves Fafa will do are 'U' (move one step up, from $(x, y)$ to $(x, y + 1)$) and 'R' (move one step right, from $(x, y)$ to $(x + 1, y)$). Fafa wants to know the number of silver coins he needs to pay to walk around the two kingdoms following the sequence $S$. Note that if Fafa visits a gate without moving from one kingdom to another, he pays no silver coins. Also assume that he doesn't pay at the gate at point $(0, 0)$, i. e. he is initially on the side he needs.
Fafa visits the gates when he stands on the line $y = x$. This happens only when he makes an equal number of up and right moves. Fafa will pass the gates if he is currently at a gate and will make a move similar to the last one. So, we can iterate over the moves in order from left to right keeping track of the number of up and right moves till now, and increment the answer if the next move is similar to the current one and the number of up and right moves are equal. Complexity: $O(n)$.
[ "implementation" ]
900
null
935
C
Fifa and Fafa
Fifa and Fafa are sharing a flat. Fifa loves video games and wants to download a new soccer game. Unfortunately, Fafa heavily uses the internet which consumes the quota. Fifa can access the internet through his Wi-Fi access point. This access point can be accessed within a range of $r$ meters (this range can be chosen by Fifa) from its position. Fifa must put the access point inside the flat which has a circular shape of radius $R$. Fifa wants to minimize the area that is not covered by the access point inside the flat without letting Fafa or anyone outside the flat to get access to the internet. The world is represented as an infinite 2D plane. The flat is centered at $(x_{1}, y_{1})$ and has radius $R$ and Fafa's laptop is located at $(x_{2}, y_{2})$, not necessarily inside the flat. Find the position and the radius chosen by Fifa for his access point which minimizes the uncovered area.
Let $p$ be Fafa's position and $c$ be the flat center. Obviously, the largest wifi circle we can draw will touch the point $p$ and a point on the flat circle border $q$. The diameter of the circle will be the distance between $p$ and $q$. To maximize the area of the wifi circle, we will choose $q$ to be the furthest point on the border from $p$. This point lies on the line connecting $p$ and $c$. The center of wifi circle $w$ will be the midpoint of the line segment connecting $p$ and $q$. If the point $p$ is outside the circle, then the wifi circle will be the flat circle ($w = c$). If the point $p$ lies on the flat center ($p = c$), then there are infinite number of solutions as $q$ can be any point on the flat circle border. Complexity: $O(1)$
[ "geometry" ]
1,600
null
935
D
Fafa and Ancient Alphabet
Ancient Egyptians are known to have used a large set of symbols $\textstyle\mathbf{\hat{Z}}$ to write on the walls of the temples. Fafa and Fifa went to one of the temples and found two non-empty words $S_{1}$ and $S_{2}$ of equal lengths on the wall of temple written one below the other. Since this temple is very ancient, some symbols from the words were erased. The symbols in the set $\textstyle\sum$ have equal probability for being in the position of any erased symbol. Fifa challenged Fafa to calculate the probability that $S_{1}$ is lexicographically greater than $S_{2}$. Can you help Fafa with this task? You know that $|\sum|=m$, i. e. there were $m$ distinct characters in Egyptians' alphabet, in this problem these characters are denoted by integers from $1$ to $m$ in alphabet order. A word $x$ is lexicographically greater than a word $y$ of the same length, if the words are same up to some position, and then the word $x$ has a larger character, than the word $y$. We can prove that the probability equals to some fraction $P\ /Q$, where $P$ and $Q$ are coprime integers, and $Q\not\equiv0\mod{(10^{9}+7)}$. Print as the answer the value $R=P\cdot Q^{-1}\mathrm{\mod\(10^{9}+7)}$, i. e. such a non-negative integer less than $10^{9} + 7$, such that $R\cdot Q\equiv P\mod\left(10^{9}+7\right)$, where $a\equiv b\mod(m)$ means that $a$ and $b$ give the same remainders when divided by $m$.
Let $Suff_{1}(i)$ and $Suff_{2}(i)$ be the suffixes of $S_{1}$ and $S_{2}$ starting from index $i$, respectively. Also, let $P(i)$ be the probability of $Suff_{1}(i)$ being lexicographically larger than $Suff_{2}(i)$. $P(i)$ is equal to the probability that: $S_{1}[i]$ is greater than $S_{2}[i]$, or $S_{1}[i]$ is equal to $S_{2}[i]$ and $Suff_{1}(i + 1)$ is lexicographically greater than $Suff_{2}(i + 1)$. More formally, The answer to the problem is $P(0)$. Complexity: $O(n)$
[ "math", "probabilities" ]
1,900
null
935
E
Fafa and Ancient Mathematics
Ancient Egyptians are known to have understood difficult concepts in mathematics. The ancient Egyptian mathematician Ahmes liked to write a kind of arithmetic expressions on papyrus paper which he called as Ahmes arithmetic expression. An Ahmes arithmetic expression can be defined as: - "$d$" is an Ahmes arithmetic expression, where $d$ is a one-digit positive integer; - "$(E_{1} op E_{2})$" is an Ahmes arithmetic expression, where $E_{1}$ and $E_{2}$ are valid Ahmes arithmetic expressions (without spaces) and $op$ is either plus $( + )$ or minus $( - )$. For example 5, (1-1) and ((1+(2-3))-5) are valid Ahmes arithmetic expressions.On his trip to Egypt, Fafa found a piece of papyrus paper having one of these Ahmes arithmetic expressions written on it. Being very ancient, the papyrus piece was very worn out. As a result, all the operators were erased, keeping only the numbers and the brackets. Since Fafa loves mathematics, he decided to challenge himself with the following task: Given the number of plus and minus operators in the original expression, find out the maximum possible value for the expression on the papyrus paper after putting the plus and minus operators in the place of the original erased operators.
We can represent the arithmetic expression as a binary tree where: each leaf node is a digit. each non-leaf node is an operator and the left and right subtrees are the operands. To maximize the value of the sub-expression represented by a subtree rooted at $v$, we will either: put a plus (+) operator at $v$, and maximize the values of $left(v)$ and $right(v)$ subtrees, or put a minus (-) operator at $v$, and maximize the value of $left(v)$ subtree and minimize the value of $right(v)$ subtree. We can solve this task using DP keeping in mind the number of remaining operators of each type. The DP state will be ($v, rem^{ + }, rem^{ - }$). However, this state will not fit in time or memory limits. Since $min(P, M) \le 100$, we can drop the larger of the last two parameters and implicitly calculate it from the other parameter and the subtree size.
[ "dfs and similar", "dp", "trees" ]
2,300
null
935
F
Fafa and Array
Fafa has an array $A$ of $n$ positive integers, the function $f(A)$ is defined as $\textstyle{\sum_{i=1}^{n-1}|a_{i}-a_{i+1}|}$. He wants to do $q$ queries of two types: - $1 l r x$ — find the maximum possible value of $f(A)$, if $x$ is to be added to one element in the range $[l, r]$. You can choose to which element to add $x$. - $2 l r x$ — increase all the elements in the range $[l, r]$ by value $x$. Note that queries of type $1$ don't affect the array elements.
Let's have a look at the relation of each element $a_{i}$ with its adjacent elements. Without loss of generality, assume $a_{i}$ has two adjacent elements $b_{i}$ and $c_{i}$ where $b_{i} \le c_{i}$. One of the following cases will hold for $a_{i}$, $b_{i}$ and $c_{i}$: $a_{i} \ge b_{i}$ and $a_{i} \ge c_{i}$. $a_{i} \ge b_{i}$ and $a_{i} < c_{i}$. $a_{i} < b_{i}$ and $a_{i} < c_{i}$. For a query $1 l r x$: if there exists an element $a_{i}$, where $i\in[l,r]$, such that case $1$ holds, then it is the best element on which we can apply the add operation because it will increment $f(A)$ by $2 \cdot x$. if case $1$ doesn't exist, then there is at most one element for which case $3$ holds (you can prove this by contradiction). Let's assume that this case holds for element $a_{i}$, where $i\in[l,r]$. Then we will either: increment the element $a_{j}$ where $j\in[l,r]$, $j \neq i$ and $j = argmin_{k}{c_{k} - a_{k}}$. The value of $f(A)$ will be incremented by $2 \cdot max(0, x - (c_{k} - a_{k}))$. increment the element $a_{i}$. The value of $f(A)$ will be incremented by $2 \cdot max(0, x - (c_{i} - a_{i})) - 2 \cdot min(b_{i} - a_{i}, x)$. if neither case $1$ nor case $3$ exists, then we can only use the second option of the previous case. For a query $2 l r x$: the only affected elements will be $a_{l - 1}, a_{l}, a_{r}, a_{r + 1}$. We can use segment trees to answer queries in $O(\log n)$ time. Complexity: $O(n+q\cdot\log n)$
[ "data structures", "greedy" ]
2,600
null
936
A
Save Energy!
Julia is going to cook a chicken in the kitchen of her dormitory. To save energy, the stove in the kitchen automatically turns off after $k$ minutes after turning on. During cooking, Julia goes to the kitchen every $d$ minutes and turns on the stove if it is turned off. While the cooker is turned off, it stays warm. The stove switches on and off instantly. It is known that the chicken needs $t$ minutes to be cooked on the stove, if it is turned on, and $2t$ minutes, if it is turned off. You need to find out, how much time will Julia have to cook the chicken, if it is considered that the chicken is cooked evenly, with constant speed when the stove is turned on and at a constant speed when it is turned off.
There are repeated segments in the cooking process, that are between two consecutive moments, when Julia turns the stove on. Let's call such segment a period. Consider two cases: If $k \le d$, when Julia comes, the stove is always off, that means $period = d$. In other case Julia comes to the kitchen $p$ times between two turnings on, when the stove is still on, and does nothing. In this case $p$ is a number such that $p \cdot d < k$ - the stove is on, $(p + 1) \cdot d \ge k$ - the stove is off. Then the period is $(p + 1) \cdot d$ and $p$ is equal to $ \lceil k / d \rceil - 1$. So $period = \lceil k / d \rceil \cdot d$. If $carry > 2k$, chicken will be prepared after $carry - k$ minutes: $k$ minutes the stove will be on and $carry - 2k$ it will be off. Thus the answer is $num \cdot period + carry - k$ Otherwise $carry$ parts become ready after $carry / 2$ minutes and answer is $num \cdot period + carry / 2$.
[ "binary search", "implementation", "math" ]
1,700
null
936
B
Sleepy Game
Petya and Vasya arranged a game. The game runs by the following rules. Players have a directed graph consisting of $n$ vertices and $m$ edges. One of the vertices contains a chip. Initially the chip is located at vertex $s$. Players take turns moving the chip along some edge of the graph. Petya goes first. Player who can't move the chip loses. If the game lasts for $10^{6}$ turns the draw is announced. Vasya was performing big laboratory work in "Spelling and parts of speech" at night before the game, so he fell asleep at the very beginning of the game. Petya decided to take the advantage of this situation and make both Petya's and Vasya's moves. Your task is to help Petya find out if he can win the game or at least draw a tie.
Note that the answer is sequence of adjacent vertices of even length such that the last vertex of this sequence has no outgoing edges. Build state graph as follows: State is pair $(v, parity)$, where $v$ is vertex of initial graph and $parity$ is parity of count of vertices on path from $s$ to $v$. For every edge $uv$ of initial graph add edges $(u,0)\rightarrow(v,1)$ and $(u,1)\rightarrow(v,0)$ in state graph. So there exists path from $(s, 1)$ to $(v, parity)$ if and only if there exists path from $s$ to $v$ in initial graph of parity $parity$. Lets find all reachable from $(s, 1)$ states using BFS or DFS. If there is state $(v, 0)$ among them such that $v$ has no outgoing edges in initial graph, then Petya can win. He can move along vertices in path from $(s, 1)$ to $(v, 0)$ in state graph. Otherwise we need to check if Petya can make $10^{6}$ moves for drawing a tie. If there is a tie then the chip visited some vertex twice, because $n < 10^{6}$. Therefore it is sufficient to check if there is a cycle in initial graph reachable from $s$. In this case Petya can play as follows: move to any vertex of cycle and then move along the cycle as long as it requires to draw a tie.
[ "dfs and similar", "dp", "games", "graphs" ]
2,100
null
936
C
Lock Puzzle
Welcome to another task about breaking the code lock! Explorers Whitfield and Martin came across an unusual safe, inside of which, according to rumors, there are untold riches, among which one can find the solution of the problem of discrete logarithm! Of course, there is a code lock is installed on the safe. The lock has a screen that displays a string of $n$ lowercase Latin letters. Initially, the screen displays string $s$. Whitfield and Martin found out that the safe will open when string $t$ will be displayed on the screen. The string on the screen can be changed using the operation «shift $x$». In order to apply this operation, explorers choose an integer $x$ from 0 to $n$ inclusive. After that, the current string $p = αβ$ changes to $β^{R}α$, where the length of $β$ is $x$, and the length of $α$ is $n - x$. In other words, the suffix of the length $x$ of string $p$ is reversed and moved to the beginning of the string. For example, after the operation «shift $4$» the string «abcacb» will be changed with string «bcacab », since $α = $ab, $β = $cacb, $β^{R} = $bcac. Explorers are afraid that if they apply too many operations «shift», the lock will be locked forever. They ask you to find a way to get the string $t$ on the screen, using no more than $6100$ operations.
The answer is <<NO>> only in the case when multisets of letters in $s$ and $t$ differ. In all other cases there is a solution. Let's construct a solution that uses $\textstyle{\frac{5}{2}}n$ operations. To do that, you need to <<add>> two symbols to the current already built substring using five operations. You can do it, for example, using the following method (the underlined string is chosen as $ \beta $): ...x...abc $\to$ cba......x cba......x $\to$ x......abc x......abc $\to$ cbax...... cbax...y.. $\to$ ..ycbax... ..ycbax... $\to$ .....ycbax If we had abc as suffix, after these operations, we get ycbax, which is two symbols longer. Choosing x and y accordingly, we can maintain the invariant, that the suffix of current string always contains a monotone (increasing or decreasing) sequence. After we make this sequence have length $n$, the entire string is either a cyclic shift or a reversed cyclic shift of $t$. You can do a cyclic shift by $k$ in three operations: <<shift $n - k$>>, <<shift $k$>>, <<shift $n$>>. This way, we get a ${\textstyle{\frac{5}{2}}}n+O(1)$ solution.
[ "constructive algorithms", "implementation", "strings" ]
2,300
null
936
D
World of Tank
Vitya loves programming and problem solving, but sometimes, to distract himself a little, he plays computer games. Once he found a new interesting game about tanks, and he liked it so much that he went through almost all levels in one day. Remained only the last level, which was too tricky. Then Vitya remembered that he is a programmer, and wrote a program that helped him to pass this difficult level. Try do the same. The game is organized as follows. There is a long road, two cells wide and $n$ cells long. Some cells have obstacles. You control a tank that occupies one cell. Initially, the tank is located before the start of the road, in a cell with coordinates $(0, 1)$. Your task is to move the tank to the end of the road, to the cell $(n + 1, 1)$ or $(n + 1, 2)$. Every second the tank moves one cell to the right: the coordinate $x$ is increased by one. When you press the up or down arrow keys, the tank instantly changes the lane, that is, the $y$ coordinate. When you press the spacebar, the tank shoots, and the nearest obstacle along the lane in which the tank rides is instantly destroyed. In order to load a gun, the tank needs $t$ seconds. Initially, the gun is not loaded, that means, the first shot can be made only after $t$ seconds after the tank starts to move. If at some point the tank is in the same cell with an obstacle not yet destroyed, it burns out. If you press the arrow exactly at the moment when the tank moves forward, the tank will first move forward, and then change the lane, so it will not be possible to move diagonally. Your task is to find out whether it is possible to pass the level, and if possible, to find the order of actions the player need to make.
At first arrange a pair of facts: If there is a path which doesn't contain blowed cell you can easily convert it to path which does. So it's enough to check such paths to determine answer. Tank can accumulate shots when it moves without turns. In other words: consider tank makes 100 steps without turns and shots, so lets say that tank accumulate 100 steps. If $t$ equals to 3 tank was able to make 33 shots on that part of path. Accumulating steps is the same as deferred shots. Tank gains steps and when it's necessary to make some shots we can choose position of shots on this straight part of path in such way that tank blows obstacles. But when the tank changes its row you should flush accumulated steps to minimum of $t$ and old value because accumulated steps has sense only on hte straight line without shots because otherwise it may be impossible to choose correct positions for shots. The second step is solution with asymptotics $O(n)$: Using dynamic programming: $dp[i][j]$ - maximal number of accumulated step if tank is in cell with coordinates $(i, j)$ and -1 if it's impossible. You should update dynamic's values from previous cell in the same row or cell in another row but in the same column. Check that $dp[i][j - 1] \neq - 1$ (otherwise tank can't be in the $(i, j - 1)$ cell) after that update value by $dp[i][j - 1] + 1$ if there is no obstacle in cell $(i, j)$ and by $dp[i][j - 1] - t + 1$ otherwise. Before updating by cell in another row you should update both of them ($(1, j)$ and $(2, j)$ if rows numerated from 1) by cells in previous column and only after that you can update them be each other. Store for each cell $(i, j)$ was it updated from $(3 - i, j)$ or $(i, j - 1)$. Start from last column and iterate to the zero one to restore the path. When you restore path you can easily calculate number of obstacles which should be blowed for each part of path without shots. So now you have to choose places for them. 
Consider any part of the path from any turn to the next one. Let there are $s$ obstacles, so all of them must be destroyed by the tank. Let the first cell of this path's part has coordinates $(i, j)$ and $dp[i][j]$ equal to $k$, so the first shot on this part can be not earlier than $(i, j + t - k)$, the next one not earlier than $(i, j + 2t - k)$ and so on. Place shots in that position and tank will correctly blow up all obstacles. It obviously follows from dynamic programming's definition. Complexity: $O(n)$. The third step is prooving that tank can turn only to cells which are immediately after an obstacle (if obstacle is in cell with coordinates $(i, j)$ then tank turns to cell $(i, j + 1)$). Consider any path which is one of soloutions. Consider the first turn in this path which doesn't fit the constraint above. Then consider the nearest obstacle in the same row as row where tank will be after turn, but with smaller number of a column. If tank turns to the cell which immediately after that obstacle then it can move along that row to current position, eliminates unnecessary turns, so the considered turn will be eliminated. Otherwise tank was in another row, so you can easily turn and do the same actions to eliminate such turn. Repeat this actions you transform any path to path which has only turns to cells immediately after obstacles. Sort obstacles in order of increasing their column's number. So due to fact that tank can turn only in certain points it isn't necessary calculates DP for all $2 \cdot n$ cells. Now $m_{1} + m_{2}$ cells enough. So calculate the same DP for cells which are immediatel after obstacles, strore for each point the point where it was updated from to restore path. Restore path and for each part without turns calculate number of obstacles to destroy and place shots. Complexity $O(m_{1} + m_{2})$.
[ "dp", "greedy" ]
3,000
null
936
E
Iqea
Gridland is placed on infinite grid and has a shape of figure consisting of cells. Every cell of Gridland is a city. Two cities that are placed in adjacent cells are connected by the road of length $1$. It's possible to get from any city to any other city using roads. The \underline{distance} between two cities is the minimum total road length on path from one city to another. It's possible to get from any cell that doesn't belong to Gridland to any other cell that doesn't belong to Gridland by using only cells which don't belong to Gridland. In other words, Gridland is connected and complement of Gridland is also connected. At the moment no city in Gridland has Iqea famous shop. But Iqea has great plans for building shops in Gridland. For customers' convenience Iqea decided to develop an application. Using this application everyone can learn the distance to the nearest Iqea. You are to develop this application. You are asked to process two types of queries: - new Iqea shop has been opened in the city with coordinates $(x, y)$; - customer wants to know the distance to the nearest already opened Iqea shop from his city located in a cell with coordinates $(x, y)$. Pay attention that customer can move only by roads and can never leave Gridland on his way to the shop.
Let's cut the figure into strips of consecutive cells which lie in the same column. Consider strips as vertices. Connect two vertices by an edge if their strips have common edge. It can be proved that resulting graph is a tree. Consider two cells (x_a, y_a) and (x_b, y_b). Cell (x_a, y_a) lies on strip v_a, cell (x_b, y_b) lies on strip v_b. Then any shortest path between these two cells goes only through strips corresponding to vertices on path from v_a to v_b. And every such strip has non-empty intersection with path between this two cells. So if we build centroid decomposition of tree of strips, shortest path between two cells will surely go through strip of centroid which divide vertices of these two cells. Thus, you can solve standard problem on tree: sometimes vertices are turning on, and you are to find the distance to the nearest vertex that is turned on. To solve this problem you can use centroid decomposition. Only one question left. How to combine distances from centroid to two cells? Consider two cells (x_a, y_a) and (x_b, y_b), and path between them. Let this path goes through strip v_c. Let d_a be a distance from (x_a, y_a) to the strip v_c, z_a be a y coordinate of nearest cell on strip v_c. The same way define d_b and z_b for cell (x_b, y_b). Then the distance between (x_a, y_a) and (x_b, y_b) equals to d_a + d_b + |z_a - z_b|. So we can store h values in every vertex and change them in the following way: val_i = \min(val_i, d_a + |z_i - z_a|), h is the size of strip, z_i is the y coordinate of i-th cell on strip. This can be done using segment tree. Example of building a tree. Blue and yellow colors mark cells (x_a, y_a) and (x_b, y_b). Green color marks strip v_c. The values are: d_a = 6, d_b = 7, z_a = 8, z_b = 5. I honestly don't know if this is the picture originally drawn for this tutorial. The original picture in Metapost doesn't compile, and I asked ChatGPT to redraw it for me... 
Values d_a and z_a for every pair cell and vertex can be calculated during building of centroid decomposition. Complexity of the solution is O(n\cdot log(n) + q\cdot log^2(n)) time and O(n\cdot log(n)) memory.
[ "data structures", "dfs and similar", "divide and conquer", "dsu", "shortest paths", "trees" ]
3,400
null
937
A
Olympiad
The recent All-Berland Olympiad in Informatics featured $n$ participants with each scoring a certain amount of points. As the head of the programming committee, you are to determine the set of participants to be awarded with diplomas with respect to the following criteria: - At least one participant should get a diploma. - None of those with score equal to zero should get awarded. - When someone is awarded, all participants with score \textbf{not less} than his score should also be awarded. Determine the number of ways to choose a subset of participants that will receive the diplomas.
Drop all participants with zero points. Then the answer is simply the number of distinct points among the remaining participants.
[ "implementation", "sortings" ]
800
null
937
B
Vile Grasshoppers
The weather is fine today and hence it's high time to climb the nearby pine and enjoy the landscape. The pine's trunk includes several branches, located one above another and numbered from $2$ to $y$. Some of them (more precise, from $2$ to $p$) are occupied by tiny vile grasshoppers which you're at war with. These grasshoppers are known for their awesome jumping skills: the grasshopper at branch $x$ can jump to branches $2\cdot x, 3\cdot x, \ldots, \left\lfloor{\frac{y}{x}}\right\rfloor\cdot x$. Keeping this in mind, you wisely decided to choose such a branch that none of the grasshoppers could interrupt you. At the same time you wanna settle as high as possible since the view from up there is simply breathtaking. In other words, your goal is to find the highest branch that cannot be reached by any of the grasshoppers or report that it's impossible.
The first observation is that the optimal branch shouldn't be divisible by anything in range $[2..p]$. Let us decrease $y$ until its minimal divisor (other than one) is greater than $p$. Why does this approach work? Note that the nearest prime less or equal to $y$ is valid. At the same time the prime gap of numbers less than billion doesn't exceed $300$ and we're gonna factorize no more than $300$ numbers in total. Therefore the complexity is $300\cdot{\sqrt{10^{9}}}$.
[ "brute force", "math", "number theory" ]
1,400
null
938
A
Word Correction
Victor tries to write his own text editor, with word correction included. However, the rules of word correction are really strange. Victor thinks that if a word contains two \textbf{consecutive} vowels, then it's kinda weird and it needs to be replaced. So the word corrector works in such a way: as long as there are two consecutive vowels in the word, it deletes the first vowel in a word such that there is \textbf{another vowel right before it}. If there are no two consecutive vowels in the word, it is considered to be correct. You are given a word $s$. Can you predict what will it become after correction? \textbf{In this problem letters a, e, i, o, u and y are considered to be vowels}.
Iterate over the string, output only consonants and vowels which don't have a vowel before them.
[ "implementation" ]
800
"#include<bits/stdc++.h>\n\nusing namespace std;\n\nconst string V = \"aeiouy\";\n\nbool vowel(char c)\n{\n\treturn V.find(c) != -1;\n}\n\nint main()\n{\n\tint n;\n\tcin >> n;\n\tstring s;\n\tcin >> s;\n\tcout << s[0];\n\tfor(int i = 1; i < n; i++)\n\t\tif (!vowel(s[i - 1]) || !vowel(s[i]))\n\t\t\tcout << s[i];\n\tcout << endl;\n}"
938
B
Run For Your Prize
You and your friend are participating in a TV show "Run For Your Prize". At the start of the show $n$ prizes are located on a straight line. $i$-th prize is located at position $a_{i}$. Positions of all prizes are distinct. You start at position $1$, your friend — at position $10^{6}$ (and there is no prize in any of these two positions). You have to work as a team and collect all prizes in minimum possible time, in any order. You know that it takes exactly $1$ second to move from position $x$ to position $x + 1$ or $x - 1$, both for you and your friend. You also have trained enough to instantly pick up any prize, if its position is equal to your current position (and the same is true for your friend). Carrying prizes does not affect your speed (or your friend's speed) at all. Now you may discuss your strategy with your friend and decide who will pick up each prize. Remember that every prize must be picked up, either by you or by your friend. What is the minimum number of seconds it will take to pick up all the prizes?
You can find the total time with the knowledge of the prefix length. The final formula is $\operatorname*{min}(a_{n}-1,10^{6}-a_{1},\operatorname*{min}_{i=1}^{n-1}(\operatorname*{max}(a_{i}-1,10^{6}-a_{i+1})))$.
[ "brute force", "greedy" ]
1,100
"#include <bits/stdc++.h>\n\n#define forn(i, n) for (int i = 0; i < int(n); i++)\n\nusing namespace std;\n\nconst int INF = 1e7;\n\nint n;\nvector<int> pos;\n\nint main() {\n\tscanf(\"%d\", &n);\n\tpos.resize(n);\n\tforn(i, n)\n\t\tscanf(\"%d\", &pos[i]);\n\t\n\tint ans = INF;\n\t\n\tforn(i, n + 1){\n\t\tint cur = 0;\n\t\tif (i) cur = max(cur, pos[i - 1] - 1);\n\t\tif (i != n) cur = max(cur, 1000000 - pos[i]);\n\t\t\n\t\tans = min(ans, cur);\n\t}\n\t\n\tprintf(\"%d\\n\", ans);\n}"
938
C
Constructing Tests
Let's denote a $m$-free matrix as a binary (that is, consisting of only $1$'s and $0$'s) matrix such that every square submatrix of size $m × m$ of this matrix contains at least one zero. Consider the following problem: {You are given two integers $n$ and $m$. You have to construct an $m$-free square matrix of size $n × n$ such that \textbf{the number of $1$'s in this matrix is maximum possible}. Print the maximum possible number of $1$'s in such matrix.} You don't have to solve this problem. Instead, you have to construct a few tests for it. You will be given $t$ numbers $x_{1}$, $x_{2}$, ..., $x_{t}$. For every $i\in[1,t]$, find two integers $n_{i}$ and $m_{i}$ ($n_{i} ≥ m_{i}$) such that the answer for the aforementioned problem is exactly $x_{i}$ if we set $n = n_{i}$ and $m = m_{i}$.
Now that you know the formula, you can iterate over $n$ and find the correct value. The lowest non-zero value you can get for some $n$ is having $k = 2$. So you can estimate $n$ as about $\frac{4{\sqrt{x}}}{3}$. Now let's get $k$ for some fixed $n$. $n^{2}-\lfloor{\frac{n}{k}}\rfloor^{2}=x$ $\to$ $\lfloor{\frac{n}{k}}\rfloor^{2}=n^{2}-x$ $\to$ $\lfloor{\frac{n}{k}}\rfloor={\sqrt{n^{2}-x}}$ $\to$ $k={\frac{n}{\sqrt{n^{2}-x}}}$. Due to rounding down, it's enough to check only this value of $k$.
[ "binary search", "brute force", "constructive algorithms" ]
1,700
"#include <bits/stdc++.h>\n\n#define forn(i, n) for (int i = 0; i < int(n); i++)\n\nusing namespace std;\n\nint getSqr(int x){\n\tint l = sqrt(x);\n\tfor (int i = -2; i <= 2; ++i)\n\t\tif (l + i >= 0 && (l + i) * (l + i) == x)\n\t\t\treturn l;\n\treturn -1;\n}\n\nvoid solve(){\n int x;\n\tscanf(\"%d\", &x);\n\tfor (int n = 1; n == 1 || n * n - (n / 2) * (n / 2) <= x; ++n){\n\t\tint lk = n * n - x;\n\t\tif (lk < 0) continue;\n\t\tint sq = getSqr(lk);\n\t\tif (sq <= 0) continue;\n\t\t\n\t\tint k = n / sq;\n\t\tif (k > 0 && n * n - (n / k) * (n / k) == x){\n\t\t\tprintf(\"%d %d\\n\", n, k);\n\t\t\treturn;\n\t\t}\n\t}\n\t\n\tputs(\"-1\");\n}\n\nint main() {\n int tc;\n scanf(\"%d\", &tc);\n\tforn(i, tc)\n\t solve();\n}"
938
D
Buy a Ticket
Musicians of a popular band "Flayer" have announced that they are going to "make their exit" with a world tour. Of course, they will visit Berland as well. There are $n$ cities in Berland. People can travel between cities using two-directional train routes; there are exactly $m$ routes, $i$-th route can be used to go from city $v_{i}$ to city $u_{i}$ (and from $u_{i}$ to $v_{i}$), and it costs $w_{i}$ coins to use this route. Each city will be visited by "Flayer", and the cost of the concert ticket in $i$-th city is $a_{i}$ coins. You have friends in every city of Berland, and they, knowing about your programming skills, asked you to calculate the minimum possible number of coins they have to pay to visit the concert. For every city $i$ you have to compute the minimum number of coins a person from city $i$ has to spend to travel to some city $j$ (or possibly stay in city $i$), attend a concert there, and return to city $i$ (if $j ≠ i$). Formally, for every $i\in[1,n]$ you have to calculate $\operatorname*{min}_{j=1}^{n}2d(i,j)+a_{j}$, where $d(i, j)$ is the minimum number of coins you have to spend to travel from city $i$ to city $j$. If there is no way to reach city $j$ from city $i$, then we consider $d(i, j)$ to be infinitely large.
The function of the path length is not that different from the usual one. You can multiply edge weights by two and run Dijkstra in the following manner. Set $dist_{i} = a_{i}$ for all $i\in[1,n]$ and push these values to heap. When finished, $dist_{i}$ will be equal to the shortest path.
[ "data structures", "graphs", "shortest paths" ]
2,000
"#include <bits/stdc++.h>\n\n#define forn(i, n) for (int i = 0; i < int(n); i++)\n\nusing namespace std;\n\ntypedef long long li;\n\nconst int N = 200 * 1000 + 13;\nconst li INF64 = 1e18;\n\nint n, m;\nli a[N];\nvector<pair<int, li>> g[N];\n\nli dist[N];\n\nvoid Dijkstra(){\n\tset<pair<li, int>> q;\n\tforn(i, n){\n\t\tdist[i] = a[i];\n\t\tq.insert({dist[i], i});\n\t}\n\t\n\twhile (!q.empty()){\n\t\tint v = q.begin()->second;\n\t\tq.erase(q.begin());\n\t\t\n\t\tfor (auto it : g[v]){\n\t\t\tint u = it.first;\n\t\t\tli w = it.second;\n\t\t\t\n\t\t\tif (dist[u] > dist[v] + w){\n\t\t\t\tq.erase({dist[u], u});\n\t\t\t\tdist[u] = dist[v] + w;\n\t\t\t\tq.insert({dist[u], u});\n\t\t\t}\n\t\t}\n\t}\n}\n\nint main() {\n\tscanf(\"%d%d\", &n, &m);\n\tforn(_, m){\n\t\tint f, t;\n\t\tli w;\n\t\tscanf(\"%d%d%lld\", &f, &t, &w);\n\t\t--f, --t;\n\t\tw *= 2;\n\t\tg[f].push_back({t, w});\n\t\tg[t].push_back({f, w});\n\t}\n\tforn(i, n){\n\t\tscanf(\"%lld\", &a[i]);\n\t}\n\t\n\tDijkstra();\n\tforn(i, n)\n\t\tprintf(\"%lld \", dist[i]);\n\tputs(\"\");\n\treturn 0;\n}"
938
E
Max History
You are given an array $a$ of length $n$. We define $f_{a}$ the following way: - Initially $f_{a} = 0$, $M = 1$; - for every $2 ≤ i ≤ n$ if $a_{M} < a_{i}$ then we set $f_{a} = f_{a} + a_{M}$ and then set $M = i$. Calculate the sum of $f_{a}$ over all $n!$ permutations of the array $a$ modulo $10^{9} + 7$. Note: two elements are considered different if their indices differ, so for every array $a$ there are exactly $n!$ permutations.
It is easy to see that $i$-th element appears in $f_{a}$ if and only if all elements appearing before it in the array are less than it, so if we define $l_{i}$ as the number of elements less than $a_{i}$ the answer will be equal to: $\sum_{i=1}^{n}a_{i}\times[\sum_{j=1}^{l_{i}+1}\binom{l_{i}}{j-1}\times(j-1)!\times(n-j)!]$. By determining the index of $a_{i}$, if it is on the index $j$ then we have to choose $j - 1$ of the $l_{i}$ elements smaller than it and then permuting them and then permuting the other elements. We can find all $l_{i}$ with complexity of $O(n \log n)$. If we were to implement this, the complexity would equal to $O(n^{2})$. Now let's make our formula better. So let's open it like so: $\sum_{i=1}^{n}a_{i}\times[\sum_{j=1}^{l_{i}+1}\frac{l_{i}!}{(j-1)!\times(l_{i}-j+1)!}\times(j-1)!\times(n-j)!]$ and then it equals to: $\sum_{i=1}^{n}a_{i}\times[\sum_{j=1}^{l_{i}+1}\frac{l_{i}!}{(l_{i}-j+1)!}\times(n-j)!]$ and now let's take out the $l_{i}!$: $\sum_{i=1}^{n}a_{i}\times l_{i}!\times[\sum_{j=1}^{l_{i}+1}\frac{(n-j)!}{(l_{i}-j+1)!}]$. Now let's multiply the inside of the first sigma by $\frac{1}{(n-l_{i}-1)!}$ and the inside of the second sigma by $(n - l_{i} - 1)!$ and it gets equal to: $\sum_{i=1}^{n}a_{i}\times l_{i}!\times(n-l_{i}-1)!\times[\sum_{j=1}^{l_{i}+1}\frac{(n-j)!}{(n-l_{i}-1)!\times(l_{i}-j+1)!}]$ and it is easy to see it equals to: $\sum_{i=1}^{n}a_{i}\times l_{i}!\times(n-l_{i}-1)!\times[\sum_{j=1}^{l_{i}+1}\binom{n-j}{n-l_{i}-1}]$ and using the fact that $\binom{k}{k}+\binom{k+1}{k}+\cdots+\binom{n}{k}=\binom{n+1}{k+1}$ $(k\leq n)$ it will equal to: $\sum_{i=1}^{n}a_{i}\times l_{i}!\times(n-l_{i}-1)!\times\binom{n}{n-l_{i}}$. So the final answer will equal to: $\sum_{i=1}^{n}\frac{a_{i}\times n!}{n-l_{i}}$, which can be easily implemented in $O(n \log n)$. 
Make sure to not add the answer for maximum number in the sequence.
[ "combinatorics", "math" ]
2,300
"#include <bits/stdc++.h>\nusing namespace std;\ntypedef long long ll;\nconst int MAX_N = 1e6+10;\nconst int mod = 1e9+7;\n\nll a[MAX_N],n,fact[MAX_N],rfact[MAX_N];\nll num(int t){\n return fact[n-t-1]*rfact[n-t+1]%mod;\n}\nmain(){\n scanf(\"%lld\", &n);\n for(ll i=1;i<=n;i++)\n scanf(\"%lld\", &a[i]);\n fact[0]=rfact[n+1]=1;\n for(int i=1;i<=n+1;i++)\n fact[i]=(fact[i-1]*i)%mod;\n for(int i=n;i>=1;i--)\n rfact[i]=(rfact[i+1]*i)%mod;\n sort(a+1,a+n+1);\n ll cnt=0,curr=0,ans=0;\n for(ll i=1;i<=n&&a[i]!=a[n];i++){\n if(a[i]==a[i-1])\n cnt++;\n else\n curr+=cnt,cnt=1;\n ans=(ans+num(curr)*a[i]%mod)%mod;\n }\n printf(\"%d\",ans);\n}"
938
F
Erasing Substrings
You are given a string $s$, initially consisting of $n$ lowercase Latin letters. After that, you perform $k$ operations with it, where $k=\lfloor\log_{2}(n)\rfloor$. During $i$-th operation you \textbf{must} erase some substring of length exactly $2^{i - 1}$ from $s$. Print the lexicographically minimal string you may obtain after performing $k$ such operations.
Let's try to apply some greedy observations. Since each state represents a possible prefix of the resulting string, then among two states $dp[m_{1}][mask_{1}]$ and $dp[m_{2}][mask_{2}]$ such that the lengths of corresponding prefixes are equal, but the best answers for states are not equal, we don't have to consider the state with lexicographically greater answer. So actually for every length of prefix there exists only one best prefix we will get, and we may store a boolean in each state instead of a string. The boolean will denote if it is possible to get to corresponding state with minimum possible prefix. To calculate this, we iterate on the lengths of prefixes of the resulting string. When we fix the length of prefix, we firstly consider dynamic programming transitions that denote deleting a substring (since they don't add any character). Then among all states $dp[m][mask]$ that allow us to reach some fixed length of prefix and have $dp[m][mask] = true$ we pick the best character we can use to proceed to next prefix (and for a fixed state that's actually $(m + 1)$-th character of the string). This is $O(n^{2}\log{n})$, but in fact it's pretty fast.
[ "bitmasks", "dp", "greedy" ]
2,700
"#include <bits/stdc++.h>\n\nusing namespace std;\n\nconst int N = 5043;\nconst int M = 12;\n\nbool dp[N][(1 << M)];\n\ntypedef pair<int, int> pt;\n\n#define x first\n#define y second\n\nvector<int> bits[(1 << M)];\n\nint main()\n{\n\tstring s;\n\tcin >> s;\n\tint n = s.size();\n\tstring ans;\n\tdp[0][0] = 1;\n\tvector<pt> cur;\n\tcur.push_back(make_pair(0, 0));\n\tint final_sz = n;\n\tint cur_len = 1;\n\tint cnt = 0;\n\twhile((1 << cnt) < n)\n\t\tcnt++;\n\tcnt--;\n\t\n\tfor(int i = 0; i < (1 << cnt); i++)\n\t\tfor(int j = 0; j < cnt; j++)\n\t\t\tif ((i & (1 << j)) == 0)\n\t\t\t\tbits[i].push_back(j);\n\twhile(final_sz > cur_len)\n\t{\n\t\tfinal_sz -= cur_len;\n\t\tcur_len *= 2;\n\t}\n\twhile(ans.size() < final_sz)\n\t{\n\t\tchar min_chr = 'z';\n\t\tfor(int i = 0; i < cur.size(); i++)\n\t\t{\n\t\t\tauto x = cur[i];\n\t\t\tfor(auto y : bits[x.y])\n\t\t\t{\n\t\t\t\tif (dp[x.x + (1 << y)][x.y ^ (1 << y)] == 0)\n\t\t\t\t{\n\t\t\t\t\tcur.push_back(make_pair(x.x + (1 << y), x.y ^ (1 << y)));\n\t\t\t\t\tdp[x.x + (1 << y)][x.y ^ (1 << y)] = 1;\n\t\t\t\t}\n\t\t\t}\t\n\t\t\tmin_chr = min(min_chr, s[x.x]);\n\t\t}\n\t\tvector<pt> new_cur;\n\t\tans.push_back(min_chr);\n\t\tfor(auto x : cur)\n\t\t\tif (s[x.x] == min_chr)\n\t\t\t{\n\t\t\t\tdp[x.x + 1][x.y] = 1;\n\t\t\t\tnew_cur.push_back(make_pair(x.x + 1, x.y));\n\t\t\t}\n\t\tcur = new_cur;\n\t}\n\tcout << ans << endl;\n}"
938
G
Shortest Path Queries
You are given an undirected connected graph with weighted edges. The length of some path between two vertices is the bitwise xor of weights of all edges belonging to this path (if some edge is traversed more than once, then it is included in bitwise xor the same number of times). There are three types of queries you have to process: - $1$ $x$ $y$ $d$ — add an edge connecting vertex $x$ to vertex $y$ with weight $d$. It is guaranteed that there is no edge connecting $x$ to $y$ before this query; - $2$ $x$ $y$ — remove an edge connecting vertex $x$ to vertex $y$. It is guaranteed that there was such edge in the graph, and the graph stays connected after this query; - $3$ $x$ $y$ — calculate the length of the shortest path (possibly non-simple) from vertex $x$ to vertex $y$. Print the answers for all queries of type $3$.
This is a more complex version of problem G from Educational Round 27. You can find its editorial here. To solve the problem we consider now, you have to use a technique known as dynamic connectivity. Let's build a segment tree over queries: each vertex of the segment tree will contain a list of all edges existing in the graph on the corresponding segment of queries. If some edge exists from query $l$ to query $r$, then it's like an addition operation on segment $[l, r]$ in segment tree (but instead of addition, we insert this edge into the list of edges on a segment, and we make no pushes). Then if we write some data structure that will allow to add an edge and rollback operations we applied to the structure, then we will be able to solve the problem by DFS on segment tree: when we enter a vertex, we add all edges in the list of this vertex; when we are in a leaf, we calculate the required answer for the corresponding moment of time; and when we leave a vertex, we rollback all changes we made there. What data structure do we need? Firstly, we will have to use DSU maintaining the distance to the leader (to maintain the length of some path between two vertices). Don't use path compression, this won't work well since we have to do rollbacks. Secondly, we have to maintain the base of all cycles in the graph (since the graph is always connected, it doesn't matter that some cycles may be unreachable: by the time we get to leaves of the segment tree, these cycles will become reachable, so there's no need to store a separate base for each component). A convenient way to store the base is to make an array of $30$ elements, initially filled with zeroes (we denote this array as $a$). $i$-th element of the array will denote some number in a base such that $i$-th bit is largest in the number. 
Adding some number $x$ to this base is really easy: we iterate on bits from $29$-th to $0$-th, and if some bit $j$ is equal to $1$ in $x$, and $a[j] \neq 0$, then we just set $x:=x\oplus a[j]$ (let's call this process reduction, we will need it later). If we get $0$ after doing these operations, then the number we tried to add won't affect the base, and we don't need to do anything; otherwise, let $k$ be the highest bit equal to $1$ in $x$, and then we set $a[k] := x$. This method of handling the base of cycles also allows us to answer queries of type $3$ easily: firstly, we pick the length of some path from DSU (let it be $p$), and secondly, we just apply reduction to $p$, and this will be our answer.
[ "bitmasks", "data structures", "dsu", "graphs" ]
2,900
"#include <bits/stdc++.h>\n\nusing namespace std;\n\ntypedef pair<pair<int, int>, int> edge;\n\n#define x first\n#define y second\n\nconst int M = 2000043;\nconst int N = 200043;\nconst int B = 30;\nconst int IDX = 200002;\n\nint* where[M];\nint val[M];\nint st = 0;\n\ninline void rollback(int new_st)\n{\n\twhile(st != new_st)\n\t{\n\t\tst--;\n\t\t(*where[st]) = val[st];\n\t}\n}\n\ninline void change(int& address, int new_val)\n{\n\twhere[st] = &address;\n\tval[st] = address;\n\tst++;\n\taddress = new_val;\n}\n\nvector<edge> T[4 * N];\nvector<edge> Q[4 * N];\nint ans[N];\n\nvoid add_edge(int v, int l, int r, int L, int R, edge e)\n{\n\tif (L >= R)\n\t\treturn;\n\tif (l == L && r == R)\n\t{\n\t\tT[v].push_back(e);\n\t\treturn;\n\t}\n\tint mid = (l + r) >> 1;\n\tadd_edge(v * 2 + 1, l, mid, L, min(mid, R), e);\n\tadd_edge(v * 2 + 2, mid, r, max(L, mid), R, e);\n}\n\nint base[B];\n\ninline void try_gauss(int v)\n{\n\tfor(int i = 29; i >= 0; i--)\n\t\tif (base[i] != -1 && (v & (1 << i)))\n\t\t\tv ^= base[i];\n\tif (v != 0)\n\t\tfor(int i = 29; i >= 0; i--)\n\t\t\tif (v & (1 << i))\n\t\t\t\treturn change(base[i], v);\n}\n\nint rnk[N];\nint dsu[N];\nint dist[N];\n\ninline int get_p(int x)\n{\t\n\twhile(dsu[x] != x)\n\t\tx = dsu[x];\n\treturn x;\n}\n\ninline int get_dist(int x)\n{\n\tint res = 0;\n\twhile(dsu[x] != x)\n\t{\n\t\tres ^= dist[x];\n\t\tx = dsu[x];\n\t}\n\treturn res;\n}\n\ninline bool merge(int x, int y, int d)\n{\n\tint dist_x = get_dist(x);\n\tint dist_y = get_dist(y);\n\tx = get_p(x);\n\ty = get_p(y);\n\tif (x == y)\n\t\treturn false;\n\td ^= (dist_x ^ dist_y);\n\tif (rnk[x] < rnk[y])\n\t\tswap(x, y);\n\tchange(dsu[y], x);\n\tchange(rnk[x], rnk[x] + rnk[y]);\n\tchange(dist[y], d);\n\treturn true;\n}\n\ninline void process(int v)\n{\n\tfor(auto x : T[v])\n\t\tif (!merge(x.x.x, x.x.y, x.y))\n\t\t{\n\t\t\tint cycle_len = x.y ^ get_dist(x.x.x) ^ get_dist(x.x.y);\n\t\t\ttry_gauss(cycle_len);\n\t\t}\n}\n\ninline int answer(int x, int y)\n{\n\tint d = get_dist(x) 
^ get_dist(y);\n\tfor(int i = 29; i >= 0; i--)\n\t\tif (base[i] != -1 && (d & (1 << i)))\n\t\t\td ^= base[i];\n\treturn d;\n}\n\nvoid dfs(int v, int l, int r)\n{\n\tint rollback_to = st;\n\tprocess(v);\n\tif (l == r - 1)\n\t{\n\t\tfor(auto x : Q[v])\n\t\t\tans[x.y] = answer(x.x.x, x.x.y);\n\t}\n\telse\n\t{\n\t\tint mid = (l + r) >> 1;\n\t\tdfs(v * 2 + 1, l, mid);\n\t\tdfs(v * 2 + 2, mid, r);\t\t\n\t}\n\trollback(rollback_to);\n}\n\nvoid add_query(int v, int l, int r, int pos, edge e)\n{\n\tif (l == r - 1)\n\t\tQ[v].push_back(e);\n\telse\n\t{\n\t\tint mid = (l + r) >> 1;\n\t\tif (pos < mid)\n\t\t\tadd_query(v * 2 + 1, l, mid, pos, e);\n\t\telse\n\t\t\tadd_query(v * 2 + 2, mid, r, pos, e);\n\t}\n}\n\nint main() {\n\tint n, m;\n\tscanf(\"%d %d\", &n, &m);\n\tfor(int i = 0; i < n; i++)\n\t{\n\t\tdsu[i] = i;\n\t\tdist[i] = 0;\n\t\trnk[i] = 1;\n\t}\n\tint cur = 0;\n\tmap<pair<int, int>, pair<int, int> > z;\n\tfor(int i = 0; i < m; i++)\n\t{\n\t\tint x, y, d;\n\t\tscanf(\"%d %d %d\", &x, &y, &d);\n\t\t--x;\n\t\t--y;\n\t\tz[make_pair(x, y)] = make_pair(0, d);\n\t}\n\tint cnt_q = 0;\n\tint q;\n\tscanf(\"%d\", &q);\n\tfor(int i = 0; i < q; i++)\n\t{\n\t\tint t;\n\t\tscanf(\"%d\", &t);\n\t\tif (t == 1)\n\t\t{\n\t\t\tint x, y, d;\n\t\t\tscanf(\"%d %d %d\", &x, &y, &d);\n\t\t\tcur++;\n\t\t\t--x;\n\t\t\t--y;\n\t\t\tz[make_pair(x, y)] = make_pair(cur, d);\n\t\t}\n\t\tif (t == 2)\n\t\t{\n\t\t\tint x, y;\n\t\t\tscanf(\"%d %d\", &x, &y);\n\t\t\t--x;\n\t\t\t--y;\n\t\t\tcur++;\n\t\t\tadd_edge(0, 0, IDX, z[make_pair(x, y)].x, cur, make_pair(make_pair(x, y), z[make_pair(x, y)].y));\n\t\t\tz.erase(make_pair(x, y));\n\t\t}\n\t\tif (t == 3)\n\t\t{\n\t\t\tint x, y;\n\t\t\tscanf(\"%d %d\", &x, &y);\n\t\t\t--x;\n\t\t\t--y;\n\t\t\tadd_query(0, 0, IDX, cur, make_pair(make_pair(x, y), cnt_q));\n\t\t\tcnt_q++; \n\t\t}\n\t}\n\tcur++;\n\tfor(auto x : z)\n\t\tadd_edge(0, 0, IDX, x.y.x, cur, make_pair(x.x, x.y.y));\n\tdfs(0, 0, IDX);\n\tfor(int i = 0; i < cnt_q; i++)\n\t\tprintf(\"%d\\n\", 
ans[i]);\n\treturn 0;\n}"
939
A
Love Triangle
As you could know there are no male planes nor female planes. However, each plane on Earth likes some other plane. There are $n$ planes on Earth, numbered from $1$ to $n$, and the plane with number $i$ likes the plane with number $f_{i}$, where $1 ≤ f_{i} ≤ n$ and $f_{i} ≠ i$. We call a love triangle a situation in which plane $A$ likes plane $B$, plane $B$ likes plane $C$ and plane $C$ likes plane $A$. Find out if there is any love triangle on Earth.
It is enough to check if there is some $i$ such that $f_{f_{f_{i}}} = i$, i. e. f[f[f[i]]] == i.
[ "graphs" ]
800
null
939
B
Hamster Farm
Dima has a hamsters farm. Soon $N$ hamsters will grow up on it and Dima will sell them in a city nearby. Hamsters should be transported in boxes. If some box is not completely full, the hamsters in it are bored, that's why each box should be completely full with hamsters. Dima can buy boxes at a factory. The factory produces boxes of $K$ kinds, boxes of the $i$-th kind can contain in themselves $a_{i}$ hamsters. Dima can buy any amount of boxes, but he should buy boxes of only one kind to get a wholesale discount. Of course, Dima would buy boxes in such a way that each box can be completely filled with hamsters and transported to the city. If there is no place for some hamsters, Dima will leave them on the farm. Find out how many boxes and of which type should Dima buy to transport maximum number of hamsters.
The easiest way to solve this problem is to find the minimum number of hamsters that can be left on the farm. If Dima buys boxes of the $i$-th type, there are $n\ \mathrm{mod}\ a_{i}$ hamsters left on the farm. So we should find such a type $x$, that the value $n{\mathrm{~mod~}}a_{x}$ is minimum among all $x$. The number of boxes to buy is then equal to $\lfloor n / a_{x} \rfloor$.
[ "implementation" ]
1,000
null
939
C
Convenient For Everybody
In distant future on Earth day lasts for $n$ hours and that's why there are $n$ timezones. Local times in adjacent timezones differ by one hour. For describing local time, hours numbers from $1$ to $n$ are used, i.e. there is no time "0 hours", instead of it "$n$ hours" is used. When local time in the $1$-st timezone is $1$ hour, local time in the $i$-th timezone is $i$ hours. Some online programming contests platform wants to conduct a contest that lasts for an hour in such a way that its beginning coincides with beginning of some hour (in all time zones). The platform knows, that there are $a_{i}$ people from $i$-th timezone who want to participate in the contest. Each person will participate if and only if the contest starts no earlier than $s$ hours 00 minutes local time and ends not later than $f$ hours 00 minutes local time. Values $s$ and $f$ are equal for all time zones. If the contest starts at $f$ hours 00 minutes local time, the person won't participate in it. Help platform select such an hour, that the number of people who will participate in the contest is maximum.
Initially, compute prefix sums: for each $i$ the total number of people in timezones from the first to the $i$-th. Let's loop through all possible starting times of the competition. Each starting time gives us one or two segments of timezones, in which people will compete in the contest. We can easily compute the total number of participants in $O(1)$ with the use of prefix sums. The total complexity is $O(N)$.
[ "binary search", "two pointers" ]
1,600
null
939
D
Love Rescue
Valya and Tolya are an ideal pair, but they quarrel sometimes. Recently, Valya took offense at her boyfriend because he came to her in t-shirt with lettering that differs from lettering on her pullover. Now she doesn't want to see him and Tolya is seating at his room and crying at her photos all day long. This story could be very sad but fairy godmother (Tolya's grandmother) decided to help them and restore their relationship. She secretly took Tolya's t-shirt and Valya's pullover and wants to make the letterings on them same. In order to do this, for one unit of mana she can buy a spell that can change some letters on the clothes. Your task is calculate the minimum amount of mana that Tolya's grandmother should spend to rescue love of Tolya and Valya. More formally, letterings on Tolya's t-shirt and Valya's pullover are two strings with same length $n$ consisting only of lowercase English letters. Using one unit of mana, grandmother can buy a spell of form $(c_{1}, c_{2})$ (where $c_{1}$ and $c_{2}$ are some lowercase English letters), which can arbitrary number of times transform a single letter $c_{1}$ to $c_{2}$ and vise-versa on both Tolya's t-shirt and Valya's pullover. You should find the minimum amount of mana that grandmother should spend to buy a set of spells that can make the letterings equal. In addition you should output the required set of spells.
Let's build a graph with 26 vertices representing the 26 letters of English alphabet. When we buy a spell of form $(c_{1}, c_{2})$, add an edge between vertices $c_{1}$ and $c_{2}$. It's easy to see, that it is possible to change a letter $a$ to a letter $b$ if and only if there is a path between corresponding vertices in the graph. So our task is to add the minimum possible number of edges such that characters $s_{1}[i]$ and $s_{2}[i]$ are in one connected component for each $i$ (here $s_{1}$ and $s_{2}$ are the given strings). Let's now take an empty graph and add edges between vertices $s_{1}[i]$ and $s_{2}[i]$ for each $i$. These edges, as we already know, add constraints on the final graph (these letters should be in a single connected component in the final graph). Let's compute the number of connected components in the graph - let it be $k$. Let's consider one connected component, let its size be $x_{i}$. Note that the spell we should buy should connect all these vertices in a single component. We can do this using at least $x_{i} - 1$ edges, and the edges that suit us are any spanning tree of this component, that can be found using a dfs, or just connect one vertex of this component to all the others. So the total number of spells is $\textstyle\sum_{i=1}^{k}(x_{i}-1)=\sum_{i=1}^{k}x_{i}-k=n-k$. This is the answer to the problem.
[ "dfs and similar", "dsu", "graphs", "greedy", "strings" ]
1,600
null
939
E
Maximize!
You are given a multiset $S$ consisting of positive integers (initially empty). There are two kind of queries: - Add a positive integer to $S$, the newly added integer is not less than any number in it. - Find a subset $s$ of the set $S$ such that the value ${\mathrm{max}}(s)-{\mathrm{mean}}(s)$ is maximum possible. Here $max(s)$ means maximum value of elements in $s$, $\operatorname{mean}(s)$ — the average value of numbers in $s$. Output this maximum possible value of ${\mathrm{max}}(s)-{\mathrm{mean}}(s)$.
Let's first prove some lemmas we will use in the solution. Let $a_{0}, ..., a_{n}$ be integers that are at the current moment in $S$, sorted in increasing order. Lemma 1. Let the maximum element in optimal $s$ be $a_{n}$. Then the rest of the elements in $s$ form a prefix of $a$. Proof: let it be wrong. Let's consider $a_{i}$ $(i < n)$ - the first element of $a$, that is not in $s$. We know that in $s$ some element $a_{j}$ is presented $(i < j < n)$. Let's replace in $s$ $a_{j}$ with $a_{i}$, the average $mean(s)$ will not increase, because $a_{i} \le a_{j}$, $max(s)$ will not change, so $max(s) - mean(s)$ will not decrease. Lemma 2. Let $m_{i}=a_{n}-\frac{a_{n}+\sum_{j=0}^{i-1}a_{j}}{i+1}$ - the value that we want to maximize in case of $s$ consisting of $a_{n}$, and a prefix of $a$ of length $i$, i. e. elements $a_{0}, ..., a_{i - 1}$. A claim: $\mathrm{sign}(m_{i+1}-m_{i})=\mathrm{sign}(a_{n}+\sum_{j=0}^{i-1}a_{j}-a_{i}(i+1))$, where $\mathrm{sign}(x)$ denotes the sign of $x$. Proof: $m_{i+1}-m_{i}=\frac{(a_{n}+\sum_{j=0}^{i-1}a_{j})(i+2)-(a_{n}+\sum_{j=0}^{i}a_{j})(i+1)}{(i+1)(i+2)}=\frac{a_{n}+\sum_{j=0}^{i-1}a_{j}-a_{i}(i+1)}{(i+1)(i+2)}$. Because the denominator is always $> 0$, then $\mathrm{sign}(m_{i+1}-m_{i})=\mathrm{sign}(a_{n}+\sum_{j=0}^{i-1}a_{j}-a_{i}(i+1))$. Lemma 3. Let's denote for a fixed $n$ $f(i)=a_{n}+\sum_{j=0}^{i-1}a_{j}-a_{i}(i+1)$. $f(i)$ is non-increasing for increasing $i$. Proof: $f(i + 1) = f(i) + a_{i} - a_{i + 1}(i + 2) + a_{i}(i + 1) = f(i) - (i + 2)(a_{i + 1} - a_{i})$. $a_{i + 1} - a_{i} \ge 0$, because $a$ is sorted. Then $f(i + 1) - f(i) = - (i + 2)(a_{i + 1} - a_{i}) \le 0$, i. e. $f(i + 1) - f(i) \le 0$, this means that $f(i)$ does not increase when $i$ increases. Let's solve the problem now. Let's keep the current answer for the query of type $2$, let it be $ans$. When a new operation of type $1$ comes, let's update it with the optimal value of $max(s) - mean(s)$ in case $max(s) = a_{n}$, where $a_{n}$ is the newly added element. 
To find this optimal value, let's do binary search for $f(i)$ and find the minimum value of $i$, such that $f(i) \le 0$. Lemmas prove us that this prefix of length $i$ is optimal for fixed $max(s) = a_{n}$. Now update the value of $ans$ with the value $a_{n}-\frac{a_{n}+\sum_{j=0}^{i-1}a_{j}}{i+1}$. To compute the values $\textstyle\sum_{j=0}^{k}a_{j}$ fast, we should maintain the array of prefix sums in $a$ - one more element is added to this array each time a query of type $1$ comes.
[ "binary search", "greedy", "ternary search", "two pointers" ]
1,800
null
939
F
Cutlet
Arkady wants to have a dinner. He has just returned from a shop where he has bought a semifinished cutlet. He only needs to fry it. The cutlet should be fried for $2n$ seconds, in particular, it should be fried for $n$ seconds on one side and $n$ seconds on the other side. Arkady has already got a frying pan and turn on fire, but understood that maybe he won't be able to flip the cutlet exactly after $n$ seconds after the beginning of cooking. Arkady is too busy with sorting sticker packs in his favorite messenger and can flip the cutlet only in some periods of time. Namely, there are $k$ periods of time in which he can do it, the $i$-th of them is an interval of time from $l_{i}$ seconds after he starts cooking till $r_{i}$ seconds, inclusive. Arkady decided that it's not required to flip the cutlet exactly in the middle of cooking, instead, he will flip it several times in such a way that the cutlet will be fried exactly $n$ seconds on one side and $n$ seconds on the other side in total. Help Arkady and find out if it's possible for him to cook the cutlet, if he is able to flip the cutlet only in given periods of time; and if yes, find the minimum number of flips he needs to cook the cutlet.
Let's use dynamic programming approach. Solve the following subproblem: let $t$ be the seconds passed since the start of cooking, and $t_{0}$ seconds among them the cutlet was preparing on the current side; what is the minimum number of flips needed to reach this state? This can be easily computed using answers for subproblems $(t - 1, t_{0})$, $(t - 1, t_{0} - 1)$, in which the cutlet lays on the same side, and $(t - 1, t - 1 - t_{0})$ and $(t - 1, t - t_{0})$, in which the cutlet lays on the other side. You should carefully consider the number of flips needed for each transition, and check if it is possible, according to Arkady's availability. The complexity of this solution is $O(n^{2})$, which is not enough. For full solution, it is enough to consider only moments that correspond to start of some segment Arkady's availability, because between these moments we can make at most two flips, otherwise the result is obviously not optimal. In such case to compute the answer for subproblem $(t = l_{i}, t_{0})$ it is enough to compute minimum among answers for subproblems $(t' = l_{i - 1}, t'_{0})$, where $t'$ - the time of the start of the previous time segment - is fixed, and $t'_{0}$ changes within several segments, the bounds of which depend on the number of flips between moments $l_{i - 1}$ and $l_{i}$ seconds, and parameters $l_{i - 1}$, $r_{i - 1}$, $l_{i}$, $t_{0}$. For effective computing of minimums you can use the queue of minimums, because the bounds of the segments increase with the increase of $t_{0}$. The total complexity is $O(nk)$. You can also use other data structures for computing minimum, and the complexity of such solutions is $O(n k\log\left(n\right))$.
[ "data structures", "dp" ]
2,400
null
940
A
Points on the line
We've got no test cases. A big olympiad is coming up. But the problemsetters' number one priority should be adding another problem to the round. The \textbf{diameter} of a multiset of points on the line is the largest distance between two points from this set. For example, the diameter of the multiset ${1, 3, 2, 1}$ is 2. Diameter of multiset consisting of one point is 0. You are given $n$ points on the line. What is the minimum number of points you have to remove, so that the diameter of the multiset of the remaining points will not exceed $d$?
It's clear that diameter of the multiset of points equals to difference of coordinates of point with maximum coordinate and point with minimum coordinate. So we can iterate over all possible pairs of maximum and minimum point and check number of remaining points in $O(n)$. This solution works in $O(n^{3})$. Of course, there are faster solutions.
[ "brute force", "greedy", "sortings" ]
1,200
null
940
B
Our Tanya is Crying Out Loud
Right now she actually isn't. But she will be, if you don't solve this problem. You are given integers $n$, $k$, $A$ and $B$. There is a number $x$, which is initially equal to $n$. You are allowed to perform two types of operations: - Subtract 1 from $x$. This operation costs you $A$ coins. - Divide $x$ by $k$. Can be performed only if $x$ is divisible by $k$. This operation costs you $B$ coins. What is the minimum amount of coins you have to pay to make $x$ equal to $1$?
If $k = 1$, then the answer is obvious, $(n-1)\cdot A$; otherwise we will greedily decrease the number. At each moment of time we should consider the following three cases: If $n < k$, we can only decrease the number by 1, $n - 1$ times, paying $A$ coins each time. It can be done in $O(1)$ using a formula. If $n > k$ and $n$ is not divisible by $k$, we can only decrease the number $(n \bmod k)$ times by 1, paying $A$ coins each time. This case can also be handled in $O(1)$ using a formula. If $n$ is divisible by $k$, it's always optimal to make the number equal to $\frac{n}{k}$, paying $min(B,(n-\frac{n}{k})\cdot A)$ coins. If $B<(n-\frac{n}{k})\cdot A$ then optimality is obvious. Otherwise assume we didn't decrease to $\frac{n}{k}$ now, but did it later on the interval $[\frac{n}{k}, n)$, from the number $n-i\cdot k$. In this case we paid $min(B,(n-i\cdot k-\frac{n}{k}+i)\cdot A)+i\cdot k\cdot A$ coins. It equals $(n-i\cdot k-\frac{n}{k}+i)\cdot A+i\cdot k\cdot A=(n-\frac{n}{k})\cdot A-i\cdot(k-1)\cdot A+i\cdot k\cdot A=(n-\frac{n}{k})\cdot A+i\cdot A$ or $B+i\cdot k\cdot A$, which is not more optimal than decreasing to $\frac{n}{k}$ and decreasing to $\frac{n}{k}-i$ after that. Each case should be handled at most $\log_{k}n$ times, so the complexity of the solution is $O(\log_{k}n)$.
[ "dp", "greedy" ]
1,400
null
940
C
Phone Numbers
And where are the phone numbers? You are given a string $s$ consisting of lowercase English letters and an integer $k$. Find the lexicographically smallest string $t$ of length $k$, such that its set of letters is a subset of the set of letters of $s$ and $s$ is lexicographically smaller than $t$. It's guaranteed that the answer exists. Note that the set of letters is a set, not a multiset. For example, the set of letters of abadaba is ${a, b, d}$. String $p$ is lexicographically smaller than string $q$, if $p$ is a prefix of $q$ and is not equal to $q$, or there exists $i$, such that $p_{i} < q_{i}$ and for all $j < i$ it is satisfied that $p_{j} = q_{j}$. For example, abc is lexicographically smaller than abcd , abd is lexicographically smaller than abec, afa \textbf{is not} lexicographically smaller than ab and a \textbf{is not} lexicographically smaller than a.
Consider $2$ cases: If $n < k$ we should simply add $k - n$ minimum symbols from $s$. If $n \ge k$ we need to replace all symbols in the suffix of first $k$ symbols of string consisting of largest symbols to smallest symbols and next symbol before this suffix replace with next symbol that exists in the string. Complexity of this solution is $O(n+k)$.
[ "constructive algorithms", "implementation", "strings" ]
1,500
null
940
D
Alena And The Heater
"We've tried solitary confinement, waterboarding and listening to Just In Beaver, to no avail. We need something extreme." "Little Alena got an array as a birthday present$...$" The array $b$ of length $n$ is obtained from the array $a$ of length $n$ and two integers $l$ and $r$ ($l ≤ r$) using the following procedure: $b_{1} = b_{2} = b_{3} = b_{4} = 0$. For all $5 ≤ i ≤ n$: - $b_{i} = 0$ if $a_{i}, a_{i - 1}, a_{i - 2}, a_{i - 3}, a_{i - 4} > r$ and $b_{i - 1} = b_{i - 2} = b_{i - 3} = b_{i - 4} = 1$ - $b_{i} = 1$ if $a_{i}, a_{i - 1}, a_{i - 2}, a_{i - 3}, a_{i - 4} < l$ and $b_{i - 1} = b_{i - 2} = b_{i - 3} = b_{i - 4} = 0$ - $b_{i} = b_{i - 1}$ otherwise You are given arrays $a$ and $b'$ of the same length. Find two integers $l$ and $r$ ($l ≤ r$), such that applying the algorithm described above will yield an array $b$ equal to $b'$. It's guaranteed that the answer exists.
Notice that the constraints on $l$ and $r$ generate only sequences $b_{i}$ that equal 00001, 00000, 11110, 11111. 00000 on positions $[i;i+4]$ means that $l\leq\operatorname*{max}_{i\leq j\leq i+4}a_{j}$. 00001 on positions $[i;i+4]$ means that $l\geq\operatorname*{max}_{i\leq j\leq i+4}a_{j}+1$. 11111 on positions $[i;i+4]$ means that $r\geq\operatorname*{min}_{i\leq j\leq i+4}a_{j}$. 11110 on positions $[i;i+4]$ means that $r\leq\operatorname*{min}_{i\leq j\leq i+4}a_{j}-1$. After all we get some constraints for the minimum and maximum possible values of $l$ and $r$. Since it's guaranteed that the answer exists, the minimum possible value of $l$ and the maximum possible value of $r$ will always be a correct answer. Pay attention that $-10^{9}\leq l$ and $r\leq10^{9}$. This solution works in $O(n)$.
[ "binary search", "implementation" ]
1,600
null
940
E
Cashback
Since you are the best Wraith King, Nizhniy Magazin «Mir» at the centre of Vinnytsia is offering you a discount. You are given an array $a$ of length $n$ and an integer $c$. The value of some array $b$ of length $k$ is the sum of its elements except for the $\left\lfloor\frac{k}{c}\right\rfloor$ smallest. For example, the value of the array $[3, 1, 6, 5, 2]$ with $c = 2$ is $3 + 6 + 5 = 14$. Among all possible partitions of $a$ into contiguous subarrays output the smallest possible sum of the values of these subarrays.
At first let's solve this problem with $O(n^{2}\log{n})$ complexity. It can be done using dynamic programming. Let $dp_{i}$ be the minimum cost of splitting the prefix of length $i$. $dp_{0}=0$, $dp_{i}=\operatorname*{min}_{j<i}dp_{j}+cost_{j+1,i}$, where $cost_{l,r}$ is the sum of the $\left(r-l+1\right)-\left\lfloor\frac{r-l+1}{c}\right\rfloor$ maximums on the interval $[l,r]$. While computing these values we can iterate over $j$ from $i - 1$ to $0$, storing the sum of the $\left\lfloor\frac{r-l+1}{c}\right\rfloor$ minimums in some structure like std::multiset. The important observation for a faster solution is that it's always optimal to take segments with lengths $1$ or $c$. Suppose we took a segment with length less than $c$: then its cost doesn't depend on the way we split it, and it's possible to cover it with segments of length $1$. Suppose we took a segment with length $x$, $c\leq x\leq2\cdot c-1$: then it's possible to split it into one segment of length $c$ and cover the other elements with segments of length $1$. Suppose we took a segment with length $2\cdot c$: then it's not worse to take it as two segments of length $c$. In other cases it's also possible to split the segment into segments with lengths $1$ and $c$ without losing optimality. It's easy to find the cost of a segment of length $1$; to find the cost of a segment of length $c$, it's possible to store the elements in the range $[i-c+1;i]$ in some data structure which can find the minimum value fast. It can be a queue with minimum or std::multiset. Complexity is $O(n\log n)$ or $O(n)$ depending on the structure used.
[ "data structures", "dp", "greedy", "math" ]
2,000
null
940
F
Machine Learning
You come home and fell some unpleasant smell. Where is it coming from? You are given an array $a$. You have to answer the following queries: - You are given two integers $l$ and $r$. Let $c_{i}$ be the number of occurrences of $i$ in $a_{l: r}$, where $a_{l: r}$ is the subarray of $a$ from $l$-th element to $r$-th inclusive. Find the \textbf{Mex} of ${c_{0}, c_{1}, ..., c_{10^{9}}}$ - You are given two integers $p$ to $x$. Change $a_{p}$ to $x$. The \textbf{Mex} of a multiset of numbers is the smallest non-negative integer \textbf{not in} the set. Note that in this problem all elements of $a$ are positive, which means that $c_{0}$ = 0 and $0$ is never the answer for the query of the second type.
At first let's find out the minimum length of an array such that the answer for it is $c$. For this, on the segment there should be a number that has one occurrence, some number that has two occurrences, etc. The length of this segment will be $1 + 2 + 3 + \cdots + c = \frac{c(c+1)}{2}$. That's why the answer won't exceed $2\cdot\sqrt{n}$ for any query. Let's compress the numbers in such a way that the numbers from the array and from the queries will be in the range from $1$ to $n + q$. Obviously this modification won't change the answer, but now the elements won't exceed $n+q$. Suppose for the interval $[l,r]$ we know the number $cnt_{x}$ of occurrences of each number $x$ and the number of occurrences $size_{y}$ of each number of occurrences $y$. Then we can find the Mex of this set in $O(\sqrt{n})$. Moreover it's easy to update the arrays $cnt$ and $size$ for the segments $[l-1;r]$, $[l+1;r]$, $[l;r-1]$ and $[l;r+1]$ in $O(1)$ time. We will represent each query as a tuple of integers $(t,l,r)$ where $t$ is the number of change queries before it, and $[l,r]$ is the query segment. It's easy to see that $t$ can also be changed by one in $O(1)$ time. Because $q\approx n$, we will replace $q$ with $n$ in the following part of the editorial. Let $P=n^{\frac{2}{3}}$; let's sort the queries by the triples $(\lfloor{\frac{t}{P}}\rfloor,\lfloor{\frac{l}{P}}\rfloor,r)$ and answer the queries in this order. The right border $r$ will be increased by $O(n)$ within each block of fixed $(\lfloor{\frac{t}{P}}\rfloor,\lfloor{\frac{l}{P}}\rfloor)$, and there are $\frac{n^{2}}{P^{2}}=n^{\frac{2}{3}}$ such blocks, so the total movement of the right border is $O(n^{\frac{5}{3}})$. For each query the borders $l$ and $t$ are moved by not more than $O(n^{\frac{2}{3}})$, so the total movement is $O(n^{\frac{5}{3}})$. The solution has time complexity $O(n^{\frac{5}{3}})$.
[ "brute force", "data structures" ]
2,600
null
946
A
Partition
You are given a sequence $a$ consisting of $n$ integers. You may partition this sequence into two sequences $b$ and $c$ in such a way that every element belongs exactly to one of these sequences. Let $B$ be the sum of elements belonging to $b$, and $C$ be the sum of elements belonging to $c$ (if some of these sequences is empty, then its sum is $0$). What is the maximum possible value of $B - C$?
The answer for this problem can be calculated by the next simple formula: $\sum_{i=1}^{n}\left|a_{i}\right|$, where $|a_{i}|$ is the absolute value of $a_{i}$.
[ "greedy" ]
800
null
946
B
Weird Subtraction Process
You have two variables $a$ and $b$. Consider the following sequence of actions performed with these variables: - If $a = 0$ or $b = 0$, end the process. Otherwise, go to step $2$; - If $a ≥ 2·b$, then set the value of $a$ to $a - 2·b$, and repeat step $1$. Otherwise, go to step $3$; - If $b ≥ 2·a$, then set the value of $b$ to $b - 2·a$, and repeat step $1$. Otherwise, end the process. Initially the values of $a$ and $b$ are positive integers, and so the process will be finite. You have to determine the values of $a$ and $b$ after the process ends.
The answer can be calculated very easy by Euclid algorithm (which is described in the problem statement), but all subtractions will be replaced by taking by modulo.
[ "math", "number theory" ]
1,100
null
946
C
String Transformation
You are given a string $s$ consisting of $|s|$ small english letters. In one move you can replace any character of this string to the next character in alphabetical order (a will be replaced with b, s will be replaced with t, etc.). You cannot replace letter z with any other letter. Your target is to make some number of moves (not necessary minimal) to get string abcdefghijklmnopqrstuvwxyz (english alphabet) as a subsequence. Subsequence of the string is the string that is obtained by deleting characters at some positions. You need to print the string that will be obtained from the given string and will be contain english alphabet as a subsequence or say that it is impossible.
The problem can be solved by the following greedy algorithm. At first we need to store the smallest character of the alphabet we haven't obtained yet, for example, in variable $c$ (initially it will be equal to 'a'). Then we will just iterate over all characters of the string from left to right and if the current character of the string is not greater than $c$, we just replace it with $c$ and increase $c$ by 1. If at any moment $c$ becomes greater than 'z', we got the answer. And if after iterating over the string $c$ is not greater than 'z', the answer is <<-1>>.
[ "greedy", "strings" ]
1,300
null
946
D
Timetable
Ivan is a student at Berland State University (BSU). There are $n$ days in Berland week, and each of these days Ivan might have some classes at the university. There are $m$ working hours during each Berland day, and each lesson at the university lasts exactly one hour. If at some day Ivan's first lesson is during $i$-th hour, and last lesson is during $j$-th hour, then he spends $j - i + 1$ hours in the university during this day. If there are no lessons during some day, then Ivan stays at home and therefore spends $0$ hours in the university. Ivan doesn't like to spend a lot of time in the university, so he has decided to skip some lessons. He cannot skip more than $k$ lessons during the week. After deciding which lessons he should skip and which he should attend, every day Ivan will enter the university right before the start of the first lesson he does not skip, and leave it after the end of the last lesson he decides to attend. If Ivan skips all lessons during some day, he doesn't go to the university that day at all. Given $n$, $m$, $k$ and Ivan's timetable, can you determine the minimum number of hours he has to spend in the university during one week, if he cannot skip more than $k$ lessons?
The problem can be solved in the following dynamic programming manner. Let $dp[i][j]$ be the smallest number of hours Ivan can spend in university in the first $i$ days while having $j$ lessons skipped. To calculate it we can store $mn[i][j]$ - minimal number of hours Ivan is required to spend in the $i$-th day so that he attends $j$ lessons. Then we can iterate over all lengths from $0$ to $k$ and update $dp[i + 1][j + length_{cur}]$ with $dp[i][j] + mn[i][length_{cur}]$. Precalc works in $O(nm^{2})$ and dp can be processed in $O(nk^{2})$.
[ "dp" ]
1,800
null
946
E
Largest Beautiful Number
Yes, that's another problem with definition of "beautiful" numbers. Let's call a positive integer $x$ beautiful if its decimal representation without leading zeroes contains even number of digits, and there exists a permutation of this representation which is palindromic. For example, $4242$ is a beautiful number, since it contains $4$ digits, and there exists a palindromic permutation $2442$. Given a positive integer $s$, find the largest beautiful number which is less than $s$.
This is pretty typical problem on greedy construction: you are asked to build lexicographically maximal string. In the majority of cases it's done like this. Imagine you've built some prefix of length $i$ with all numbers equal to the prefix of length $i$ of the original string. You are also sure there exists some suffix for it that will give proper beautiful number. Now you have two options: you either put $s_{i}$ if possible and proceed to the same task of longer prefix or you put the smaller number and fill the entire suffix with the maximum possible beautiful number you can obtain. Now you should learn how to check if any valid suffix exists. It means at least the smallest possible beautiful number with current prefix is smaller than $s$. It's built like this. Let $cnt$ be the number of digits which currently have odd number of occurences. You put all zeroes but the last $cnt$ digits and then output these odd occurence number digits in increasing order. The first part can be checked with partial sums on the number of zeroes on segment in the original string and the second part has its length not greater than $10$ and can be checked naively. Overall complexity: $O(|s|)$.
[ "greedy", "implementation" ]
2,200
null
946
F
Fibonacci String Subsequences
You are given a binary string $s$ (each character of this string is either 0 or 1). Let's denote the cost of string $t$ as the number of occurences of $s$ in $t$. For example, if $s$ is 11 and $t$ is 111011, then the cost of $t$ is $3$. Let's also denote the Fibonacci strings sequence as follows: - $F(0)$ is 0; - $F(1)$ is 1; - $F(i) = F(i - 1) + F(i - 2)$ if $i > 1$, where $ + $ means the concatenation of two strings. Your task is to calculate the sum of costs of all subsequences of the string $F(x)$. Since answer may be large, calculate it modulo $10^{9} + 7$.
If $F(x)$ was not really large, then we could run the following dynamic programming solution: Let $dp[i][j]$ be the number of ways to process first $i$ characters of $F(x)$ so that the suffix of the subsequence of length $j$ matches the prefix of $s$ with length $j$. This is not really different from a usual approach with dynamic programming on KMP (constraints in this problem allow us to build KMP automaton naively without the help of any fast prefix-function algorithm). However, the length of $F(x)$ is really large. Let's consider the traversions we make in dynamic programming. Let $A$ be the KMP automaton matrix (that is, let $A[p][c]$ be the new value of prefix-function if the previous value was $p$ and we added a character $c$). Then from the state $dp[i][j]$, if the following character is $0$, we make traversions to $dp[i + 1][j]$ and to $dp[i][A[p][0]]$. This actually leads to rewriting traversions as a matrix. Let $d_{i}$ be the vector such that its $j$-th element is equal to $dp[i][j]$. Then advancing from $d_{i}$ to $d_{i + 1}$, if $i$-th character is $0$, can be represented as follows: $d_{i + 1} = d_{i} \times M_{0}$, where $M_{0}$ can be filled with the help of KMP automaton: for every $k\in[0,n]$ we add $1$ to $M_{0}[k][k]$, and also add $1$ to $M_{0}[k][A[k][0]]$. The same approach can be used to form matrix $M_{1}$ that denotes adding character $1$, and if we want to add the string $F(x)$, we can actually represent its matrix as $M_{x} = M_{x - 1} \times M_{x - 2}$. This matrix multiplication approach will run in $O(xn^{3})$, but the problem is that it doesn't give us the answer. To obtain it, we may add an auxiliary state $n + 1$ to the dynamic programming, add $1$ to it each time we traverse to state $n$, multiply it by $2$ each time we add a character, and rewrite it into matrix form.
[ "combinatorics", "dp", "matrices" ]
2,400
"#include <bits/stdc++.h>\n\nusing namespace std;\n\ntypedef vector<int> vec;\ntypedef vector<vec> mat;\n\nconst int MOD = 1000 * 1000 * 1000 + 7;\n\nint add(int x, int y)\n{\n\tint z = x + y;\n\twhile(z >= MOD)\n\t\tz -= MOD;\n\treturn z;\t\n}\n\nint mul(int x, int y)\n{\n\treturn (x * 1ll * y) % MOD;\n}\n\nvec mul(const vec& a, const mat& b)\n{\n\tint n = a.size();\n\tvec c(n, 0);\n\tfor(int i = 0; i < n; i++)\n\t\tfor(int j = 0; j < n; j++)\n\t\t\tc[i] = add(c[i], mul(a[j], b[j][i]));\n\treturn c;\n}\n\nmat mul(const mat& a, const mat& b)\n{\n\tint n = a.size();\n\tmat c(n, vec(n, 0));\n\tfor(int i = 0; i < n; i++)\n\t\tfor(int j = 0; j < n; j++)\n\t\t\tfor(int k = 0; k < n; k++)\n\t\t\t\tc[i][k] = add(c[i][k], mul(a[i][j], b[j][k]));\n\treturn c;\n}\n\nint main() { \n\tint x, n;\n\tcin >> n >> x;\n\tstring s;\n\tcin >> s;\n\tvector<int> p(n);\n\tfor(int i = 1; i < n; i++)\n\t{\n\t\tint j = p[i - 1];\n\t\twhile(s[j] != s[i] && j > 0)\n\t\t{\n\t\t\tj = p[j - 1];\n\t\t}\n\t\tif(s[j] == s[i])\n\t\t\tj++;\n\t\tp[i] = j;\n\t}\n\tvector<vector<int> > a(n + 1, vector<int>(2, 0));\n\ta[n][0] = a[n][1] = n;\n\tfor(int i = 0; i <= n; i++)\n\t{\n\t\tif (s[i] == '0')\n\t\t\ta[i][0] = i + 1;\n\t\telse if (i > 0)\n\t\t\ta[i][0] = a[p[i - 1]][0];\n\t\tif (s[i] == '1')\n\t\t\ta[i][1] = i + 1;\n\t\telse if (i > 0)\n\t\t\ta[i][1] = a[p[i - 1]][1];\n\t}\n\tvec v(n + 2, 0);\n\tv[0] = 1;\n\tvector<mat> f;\n\tmat f0(n + 2, vec(n + 2, 0));\n\tfor(int i = 0; i < n + 1; i++)\n\t{\n\t\tf0[i][i] = add(f0[i][i], 1);\n\t\tint z = a[i][0];\n\t\tf0[i][z] = add(f0[i][z], 1);\n\t\tif (z == n)\n\t\t\tf0[i][n + 1] = add(f0[i][n + 1], 1);\n\t}\n\tf0[n + 1][n + 1] = add(f0[n + 1][n + 1], 2);\n\tmat f1(n + 2, vec(n + 2, 0));\n\tfor(int i = 0; i < n + 1; i++)\n\t{\n\t\tf1[i][i] = add(f1[i][i], 1);\n\t\tint z = a[i][1];\n\t\tf1[i][z] = add(f1[i][z], 1);\n\t\tif (z == n)\n\t\t\tf1[i][n + 1] = add(f1[i][n + 1], 1);\n\t}\n\t\n\tf1[n + 1][n + 1] = add(f1[n + 1][n + 1], 
2);\n\tf.push_back(f0);\n\tf.push_back(f1);\n\tfor(int i = 2; i <= x; i++)\n\t\tf.push_back(mul(f[i - 1], f[i - 2]));\n\tv = mul(v, f[x]);\n\tcout << v.back() << endl;\n\treturn 0;\n}"
946
G
Almost Increasing Array
We call an array almost increasing if we can erase not more than one element from it so that the array becomes strictly increasing (that is, every element is striclty greater than every element before it). You are given an array $a$ consisting of $n$ elements. You are allowed to replace any element with any integer number (and you may do so any number of times you need). What is the minimum number of replacements you have to perform in order to make the array almost increasing?
If the problem was to make the array strictly increasing, then we could use the following approach: for every element subtract its index from it, find the longest non-decreasing subsequence, and change every element not belonging to this sequence. In this problem we can use a similar technique. Let's iterate on the element we will remove after changing everything we need (let's call it $k$). For every $i < k$ we will subtract $i$ from $a_{i}$, and for every $i > k$ we will subtract $i - 1$ from $a_{i}$. Let's maintain the longest non-decreasing subsequence ending in every element on prefix, and the longest non-decreasing subsequence starting in every element on suffix (this can be done by a lot of different techniques, for example, two segment trees and rollbacking, which is used in our model solution). Then we need to somehow merge these sequences. The easiest way to do it is to consider only the subsequence ending in element $k - 1$ (since if we will need to consider the subsequence ending in some index less than $k - 1$, we would check this possibility choosing other value of $k$) and use some data structure (BIT, segment tree or something similar) to find the longest subsequence that can be appended to the one we fixed. After checking if we need to delete element $k$, we add it to the data structure on prefix, remove element $k + 1$ from the data structure on suffix, check if we have to remove element $k + 1$ and so on.
[ "data structures", "dp" ]
2,500
"#include <bits/stdc++.h>\n\nusing namespace std;\n\nmap<int, int> comp;\n\nvoid compress(const vector<int>& a)\n{\n\tvector<int> z;\n\tfor(int i = 0; i < a.size(); i++)\n\t{\n\t\tz.push_back(a[i] - i);\n\t\tz.push_back(a[i] - i + 1);\n\t}\n\tsort(z.begin(), z.end());\n\tz.erase(unique(z.begin(), z.end()), z.end());\n\tfor(int i = 0; i < z.size(); i++)\n\t\tcomp[z[i]] = i;\n}\n\nconst int N = 400043;\nconst int M = 8000043;\n\nint T[4 * N];\nint* where[M];\nint val[M];\nint st = 0;\n\nvoid change(int& x, int y)\n{\n\twhere[st] = &x;\n\tval[st] = x;\n\tst++;\n\tx = y;\n}\n\nvoid rollback(int new_st)\n{\n\twhile(st > new_st)\n\t{\n\t\tst--;\n\t\t*(where[st]) = val[st];\n\t}\n}\n\nvoid build(int v, int l, int r)\n{\n\tT[v] = 0;\n\tif(l < r - 1)\n\t{\n\t\tint mid = (l + r) / 2;\n\t\tbuild(v * 2 + 1, l, mid);\n\t\tbuild(v * 2 + 2, mid, r);\n\t}\n}\n\nvoid upd(int v, int l, int r, int pos, int val)\n{\n\tchange(T[v], max(T[v], val));\n\tif (l < r - 1)\n\t{\n\t\tint mid = (l + r) / 2;\n\t\tif (pos < mid)\n\t\t\tupd(v * 2 + 1, l, mid, pos, val);\n\t\telse\n\t\t\tupd(v * 2 + 2, mid, r, pos, val);\n\t}\n}\n\nint query(int v, int l, int r, int L, int R)\n{\n\tif (L >= R)\n\t\treturn 0;\n\tif (l == L && r == R)\n\t\treturn T[v];\n\tint mid = (l + r) / 2;\n\treturn max(query(v * 2 + 1, l, mid, L, min(R, mid)), query(v * 2 + 2, mid, r, max(L, mid), R));\n}\n\nint T2[4 * N];\n\nvoid build2(int v, int l, int r)\n{\n\tT2[v] = 0;\n\tif(l < r - 1)\n\t{\n\t\tint mid = (l + r) / 2;\n\t\tbuild2(v * 2 + 1, l, mid);\n\t\tbuild2(v * 2 + 2, mid, r);\n\t}\n}\n\nvoid upd2(int v, int l, int r, int pos, int val)\n{\n\tT2[v] = max(T2[v], val);\n\tif (l < r - 1)\n\t{\n\t\tint mid = (l + r) / 2;\n\t\tif (pos < mid)\n\t\t\tupd2(v * 2 + 1, l, mid, pos, val);\n\t\telse\n\t\t\tupd2(v * 2 + 2, mid, r, pos, val);\n\t}\n}\n\nint query2(int v, int l, int r, int L, int R)\n{\n\tif (L >= R)\n\t\treturn 0;\n\tif (l == L && r == R)\n\t\treturn T2[v];\n\tint mid = (l + r) / 2;\n\treturn max(query2(v * 2 + 1, 
l, mid, L, min(R, mid)), query2(v * 2 + 2, mid, r, max(L, mid), R));\n}\n\nint main()\n{\n\tint n;\n\tscanf(\"%d\", &n);\n\tvector<int> a(n);\n\tfor(int i = 0; i < n; i++)\n\t\tscanf(\"%d\", &a[i]);\n\tcompress(a);\n\tint mx = comp.size();\n\tint ans = 0;\n\tvector<int> states;\n\tbuild(0, 0, mx);\n\tfor(int i = n - 1; i > 0; i--)\n\t{\n\t\tstates.push_back(st);\n\t\tint cur = comp[a[i] - i + 1];\n\t\tint val = query(0, 0, mx, cur, mx);\n\t\tupd(0, 0, mx, cur, val + 1);\n\t}\n\tbuild2(0, 0, mx);\n\tans = max(ans, query(0, 0, mx, 0, mx));\n\tfor(int i = 0; i < n - 1; i++)\n\t{\n\t\trollback(states.back());\n\t\tstates.pop_back();\n\t\tint cur = comp[a[i] - i];\n\t\tint val = query2(0, 0, mx, 0, cur + 1);\n\t\tupd2(0, 0, mx, cur, val + 1);\n\t\tans = max(ans, val + 1 + query(0, 0, mx, cur, mx));\n\t}\n\tcout << n - 1 - ans << endl;\n}"
948
A
Protect Sheep
Bob is a farmer. He has a large pasture with many sheep. Recently, he has lost some of them due to wolf attacks. He thus decided to place some shepherd dogs in such a way that all his sheep are protected. The pasture is a rectangle consisting of $R × C$ cells. Each cell is either empty, contains a sheep, a wolf or a dog. Sheep and dogs always stay in place, but wolves can roam freely around the pasture, by repeatedly moving to the left, right, up or down to a neighboring cell. When a wolf enters a cell with a sheep, it consumes it. However, no wolf can enter a cell with a dog. Initially there are no dogs. Place dogs onto the pasture in such a way that no wolf can reach any sheep, or determine that it is impossible. Note that since you have many dogs, you do \textbf{not} need to minimize their number.
Suppose that there is a wolf and a sheep in adjacent cells. It is obvious that in this case, the answer "NO" - this particular wolf can always attack this sheep. Otherwise, the answer is always "YES". The simplest way of protecting all sheep is to place a dog in every empty cell. Then no wolf can move and all sheep are safe and happy.
[ "brute force", "dfs and similar", "graphs", "implementation" ]
900
null
949
A
Zebras
Oleg writes down the history of the days he lived. For each day he decides if it was good or bad. Oleg calls a non-empty sequence of days a zebra, if it starts with a bad day, ends with a bad day, and good and bad days are alternating in it. Let us denote bad days as 0 and good days as 1. Then, for example, sequences of days 0, 010, 01010 are zebras, while sequences 1, 0110, 0101 are not. Oleg tells you the story of days he lived in chronological order in form of string consisting of 0 and 1. Now you are interested if it is possible to divide Oleg's life history into several \textbf{subsequences}, each of which is a zebra, and the way it can be done. Each day must belong to exactly one of the subsequences. For each of the subsequences, days forming it must be ordered chronologically. Note that subsequence does not have to be a group of consecutive days.
Simple greedy works here. Let's go from left to right and assign each element to some subsequence. At each moment we have two types of already built subsequences: zebras ("0", "010", "01010", ...) and "almost zebras" ("01", "0101", "010101"). If next element of the string is '1' we should add it to some zebra making it "almost zebra". If there are no zebras at this moment it's impossible to divide string into zebra subsequences. If next element of the string is '0' we should add it so some "almost zebra" making it simple zebra. If there are no "almost zebra"'s now just create new zebra consisting of this '0'. If there are no "almost zebra"'s at the end answer exists and built zebras satisfy all requirements otherwise there is no answer.
[ "greedy" ]
1,600
null
949
B
A Leapfrog in the Array
Dima is a beginner programmer. During his working process, he regularly has to repeat the following operation again and again: to remove every second element from the array. One day he has been bored with easy solutions of this problem, and he has come up with the following extravagant algorithm. Let's consider that initially array contains $n$ numbers from $1$ to $n$ and the number $i$ is located in the cell with the index $2i - 1$ (Indices are numbered starting from one) and other cells of the array are empty. Each step Dima selects a non-empty array cell with the maximum index and moves the number written in it to the nearest empty cell to the left of the selected one. The process continues until all $n$ numbers will appear in the first $n$ cells of the array. For example if $n = 4$, the array is changing as follows: You have to write a program that allows you to determine what number will be in the cell with index $x$ ($1 ≤ x ≤ n$) after Dima's algorithm finishes.
In odd position $p$ value $\textstyle{\frac{p+1}{2}}$ will be set. For even position $p$ let's find out position from which value has arrived and iterate over such position until we will arrive to odd position for which we know answer. At the moment of jumping to cell $p$ there are $\textstyle{\frac{p}{2}}$ elements to the right of the position $p$. So there are $n-{\frac{p}{2}}$ elements to the right of this position and jump to cell $p$ was done from position $p+(n-{\frac{p}{2}})$. During each such jump length of jump decreases at least by 2 times, so there are no more than $O(\log n)$ jumps and solution works in $O(q\log n)$.
[ "constructive algorithms", "math" ]
1,700
null
949
C
Data Center Maintenance
BigData Inc. is a corporation that has $n$ data centers indexed from $1$ to $n$ that are located all over the world. These data centers provide storage for client data (you can figure out that client data is really big!). Main feature of services offered by BigData Inc. is the access availability guarantee even under the circumstances of any data center having an outage. Such a guarantee is ensured by using the two-way replication. Two-way replication is such an approach for data storage that any piece of data is represented by two identical copies that are stored in two different data centers. For each of $m$ company clients, let us denote indices of two different data centers storing this client data as $c_{i, 1}$ and $c_{i, 2}$. In order to keep data centers operational and safe, the software running on data center computers is being updated regularly. Release cycle of BigData Inc. is one day meaning that the new version of software is being deployed to the data center computers each day. Data center software update is a non-trivial long process, that is why there is a special hour-long time frame that is dedicated for data center maintenance. During the maintenance period, data center computers are installing software updates, and thus they may be unavailable. Consider the day to be exactly $h$ hours long. For each data center there is an integer $u_{j}$ ($0 ≤ u_{j} ≤ h - 1$) defining the index of an hour of day, such that during this hour data center $j$ is unavailable due to maintenance. Summing up everything above, the condition $u_{ci, 1} ≠ u_{ci, 2}$ should hold for each client, or otherwise his data may be unaccessible while data centers that store it are under maintenance. Due to occasional timezone change in different cities all over the world, the maintenance time in some of the data centers may change by one hour sometimes. 
Company should be prepared for such situation, that is why they decided to conduct an experiment, choosing some non-empty subset of data centers, and shifting the maintenance time for them by an hour later (i.e. if $u_{j} = h - 1$, then the new maintenance hour would become $0$, otherwise it would become $u_{j} + 1$). Nonetheless, such an experiment should not break the accessibility guarantees, meaning that data of any client should be still available during any hour of a day after the data center maintenance times are changed. Such an experiment would provide useful insights, but changing update time is quite an expensive procedure, that is why the company asked you to find out the minimum number of data centers that have to be included in an experiment in order to keep the data accessibility guarantees.
Formally you are given a properly colored graph and you are asked to find out size of smallest non-empty subset such that after addition $1$ modulo $h$ to colors of vertices in this subset coloring will remain proper. Let's build a directed graph with $n$ vertices and edge from $u$ to $v$ iff $u$ and $v$ are connected by edge in original graph and $(color_{u}+1) \bmod h = color_{v}$. Now let's fix some vertex which color will be changed. It's clear that we should take into its set all vertices which are reachable from it. Now our problem is reduced to the following problem: "Given a directed graph, find the vertex with the smallest number of vertices reachable from it". It's just any vertex from the smallest strongly connected component which is a sink (a strongly connected component such that there is no other strongly connected component reachable from it).
[ "dfs and similar", "graphs" ]
1,900
null
949
D
Curfew
Instructors of Some Informatics School make students go to bed. The house contains $n$ rooms, in each room exactly $b$ students were supposed to sleep. However, at the time of curfew it happened that many students are not located in their assigned rooms. The rooms are arranged in a row and numbered from $1$ to $n$. Initially, in $i$-th room there are $a_{i}$ students. All students are currently somewhere in the house, therefore $a_{1} + a_{2} + ... + a_{n} = nb$. Also $2$ instructors live in this house. The process of curfew enforcement is the following. One instructor starts near room $1$ and moves toward room $n$, while the second instructor starts near room $n$ and moves toward room $1$. After processing current room, each instructor moves on to the next one. Both instructors enter rooms and move simultaneously, if $n$ is odd, then only the first instructor processes the middle room. When all rooms are processed, the process ends. When an instructor processes a room, she counts the number of students in the room, then turns off the light, and locks the room. Also, if the number of students inside the processed room is not equal to $b$, the instructor writes down the number of this room into her notebook (and turns off the light, and locks the room). Instructors are in a hurry (to prepare the study plan for the next day), so they don't care about who is in the room, but only about the number of students. While instructors are inside the rooms, students can run between rooms that are not locked and not being processed. A student can run by at most $d$ rooms, that is she can move to a room with number that differs my at most $d$. Also, after (or instead of) running each student can hide under a bed in a room she is in. In this case the instructor will not count her during the processing. In each room any number of students can hide simultaneously. Formally, here is what's happening: - A curfew is announced, at this point in room $i$ there are $a_{i}$ students. 
- Each student can run to another room but not further than $d$ rooms away from her initial room, or stay in place. After that each student can optionally hide under a bed. - Instructors enter room $1$ and room $n$, they count students there and lock the room (after it no one can enter or leave this room). - Each student from rooms with numbers from $2$ to $n - 1$ can run to another room but not further than $d$ rooms away from her \textbf{current} room, or stay in place. Each student can optionally hide under a bed. - Instructors move from room $1$ to room $2$ and from room $n$ to room $n - 1$. - This process continues until all rooms are processed. Let $x_{1}$ denote the number of rooms in which the first instructor counted the number of non-hidden students different from $b$, and $x_{2}$ be the same number for the second instructor. Students know that the principal will only listen to one complaint, therefore they want to minimize the maximum of numbers $x_{i}$. Help them find this value if they use the optimal strategy.
Let's solve the problem in case when there is only one instructor (which moves from left to right and the only goal is to minimize number of bad rooms) I claim, that the following greedy works: Move through rooms from left to right If there are too many students inside room, send the excess students to the next room If there are not enough students, but it is possible to fulfill this room from rooms on the right (the sum $\sum_{j=i}^{\min(n-1,\,i+d\cdot(i+1))} a_{j}$ is at least $b$), then do it. If it's not possible, then send all students to the following room. If it is the last room, say that those students are hiding in it. This greedy can be implemented in ${\mathcal{O}}(n)$ time: calculate the prefix sums on the initial $a_{i}$, this way you can check if it is possible to move students from following rooms here fast. To handle the removal of students from following rooms you can maintain the current "debt" of students. When you first consider room you can repay the debt as much as you can and then check one of the cases above. Since the both left and right borders of segments are moving monotonously the debt will be "inherited" correctly. Notice, that you can only consider "paths of different students never cross", that means if first student was initially in room $i$ and moved to $a$, while the second student was in $j$ and moved to $b$, then if $i \le j$ then $a \le b$. Because otherwise you can swap students and nothing will change. The proof of the greedy (you can possibly skip it). Suppose there is a better answer, which completes the rooms $a_{1}$, $...$, $a_{k}$, while the greedy solutions completes rooms $b_{1}$, $...$, $b_{l}$, $l < k$. We will assume that in optimal solution paths of students don't intersect, that all "excessive" students are hiding in last room and that all rooms in optimal answer are either full ($b$) or empty ($0$). 
Otherwise it's possible to change the "correct answer in such way, that number of good rooms will not decrease. Let's $i$ is smallest index when $a_{i} \neq b_{i}$. Then $a_{i} > b_{i}$, because greedy solution would always fulfill the room $a_{i}$ if it would be possible (actually, greedy solution builts the lexmin solution). But if $a_{i} > b_{i}$ we can "patch" the supposed optimal solution and move all students which were sent to room $b_{i}$ to $a_{i}$ (we know it is possible by the greedy solution's answer). This way we can increase the common prefix with any possible "best" answer hence contradiction. Back to the problem with two instructors. Recall, that "paths of different students don't cross", hence there exists a "border", the number $x$ from $0$ to $nb$, where the first $x$ students are going to the first instructor and all others to second. One could have bruteforced that border and solved the both halfs of the array by the method above, but then the complexity will be $n^{2} \cdot b$ which is too much. We need to search for the border more efficiently. Let $f(m)$ will be the answer for first instructor, when he is given $m$ first students and $g(m)$ is the answer for second instructor when he is given all students except first $m$ ones. It is easy to see, that $f(m)$ in decreasing, while $g(m)$ is increasing (both times it is not strict monotonicity). Indeed, the more students are given to instructor, than more opportunities he has (all excessive students can always hide, so it is not a problem). We are searching for $m$ where $ans(m) = max(f(m), g(m))$ is smallest possible.Let's introduce function $z(m) = g(m) - f(m)$ - increasing (but still not strict). Let's call $m_{0}$ the smallest index, such that $z(m_{0}) \ge 0$. One can see, that a $min(ans(m_{0} - 1), ans(m_{0}))$ is the final answer. Indeed, if one will try greater $m$'s than $m_{0}$, than the $g(m)$ will be dominating in max, and hence $ans(m_{0})$ is more optimal. 
Otherwise, if $m < m_{0} - 1$, then $ans(m)$ is better.
[ "binary search", "brute force", "greedy", "sortings" ]
2,300
null
949
E
Binary Cards
It is never too late to play the fancy "Binary Cards" game! There is an infinite amount of cards of positive and negative ranks that are used in the game. The absolute value of any card rank is a power of two, i.e. each card has a rank of either $2^{k}$ or $ - 2^{k}$ for some integer $k ≥ 0$. There is an infinite amount of cards of any valid rank. At the beginning of the game player forms his deck that is some multiset (possibly empty) of cards. It is allowed to pick any number of cards of any rank but the small deck is considered to be a skill indicator. Game consists of $n$ rounds. In the $i$-th round jury tells the player an integer $a_{i}$. After that the player is obligated to draw such a subset of his deck that the sum of ranks of the chosen cards is equal to $a_{i}$ (it is allowed to not draw any cards, in which case the sum is considered to be equal to zero). If player fails to do so, he loses and the game is over. Otherwise, player takes back all of his cards into his deck and the game proceeds to the next round. Player is considered a winner if he is able to draw the suitable set of cards in each of the rounds. Somebody told you which numbers $a_{i}$ the jury is going to tell you in each round. Now you want to pick a deck consisting of the minimum number of cards that allows you to win the "Binary Cards" game.
There are two observations required to solve this problem: You don't have to take two cards with same number. If you took two cards with number $x$ you can take card with number $2x$ and card with number $x$ and answer will remain correct. If you took card with number $x$ you don't have to take card with number $- x$. You can take cards $2x$ and $- x$ instead. Consider all numbers. If there are no odd numbers you don't have to take $1$ or $- 1$ cards. Otherwise you have to take either $1$ or $- 1$. Try both possibilities and add value of taken card to all odd numbers. After this step all numbers are even, so you can just divide them by $2$ and solve the same problem with divided number. After each step of this algorithm maximum possible absolute value of card is also divided by $2$, so in worst case complexity will be $T(C)=2\cdot T({\frac{C}{2}})+O(C)$, where $C$ is a maximum absolute value of number. Solution of the equation is $T(C)=O(C\log C)$ so it's fast enough.
[ "brute force" ]
2,700
null
949
F
Astronomy
\url{CDN_BASE_URL/571a7070e5784ace821a58b11e4350fb}
Let's shuffle lines randomly. Select some 4 points and find their intersection point. If it's not integer or is not in bounding box it's definitely not an answer. Otherwise let's check it. Iterate over points and check if there is another point on the line going through answer and this point. To check it we need to build some structure that can check if some line is inside it. For example you can for each point sort other points by polar angle, or store polar angles in hash table (multiplying it by number of order $C^{2}$ making different angles different). If there is no pair for some point let's stop checking it. This solution works in $O(n^{3})$. In fact no, it's $O(n^{2})$. I will write full formal proof soon, now I'll write just sketch of it. Consider planar graph where vertices are all intersection points of some lines passing through two given points. Let's find expected number of lines checking. Divide vertices into 2 classes: heavy (with degree $\geq\frac{|V|}{2}$) and light (with degree $<\frac{|V|}{2}$) Expected time of checking is sum of expected time checking light vertices and heavy vertices It's quite easy to prove it for light vertices. To prove it for heavy vertices Szemerédi-Trotter theorem might be useful
[ "geometry", "probabilities" ]
3,300
null
950
A
Left-handers, Right-handers and Ambidexters
You are at a water bowling training. There are $l$ people who play with their left hand, $r$ people, who play with their right hand, and $a$ ambidexters, who can play with left or right hand. The coach decided to form a team of even number of players, exactly half of the players should play with their right hand, and exactly half of the players should play with their left hand. One player should use only one of his hands. Ambidexters play as well with their right hand as with their left hand. In the team, an ambidexter can play with their left hand, or with their right hand. Please find the maximum possible size of the team, where equal number of players use their left and right hands, respectively.
Iterate over size of the team. Now you know how many players should play with left hand, but are not left-handed (because there are no so much left-handed players). The same with right hand. Just check if sum of these values is not more than number of ambidexters.
[ "implementation", "math" ]
800
null
950
B
Intercepted Message
Hacker Zhorik wants to decipher two secret messages he intercepted yesterday. Each message is a sequence of encrypted blocks, each of them consists of several bytes of information. Zhorik knows that each of the messages is an archive containing one or more files. Zhorik knows how each of these archives was transferred through the network: if an archive consists of $k$ files of sizes $l_{1}, l_{2}, ..., l_{k}$ bytes, then the $i$-th file is split to one or more blocks $b_{i, 1}, b_{i, 2}, ..., b_{i, mi}$ (here the total length of the blocks $b_{i, 1} + b_{i, 2} + ... + b_{i, mi}$ is equal to the length of the file $l_{i}$), and after that all blocks are transferred through the network, maintaining the order of files in the archive. Zhorik thinks that the two messages contain the same archive, because their total lengths are equal. However, each file can be split in blocks in different ways in the two messages. You are given the lengths of blocks in each of the two messages. Help Zhorik to determine what is the maximum number of files could be in the archive, if the Zhorik's assumption is correct.
Let's define $s_{i}$ and $p_{i}$ as sums of first $i$ elements of $x$ and $y$ ($s_{0}=0$, $s_{i}=s_{i-1}+x_{i}$, $p_{0}=0$, $p_{i}=p_{i-1}+y_{i}$). $x_{l_{1}},x_{l_{1}+1},\dots,x_{r_{1}}$ and $y_{l_{2}},y_{l_{2}+1},\dots,y_{r_{2}}$ can be same file iff these three conditions are true: $s_{l_{1}-1}=p_{l_{2}-1}$ because we need to divide prefix into files. $s_{n}-s_{r_{1}}=p_{m}-p_{r_{2}}$ because we need to divide suffix into files. $s_{r_{1}}-s_{l_{1}-1}=p_{r_{2}}-p_{l_{2}-1}$ segments have same sum. It's easy to see that if two first conditions are true then the third is true too because $s_{n}=p_{m}$ and because of this fact and condition from statement $x_{i}\geq1,y_{i}\geq1$ answer is a number of non-empty prefixes with the same sum. Time complexity is $O(n+m)$ if you use two pointers or $O((n+m)\log{(n+m)})$ if you use some data structure.
[ "greedy", "implementation" ]
1,100
null
954
A
Diagonal Walking
Mikhail walks on a 2D plane. He can go either up or right. You are given a sequence of Mikhail's moves. He thinks that this sequence is too long and he wants to make it as short as possible. In the given sequence moving up is described by character U and moving right is described by character R. Mikhail can replace any pair of consecutive moves RU or UR with a diagonal move (described as character D). After that, he can go on and do some other replacements, until there is no pair of consecutive moves RU or UR left. Your problem is to print the minimum possible length of the sequence of moves after the replacements.
Let's iterate over all characters of the string from left to right (excluding last character). Suppose $i$ is a position of the current element of the string. If $s_{i} \neq s_{i + 1}$, increase answer by $1$ and increase $i$ by $2$, else just increase $i$ by $1$.
[ "implementation" ]
800
null
954
B
String Typing
You are given a string $s$ consisting of $n$ lowercase Latin letters. You have to type this string using your keyboard. Initially, you have an empty string. Until you type the whole string, you may perform the following operation: - add a character to the end of the string. Besides, \textbf{at most once} you may perform one additional operation: copy the string and append it to itself. For example, if you have to type string abcabca, you can type it in $7$ operations if you type all the characters one by one. However, you can type it in $5$ operations if you type the string abc first and then copy it and type the last character. If you have to type string aaaaaaaaa, the best option is to type $4$ characters one by one, then copy the string, and then type the remaining character. Print the minimum number of operations you need to type the given string.
Let's consider $s[i; j)$ as a substring of string $s$ from position $i$ to position $j$ ($j$ is not included). Let's iterate over all lengths of the copied prefix from $\lfloor\frac{n}{2}\rfloor$ to 0 inclusive, and then if $s[0; len) = s[len; 2 \cdot len)$ then answer will be $min(n, n - len + 1)$ and iterating over smaller lengths is not necessary.
[ "implementation", "strings" ]
1,400
null
954
C
Matrix Walk
There is a matrix $A$ of size $x × y$ filled with integers. For every $i\in[1...x]$, $j\in[1..y]$ $A_{i, j} = y(i - 1) + j$. Obviously, every integer from $[1..xy]$ occurs exactly once in this matrix. You have traversed some path in this matrix. Your path can be described as a sequence of visited cells $a_{1}$, $a_{2}$, ..., $a_{n}$ denoting that you started in the cell containing the number $a_{1}$, then moved to the cell with the number $a_{2}$, and so on. From the cell located in $i$-th line and $j$-th column (we denote this cell as $(i, j)$) you can move into one of the following cells: - $(i + 1, j)$ — only if $i < x$; - $(i, j + 1)$ — only if $j < y$; - $(i - 1, j)$ — only if $i > 1$; - $(i, j - 1)$ — only if $j > 1$. Notice that making a move requires you to go to an adjacent cell. It is not allowed to stay in the same cell. You don't know $x$ and $y$ exactly, but you have to find any possible values for these numbers such that you could start in the cell containing the integer $a_{1}$, then move to the cell containing $a_{2}$ (in one step), then move to the cell containing $a_{3}$ (also in one step) and so on. Can you choose $x$ and $y$ so that they don't contradict with your sequence of moves?
You can notice that moves of kind $(i - 1, j)$ and $(i + 1, j)$ are changing value $x$ to $x - m$ and $x + m$. Thus, you can determine $m$ by checking adjacent nodes in the path. The answer is YES if there are one or zero distinct values of differences not counting difference of $1$. You can also set $n$ to arbitrary big value, it doesn't really matter until you can fit all values. $10^{9}$ will work just fine. Finally, knowing $n$ and $m$, simulate the process and check that all moves are valid. Overall complexity: $O(n)$.
[ "implementation" ]
1,700
null
954
D
Fight Against Traffic
Little town Nsk consists of $n$ junctions connected by $m$ bidirectional roads. Each road connects two distinct junctions and no two roads connect the same pair of junctions. It is possible to get from any junction to any other junction by these roads. The distance between two junctions is equal to the minimum possible number of roads on a path between them. In order to improve the transportation system, the city council asks mayor to build one new road. The problem is that the mayor has just bought a wonderful new car and he really enjoys a ride from his home, located near junction $s$ to work located near junction $t$. Thus, he wants to build a new road in such a way that the distance between these two junctions won't decrease. You are assigned a task to compute the number of pairs of junctions that are not connected by the road, such that if the new road between these two junctions is built the distance between $s$ and $t$ won't decrease.
Let's use bfs to calculate the smallest distances to all vertices from the vertex $s$ and from the vertex $t$. These will be $d_{s}[i]$ and $d_{t}[i]$ for all $i$. $d_{s}[t] = d_{t}[s] = D$ is the current smallest distance between $s$ and $t$. What you need is to iterate over all pairs $(u, v)$ and check if the edge between them doesn't exist and neither $(d_{s}[u] + d_{t}[v] + 1)$ nor $(d_{s}[v] + d_{t}[u] + 1)$ is smaller than $D$. Overall complexity: $O(n^{2})$.
[ "dfs and similar", "graphs", "shortest paths" ]
1,600
null
954
E
Water Taps
Consider a system of $n$ water taps all pouring water into the same container. The $i$-th water tap can be set to deliver any amount of water from $0$ to $a_{i}$ ml per second (this amount may be a real number). The water delivered by $i$-th tap has temperature $t_{i}$. If for every $i\in[1,n]$ you set $i$-th tap to deliver exactly $x_{i}$ ml of water per second, then the resulting temperature of water will be $\frac{\sum_{i=1}^{n}x_{i}t_{i}}{\sum_{i=1}^{n}x_{i}}$ (if $\sum_{i=1}^{n}x_{i}=0$, then to avoid division by zero we state that the resulting water temperature is $0$). You have to set all the water taps in such a way that the resulting temperature is exactly $T$. What is the maximum amount of water you may get per second if its temperature has to be $T$?
The following greedy strategy work. Let's turn all the taps at full power. If total temperature is greater than $T$ then we would like to decrease power on some taps with higher temperature. We want to decrease as low power as possible, so we should prioritize taps with the highest temperature. Sort all taps by temperature and find the total power on suffix you should decrease to have equal temperatures. This can be done with binary search. The same works for smaller initial temperature. Overall complexity: $O(n\log n)$.
[ "binary search", "greedy", "sortings" ]
2,000
null
954
F
Runner's Problem
You are running through a rectangular field. This field can be represented as a matrix with $3$ rows and $m$ columns. $(i, j)$ denotes a cell belonging to $i$-th row and $j$-th column. You start in $(2, 1)$ and have to end your path in $(2, m)$. From the cell $(i, j)$ you may advance to: - $(i - 1, j + 1)$ — only if $i > 1$, - $(i, j + 1)$, or - $(i + 1, j + 1)$ — only if $i < 3$. However, there are $n$ obstacles blocking your path. $k$-th obstacle is denoted by three integers $a_{k}$, $l_{k}$ and $r_{k}$, and it forbids entering any cell $(a_{k}, j)$ such that $l_{k} ≤ j ≤ r_{k}$. You have to calculate the number of different paths from $(2, 1)$ to $(2, m)$, and print it modulo $10^{9} + 7$.
There is a simple dynamic programming solution that works in $O(m)$. Let's try to improve it. Firstly, if there are no obstacles in some column $i$ and we have calculated the number of paths to every cell of the previous column, then we may get the values in column $i$ by multiplying the vector of values in column $i - 1$ by the following matrix: $\left(\begin{array}{l l l}{1}&{1}&{0}\\ {1}&{1}&{1}\\ {0}&{1}&{1}\end{array}\right)$ Then we may use binary exponentiation to skip long segments without obstacles in $O(logk)$, where $k$ is the length of the segment. Let's try to modify this matrix if we have to forbid some rows. All we need to change is to set every value in $i$-th row to $0$ if $i$-th row is forbidden. So we may skip long segments not only if they don't contain any obstacles, but also if the set of forbidden rows doesn't change on this segment. So the solution is the following: divide the whole matrix into $2n + 1$ segments by the endpoints of the obstacles, then in every segment the set of forbidden rows doesn't change (so we can skip it using fast matrix exponentiation).
[ "dp", "matrices", "sortings" ]
2,100
null
954
G
Castle Defense
Today you are going to lead a group of elven archers to defend the castle that is attacked by an army of angry orcs. Three sides of the castle are protected by impassable mountains and the remaining side is occupied by a long wall that is split into $n$ sections. At this moment there are exactly $a_{i}$ archers located at the $i$-th section of this wall. You know that archer who stands at section $i$ can shoot orcs that attack section located at distance not exceeding $r$, that is all such sections $j$ that $|i - j| ≤ r$. In particular, $r = 0$ means that archers are only capable of shooting at orcs who attack section $i$. Denote as \underline{defense level} of section $i$ the total number of archers who can shoot at the orcs attacking this section. \underline{Reliability} of the defense plan is the minimum value of defense level of individual wall section. There is a little time left till the attack so you can't redistribute archers that are already located at the wall. However, there is a reserve of $k$ archers that you can distribute among wall sections in arbitrary way. You would like to achieve maximum possible reliability of the defence plan.
Firstly, if we may obtain reliability at least $x$, then we may obtain reliability not less than $x - 1$ with the same number of archers. So we may use binary search and check whether we may obtain reliability at least $x$. How can we check it? Let's find the leftmost section such that its defense level is less than $x$. Let its index be $i$. We obviously have to add some archers controlling this section, and since every section to the left of it is already controlled, the best option where to add archers is the section with index $min(i + r, n)$. After we added enough archers, we move to next section such that its defense level is less than $x$ and do the same. If we run out of archers without protecting all the sections, then it's impossible to obtain reliability $x$. To do checking in $O(n)$, we may use prefix sums or "sliding window" technique.
[ "binary search", "data structures", "greedy", "two pointers" ]
2,000
null
954
H
Path Counting
You are given a rooted tree. Let's denote $d(x)$ as depth of node $x$: depth of the root is $1$, depth of any other node $x$ is $d(y) + 1$, where $y$ is a parent of $x$. The tree has the following property: every node $x$ with $d(x) = i$ has exactly $a_{i}$ children. Maximum possible depth of a node is $n$, and $a_{n} = 0$. We define $f_{k}$ as the number of unordered pairs of vertices in the tree such that the number of edges on the simple path between them is equal to $k$. Calculate $f_{k}$ modulo $10^{9} + 7$ for every $1 ≤ k ≤ 2n - 2$.
At first when we read the problem, a simple solution comes to our mind, take a look at the LCA (Lowest Common Ancestor) of that starting and ending vertices of the path and then use combinatorics to calculate the number of the paths, but after trying to implement this or solve this on paper it doesn't seem to be easy at all and it may even be impossible to implement this. So lets try to solve this problem in a different way. For calculating the answer, we count the number of times each path starts or ends at every vertex, and then divide them by $2$ to get the answer for each vertex. For calculating the answer to the above, it is easy to see that all vertices with the same height have the same number of paths going through them, so if we calculate the number of paths going through one of them and then multiply it by the number of the vertices in that height (let it be $c_{h}$) it gets equal to our answer. We can calculate the answer for a certain height. So to do that, we divide the paths into two types, paths that go only into the subtree of a vertex (let's call it $d_{h}$), and paths that go up (let's call it $u_{h}$). For the ones that are in the subtree, it is easy to see if there are at least $k$ other vertices that go down, we can go all paths going down (let the number of them be $p_{h, k}$), and the answer for this part, equals to: $d_{k}=\sum_{h=1}^{n}c_{h}\times p_{h,k}$ For the ones that go up, we use dynamic-programming, and we define $dp_{h, k}$ the number of paths that start at a vertex with height $h$ and have length $k$ and do not use the leftmost edge exiting the vertex at height $h$. To update this either we go down on one of the $a_{h} - 1$ paths and then we go through a path of length $k - 1$, or we go up and get a path of length $k - 1$ starting at a vertex from height $h - 1$, so the answer for this one equals to: $u_{k}=\sum_{h=1}^{n}c_{h}\times d p_{h,k}$ Now $f_{k}={\frac{d_{k}+u_{k}}{2}}$. 
And the final complexity of the solution will be $O(n^{2})$, but because of the large constant of the solution the time limit is higher.
[ "combinatorics", "dp" ]
2,500
null
954
I
Yet Another String Matching Problem
Suppose you have two strings $s$ and $t$, and their length is equal. You may perform the following operation any number of times: choose two different characters $c_{1}$ and $c_{2}$, and replace every occurence of $c_{1}$ in both strings with $c_{2}$. Let's denote the distance between strings $s$ and $t$ as the minimum number of operations required to make these strings equal. For example, if $s$ is abcd and $t$ is ddcb, the distance between them is $2$ — we may replace every occurence of a with b, so $s$ becomes bbcd, and then we may replace every occurence of b with d, so both strings become ddcd. You are given two strings $S$ and $T$. For every substring of $S$ consisting of $|T|$ characters you have to determine the distance between this substring and $T$.
Unfortunately, it seems we failed to eliminate bitset solutions. The approach in our model solution is the following: Firstly, let's try to find some "naive" solution for calculating the distance between two strings. We may build an undirected graph where vertices represent letters, and edges represent that one letter must be transformed into another. Then all letters in the same component should become one letter, so the answer is the number of distinct letters minus the number of components. Then let's get back to original problem. For every substring of $S$ we have to find which letters have to be merged to make it equal with $T$. This can be done with the help of FFT: to find all positions in substrings of $S$ with character $a$ that coincide with occurences of $b$ in $T$, we may compute a convolution of two following arrays: set $1$ to every position in $S$ where occurs $a$, and to every position in $T$ where $b$ occurs (all other elements should be $0$). After trying these convolutions for every pair of different characters, we compute the answer for every substring using DFS (or any other method).
[ "fft", "math" ]
2,200
null
955
A
Feed the cat
After waking up at $hh$:$mm$, Andrew realised that he had forgotten to feed his only cat for yet another time (guess why there's only one cat). The cat's current hunger level is $H$ points, moreover each minute without food increases his hunger by $D$ points. At any time Andrew can visit the store where tasty buns are sold (you can assume that it doesn't take time to get to the store and back). One such bun costs $C$ roubles and decreases hunger by $N$ points. Since the demand for bakery drops heavily in the evening, there is a special $20\%$ discount for buns starting from $20$:$00$ (note that the cost might become rational). Of course, buns cannot be sold by parts. Determine the minimum amount of money Andrew has to spend in order to feed his cat. The cat is considered fed if its hunger level is less than or equal to zero.
It's optimal to buy buns either right after waking up or at $20$:$00$ (if possible) because between the awakening and $20$:$00$ cost doesn't change but cat's hunger does. There was one extra case when Andrew wakes up after $20$:$00$ and has only one possible option of buying everything since he cannot turn back time.
[ "greedy", "math" ]
1,100
# Codeforces 955A "Feed the cat".
# It is optimal to buy all buns either immediately (full price) or at
# 20:00 (minute 1200) with the 20% discount, while hunger grows by D
# per minute until then.  If it is already 20:00 or later, only the
# discounted purchase is available.

def buns_needed(hunger, per_bun):
    # Ceiling division: whole buns required to bring hunger to <= 0.
    return -(-hunger // per_bun)

h, m = map(int, input().split())
hunger, growth, price, per_bun = map(int, input().split())
now = 60 * h + m  # current time in minutes since midnight

if now >= 1200:
    # Past 20:00 already — the discount applies right away.
    best = buns_needed(hunger, per_bun) * price * 0.8
else:
    buy_now = buns_needed(hunger, per_bun) * price
    buy_later = buns_needed(hunger + (1200 - now) * growth, per_bun) * price * 0.8
    best = min(buy_now, buy_later)

print('{:.4f}'.format(best))
955
B
Not simply beatiful strings
Let's call a string adorable if its letters can be realigned in such a way that they form two consequent groups of equal symbols (note that different groups must contain different symbols). For example, ababa is adorable (you can transform it to aaabb, where the first three letters form a group of $a$-s and others — a group of $b$-s), but cccc is not since in each possible consequent partition letters in these two groups coincide. You're given a string $s$. Check whether it can be split into two non-empty subsequences such that the strings formed by these subsequences are adorable. Here a subsequence is an arbitrary set of indexes of the string.
Since order of letters in adorable strings doesn't matter, it doesn't matter in the initial string as well. Let $d$ be the number of distinct letters in $s$. Consider the following cases one after another: If $|s| < 4$ answer is <<No>> since lengths of adorable strings cannot be less than two; If $d$ is more than $4$ answer is also <<No>> since adorable strings contain two distinct letters; If $d$ is equal to $4$ answer is always <<Yes>> (we give two types of letters to string one and other two to string two); If $d$ is equal to three answer is also <<Yes>> (based on the fact that length of $s$ is no less than $4$); If $d$ is equal to two answer depends on whether there's a letter occuring only once (because that means that one of the strings will consist of letters of the same kind); If all letters are the same, answer is <<No>> (same as the previous case).
[ "implementation" ]
1,400
# Codeforces 955B "Not simply beatiful strings".
# Only the multiset of letters matters.  Case analysis on the number of
# distinct letters (see editorial): too short or single-letter -> No;
# 3 or 4 distinct letters -> Yes; exactly 2 distinct letters -> No iff
# one of them occurs only once (that string would be single-lettered).
from collections import Counter

counts = Counter(input())
total = sum(counts.values())
distinct = len(counts)

if total < 4 or distinct > 4 or distinct == 1:
    answer = 'No'
elif distinct >= 3:
    answer = 'Yes'
elif min(counts.values()) == 1:
    # Two distinct letters, one of them unique.
    answer = 'No'
else:
    answer = 'Yes'

print(answer)
955
C
Sad powers
You're given $Q$ queries of the form $(L, R)$. For each query you have to find the number of such $x$ that $L ≤ x ≤ R$ and there exist integer numbers $a > 0$, $p > 1$ such that $x = a^{p}$.
Let's fix some power $p$. It's obvious that there are no more than $10^{\frac{18}{p}}$ numbers $x$ such that $x^{p}$ does not exceed $10^{18}$. At the same time, only for $p = 2$ this amount is relatively huge; for all other $p \ge 3$ the total amount of such numbers will be of the order of $10^{6}$. Let's then generate all of them and dispose of all perfect squares among them. Then the answer to query $(L, R)$ is equal to the amount of generated numbers between $L$ and $R$ plus the number of perfect squares in the range. The first value can be calculated via two binary searches. The second one is $\lfloor{\sqrt{R}}\rfloor-\lfloor{\sqrt{L-1}}\rfloor$. Note that due to precision issues the standard sqrt might produce incorrect values, so you can use additional binary searches instead. Complexity: $O(10^{6}+q\cdot\log10^{18})$.
[ "binary search", "math", "number theory" ]
2,100
// Codeforces 955C "Sad powers".
// Precompute every perfect power x = a^p with p >= 3, x <= 1e18, that is
// NOT a perfect square.  A query [l, r] is then answered as
//   (#precomputed values in [l, r]) + floor(sqrt(r)) - floor(sqrt(l - 1)).
// Integer square roots are taken by binary search to avoid the precision
// pitfalls of floating-point sqrt near 1e18.
#include <cstdio>
#include <ctime>
#include <algorithm>
#include <vector>

using namespace std;

typedef long long ll;

// Fast scanner: reads one (possibly negative) integer with getchar().
template<typename T = int>
inline T read() {
    T val = 0, sign = 1;
    char ch;
    for (ch = getchar(); ch < '0' || ch > '9'; ch = getchar())
        if (ch == '-') sign = -1;
    for (; ch >= '0' && ch <= '9'; ch = getchar())
        val = val * 10 + ch - '0';
    return sign * val;
}

vector<ll> cand;  // sorted perfect powers (p >= 3) that are not squares

// floor(sqrt(x)) via binary search; valid for 0 <= x <= 1e18.
ll root(ll x) {
    ll lo = 0, hi = 1e9 + 1;
    while (lo < hi - 1) {
        ll mid = (lo + hi) / 2;
        if (mid * mid > x) hi = mid;
        else lo = mid;
    }
    return lo;
}

void solve() {
    ll l = read<ll>(), r = read<ll>();
    // Non-square higher powers inside [l, r] ...
    ll ans = upper_bound(cand.begin(), cand.end(), r)
           - lower_bound(cand.begin(), cand.end(), l);
    // ... plus all perfect squares inside [l, r].
    ans += root(r) - root(l - 1);
    printf("%lld\n", ans);
}

void precalc() {
    const ll U = 1e18;
    vector<ll> powers = { 1 };
    for (ll base = 2; base <= 1e6; base++)
        for (ll cur = base * base * base; cur <= U; cur *= base) {
            powers.push_back(cur);
            if (cur > U / base) break;  // next multiplication would overflow
        }
    sort(powers.begin(), powers.end());
    powers.erase(unique(powers.begin(), powers.end()), powers.end());
    for (const ll &v : powers) {
        ll s = root(v);
        if (s * s == v) continue;  // squares are counted analytically per query
        cand.push_back(v);
    }
    fprintf(stderr, "Precalc: %.3f\n", (double)(clock()) / CLOCKS_PER_SEC);
    fflush(stderr);
}

int main() {
    precalc();
    int t = read();
    clock_t tot = clock();
    while (t--)
        solve();
    fprintf(stderr, "Total Time: %.3f\n", (double)(clock() - tot) / CLOCKS_PER_SEC);
    fflush(stderr);
}
955
D
Scissors
Jenya has recently acquired quite a useful tool — $k$-scissors for cutting strings. They are generally used for cutting out two non-intersecting substrings of length $k$ from an arbitrary string $s$ (its length should be at least $2·k$ in order to perform this operation) and concatenating them afterwards (preserving the initial order). For example, with the help of $2$-scissors you can cut $ab$ and $de$ out of $abcde$ and concatenate them into $abde$, but not $ab$ and $bc$ since they're intersecting. It's a nice idea to test this tool before using it in practice. After looking through the papers, Jenya came up with two strings $s$ and $t$. His question is whether it is possible to apply his scissors to string $s$ such that the resulting concatenation contains $t$ as a substring?
Denote $lpos(x)$ - the minimum index in $s$ that prefix of $t$ of length $x$ might start at, provided $lpos(x) + x \ge k$ (so this prefix can be enclosed in some $k$-substring of $s$ as a suffix) or $- 1$ if there are none. Denote $rpos(x)$ in the same manner - the maximum index in $s$ that suffix of $t$ of length $x$ might end at, under the same conditions (enclosing suffix in some $k$-substring of $s$ as a prefix). It's clear that these array allow us to iterate over all possible prefix/suffix partitions of $t$ and check their correctness. Note that $rpos$ is calculated as $lpos$ on reversed strings. How do we obtain $lpos$? Let's calculate $z$-function of $s$ with respect to $t$ and say that $z(i)$ is the maximum prefix of $t$ starting at position $i$ in $s$. Which $z(i)$ might influence $lpos(x)$? First of all, they must satisfy $z(i) \ge x$. Second, as mentioned above, $i + x \ge k$. This allows us to apply all updates naively and achieve $O(n^{2})$. To speed this up we will iterate over $z$ in decreasing order and maintain viewed indexes in a set in such a way that at the moment we are up to calculate $lpos(x)$ all $i$-s such that $z(i) \ge x$ will be in. Then $lpos(x)$ will be equal to minimum $j$ in the set satisfying $j \ge k - x$. This allows us to reduce the complexity to $O(n\log n)$. Reverse both $s$ and $t$ and calculate $rpos$ in the same way. Then the only thing left is to check whether for some $x \le k$ values $lpos(x)$ and $rpos(m - x)$ can be combined to obtain the answer.
[ "brute force", "strings" ]
2,600
// Codeforces 955D "Scissors".
// Either t already occurs in s (then any two valid k-blocks covering it
// work), or t is split into a prefix (length i) and a suffix (length
// m - i), each of which must be enclosed in its own k-substring of s.
// lpos[x] = minimum index in s where a prefix of t of length x starts,
// subject to "fits as a suffix of some k-block"; rpos is the symmetric
// quantity computed on the reversed strings.
#include <cstdio>
#include <ctime>
#include <iostream>
#include <algorithm>
#include <string>
#include <vector>
#include <set>

using namespace std;

// Fast scanner: reads one (possibly negative) integer with getchar().
template<typename T = int>
inline T read() {
    T val = 0, sign = 1;
    char ch;
    for (ch = getchar(); ch < '0' || ch > '9'; ch = getchar())
        if (ch == '-') sign = -1;
    for (; ch >= '0' && ch <= '9'; ch = getchar())
        val = val * 10 + ch - '0';
    return sign * val;
}

int n, m, k;  // |s|, |t|, scissors length

// Z-function of t against s: res[i] = longest prefix of t starting at s[i].
vector<int> z_function(string s, string t) {
    s = t + '#' + s;
    vector<int> z(n + m + 1);
    int l = 0, r = 1;
    for (int i = 1; i <= n + m; i++) {
        if (i < r) z[i] = min(z[i - l], r - i);
        while (i + z[i] <= n + m && s[i + z[i]] == s[z[i]]) z[i]++;
        if (i + z[i] > r) { l = i; r = i + z[i]; }
    }
    vector<int> res(n);
    for (int i = 0; i < n; i++) res[i] = z[i + m + 1];
    return res;
}

// best[x] = minimum index i in s with z[i] >= x and i >= k - x
// (so the length-x prefix of t fits inside some k-block), or -1.
// Indices are processed in decreasing order of z-value so that when
// length x is handled the set holds exactly the i with z[i] >= x.
vector<int> match_pos(string s, string t) {
    vector<int> best(m + 1, -1);
    vector<int> z = z_function(s, t);
    vector<vector<int>> by_len(m + 1);
    for (int i = 0; i < n; i++) by_len[z[i]].push_back(i);
    set<int> seen;
    for (int len = m; len > 0; len--) {
        for (const int idx : by_len[len]) seen.insert(idx);
        auto it = seen.lower_bound(k - len);
        if (it == seen.end()) continue;
        best[len] = *it;
    }
    return best;
}

void solve() {
    n = read(), m = read(), k = read();
    string s, t;
    cin >> s >> t;

    // Case 1: t occurs in s as-is.
    vector<int> z = z_function(s, t);
    for (int i = 0; i < n; i++)
        if (z[i] >= m) {
            puts("Yes");
            int left = min(i + 1, n - 2 * k + 1);  // 1-based, clamped to fit two blocks
            printf("%d %d\n", left, left + k);
            return;
        }

    // Case 2: prefix/suffix split of t across the two k-blocks.
    vector<int> lpos = match_pos(s, t);
    reverse(s.begin(), s.end()), reverse(t.begin(), t.end());
    vector<int> rpos = match_pos(s, t);
    reverse(s.begin(), s.end());

    for (int i = 1; i <= min(m - 1, k); i++) {
        if (m - i > k) continue;  // suffix must also fit in one k-block
        if (lpos[i] == -1 || rpos[m - i] == -1) continue;
        int endL = lpos[i] + i - 1;            // prefix end (0-based, in s)
        int endR = rpos[m - i] + (m - i) - 1;  // suffix end (0-based, in reversed s)
        if (endL + endR >= n - 1) continue;    // the two blocks would intersect
        puts("Yes");
        printf("%d %d\n",
               lpos[i] + i - (k - 1),
               n - rpos[m - i] - ((m - i) - 1));
        return;
    }
    puts("No");
}

int main() {
    clock_t start = clock();
    solve();
    fprintf(stderr, "Total Time: %.3f\n", (double)(clock() - start) / CLOCKS_PER_SEC);
    fflush(stderr);
}
955
E
Icicles
Andrew's favourite Krakozyabra has recenly fled away and now he's eager to bring it back! At the moment the refugee is inside an icy cave with $n$ icicles dangling from the ceiling located in integer coordinates numbered from $1$ to $n$. The distance between floor and the $i$-th icicle is equal to $a_{i}$. Andrew is free to choose an arbitrary integer point $T$ in range from $1$ to $n$ inclusive and at time instant $0$ launch a sound wave spreading into both sides (left and right) at the speed of one point per second. Any icicle touched by the wave starts falling at the same speed (that means that in a second the distance from floor to icicle decreases by one but cannot become less that zero). While distance from icicle to floor is more than zero, it is considered passable; as soon as it becomes zero, the icicle blocks the path and prohibits passing. Krakozyabra is initially (i.e. at time instant $0$) is located at point $\textstyle{\frac{1}{2}}$ and starts running in the right direction at the speed of one point per second. You can assume that events in a single second happen in the following order: first Krakozyabra changes its position, and only then the sound spreads and icicles fall; in particular, that means that if Krakozyabra is currently at point $i-{\frac{1}{2}}$ and the falling (i.e. already touched by the sound wave) icicle at point $i$ is $1$ point from the floor, then Krakozyabra will pass it and find itself at $i+{\frac{1}{2}}$ and only after that the icicle will finally fall and block the path. Krakozyabra is considered entrapped if there are fallen (i.e. with $a_{i} = 0$) icicles both to the left and to the right of its current position. Help Andrew find the minimum possible time it takes to entrap Krakozyabra by choosing the optimal value of $T$ or report that this mission is impossible.
Fix some point $T$ and launch the wave. Icicle at $i$ will reach the floor in $f_{T}(i) = a_{i} + |T - i|$ seconds. Krakozyabra will definitely stop at the minimum icicle $j$ such that $f_{T}(j) < j$ and wait for something to the left of it to fall. Note that some icicle to the right of $j$ might also fall earlier than $j$ itself. So the answer for this fixed $T$ is $max(min_{1 \le i < j}f_{T}(i), min_{j \le i \le n}f_{T}(i))$. This approach gives us a $O(n^{2})$ solution. How to speed this up? Let's get rid of the absolute value. For $i \le T$ the absolute value is unfolded as $f_{T}(i) = a_{i} + T - i$ and for $i > T$ as $f_{T}(i) = a_{i} - T + i$. Rewrite the inequality $f_{T}(i) < i$ according to the observations above. It's easy to see that for $i \le T$ it is equivalent to $T < 2 \cdot i - a_{i}$ and for $i > T$ to $a_{i} < T$. Build some range max/min structure and find $j$ assuming $j \le T$, and if unsuccessful — assuming $j > T$, with respect to the given inequalities. The only thing left is to carefully find minimums on suffix/prefix. Complexity: $O(n\log n)$.
[]
2,900
// Codeforces 955E "Icicles".
// For a fixed launch point T, icicle i lands at time
//   f_T(i) = a_i + |T - i|,
// and Krakozyabra stops at the first j with f_T(j) < j; the answer for
// this T is max(min_{i<j} f_T(i), min_{i>=j} f_T(i)).  Unfolding the
// absolute value gives f_T(i) = (a_i - i) + T for i <= T and
// f_T(i) = (a_i + i) - T for i > T, so all the needed minima are range
// minimum queries over a_i - i and a_i + i, answered with sparse tables.
#include <cstdio>
#include <ctime>
#include <cassert>
#include <algorithm>
#include <vector>

using namespace std;

const int N = 105000;
typedef long long ll;

// Fast scanner: reads one (possibly negative) integer with getchar().
template<typename T = int>
inline T read() {
    T val = 0, sign = 1;
    char ch;
    for (ch = getchar(); ch < '0' || ch > '9'; ch = getchar())
        if (ch == '-') sign = -1;
    for (; ch >= '0' && ch <= '9'; ch = getchar())
        val = val * 10 + ch - '0';
    return sign * val;
}

ll a[N];  // 1-based icicle heights

// Sparse table for range min (isMax == false) or max queries, 1-based.
struct spt {
    vector<vector<ll>> dp;
    vector<int> log;
    int n;
    bool isMax;

    spt(int n, bool isMax = true) : n(n), isMax(isMax) {
        log.resize(n + 1);
        for (int i = 2; i <= n; i++) log[i] = log[i / 2] + 1;
        dp.resize(20);
        for (int i = 0; i < 20; i++) {
            dp[i].resize(n + 1);
            if (!isMax) fill(dp[i].begin(), dp[i].end(), 1e18);
        }
    }

    void init(vector<int> vals) {
        for (int i = 1; i <= n; i++) dp[0][i] = vals[i];
        for (int k = 1; (1 << k) <= n; k++)
            for (int i = 1; i + (1 << k) <= n + 1; i++) {
                ll lval = dp[k - 1][i], rval = dp[k - 1][i + (1 << (k - 1))];
                dp[k][i] = isMax ? max(lval, rval) : min(lval, rval);
            }
    }

    // Query on the inclusive range [l, r]; requires l <= r.
    ll get(int l, int r) {
        if (l > r) assert(0);
        int k = log[r - l + 1];
        ll lval = dp[k][l], rval = dp[k][r - (1 << k) + 1];
        return isMax ? max(lval, rval) : min(lval, rval);
    }
};

int n;

void solve() {
    n = read();
    for (int i = 1; i <= n; i++) a[i] = read();

    ll best = 1e18;
    int L = 1;  // monotone pointer: first candidate stop index at or left of T

    spt minA(n, false);  // range min of a_i, to find the stop index right of T
    minA.init(vector<int>(a, a + n + 1));

    vector<int> shifted(n + 1);
    spt minLeft(n, false);  // range min of a_i - i  (f_T for i <= T, minus T)
    for (int i = 1; i <= n; i++) shifted[i] = a[i] - i;
    minLeft.init(shifted);
    spt minRight(n, false);  // range min of a_i + i  (f_T for i > T, plus T)
    for (int i = 1; i <= n; i++) shifted[i] = a[i] + i;
    minRight.init(shifted);

    for (int T = 1; T <= n; T++) {
        // Stop index at or left of T exists iff some L <= T has T < 2L - a_L.
        while (L <= T && T >= 2 * L - a[L]) L++;
        int j = -1;
        if (L <= T) {
            j = L;
        } else {
            // Otherwise binary-search the first index >= T with a_i < T.
            int lo = T, hi = n, found = -1;
            while (lo <= hi) {
                int mid = (lo + hi) / 2;
                if (minA.get(T, mid) >= T) {
                    lo = mid + 1;
                } else {
                    found = mid;
                    hi = mid - 1;
                }
            }
            if (found == -1) continue;  // Krakozyabra escapes for this T
            j = found;
        }

        ll lp = 0, rp = 0;  // earliest fall strictly left of j / at or right of j
        fprintf(stderr, "[%d -> %d]\n", T, j), fflush(stderr);
        if (j <= T) {
            lp = minLeft.get(1, j - 1) + T;
            rp = min(minLeft.get(j, T) + T, minRight.get(T, n) - T);
        } else {
            lp = min(minLeft.get(1, T) + T, minRight.get(T, j - 1) - T);
            rp = minRight.get(j, n) - T;
        }
        fprintf(stderr, "%lld %lld\n", lp, rp), fflush(stderr);
        best = min(best, max(lp, rp));
    }

    if (best == 1e18) puts("-1");
    else printf("%lld\n", best);
}

int main() {
    clock_t start = clock();
    solve();
    fprintf(stderr, "Total Time: %.3f\n", (double)(clock() - start) / CLOCKS_PER_SEC);
    fflush(stderr);
}