Description
Given $n$ numbers, the value of a contiguous interval $[l, r]$ is $\gcd(A_l, A_{l+1}, \ldots, A_r) \times (r - l + 1)$.
Find the maximum value over all intervals.
($1 \leq n \leq 50000$, $A_i \leq 10^9$)
Solution
Let $f[i][j]$ be the gcd of the interval $[i, j]$; then $f[i][j] = \gcd(f[i][j-1], A_j)$.
So we can fix the right endpoint, compute the gcd of every interval ending there, and update the answer along the way.
Keep an array of the current suffix gcds, one per left endpoint. Moving the left endpoint further left can only keep the gcd or drop it to a proper divisor, so at most $O(\log \max A_i)$ distinct values appear at any time; whenever a gcd equals that of the previous block, merge the two blocks into one, otherwise start a new block.
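As a rough sketch of this bookkeeping (not the submitted program, which follows below), the blocks can be kept in a std::vector of (gcd, length) pairs; std::gcd here assumes C++17, and all names are illustrative:

#include <cstdio>
#include <vector>
#include <algorithm>
#include <numeric>   // std::gcd (C++17)

int main() {
    int n;
    scanf("%d", &n);
    std::vector<std::pair<int, int>> blocks;   // (suffix gcd, block length), left to right
    long long ans = 0;
    for (int i = 0; i < n; ++i) {
        int a;
        scanf("%d", &a);
        // Extend every existing suffix gcd by the new element, merging equal neighbours.
        std::vector<std::pair<int, int>> next;
        for (auto &b : blocks) {
            int g = std::gcd(b.first, a);
            if (!next.empty() && next.back().first == g) next.back().second += b.second;
            else next.push_back({g, b.second});
        }
        // The one-element interval [i, i] forms the rightmost block.
        if (!next.empty() && next.back().first == a) next.back().second += 1;
        else next.push_back({a, 1});
        blocks.swap(next);
        // Walk the blocks from the right; the running length is the interval length.
        long long len = 0;
        for (int j = (int)blocks.size() - 1; j >= 0; --j) {
            len += blocks[j].second;
            ans = std::max(ans, (long long)blocks[j].first * len);
        }
    }
    printf("%lld\n", ans);
    return 0;
}

Each right endpoint rebuilds the block list from the previous one, so the work per step is proportional to the number of distinct gcds, i.e. $O(\log \max A_i)$.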
Code
#include <cstdio>
#include <algorithm>
#define N 500010
using namespace std;

int n, A[N], tot, l[N];   // A[j]: gcd of the j-th block, l[j]: number of left endpoints in it
long long Ans;

inline int read() {
    int x = 0, f = 1; char ch = getchar();
    while (ch < '0' || ch > '9') { if (ch == '-') f = -1; ch = getchar(); }
    while (ch >= '0' && ch <= '9') { x = x * 10 + ch - '0'; ch = getchar(); }
    return x * f;
}

int gcd(int a, int b) {
    return (b == 0) ? a : gcd(b, a % b);
}

int main() {
    n = read();
    for (int k, i = 1; i <= n; ++i) {
        A[++tot] = read();                       // new block for the single element A[i]
        l[tot] = 1, k = 0;
        for (int j = 1; j <= tot; ++j) {
            A[j] = gcd(A[j], A[tot]);            // extend every suffix gcd by A[i]
            if (A[j] == A[k]) l[k] += l[j];      // equal to the previous block: merge
            else A[++k] = A[j], l[k] = l[j];     // otherwise start a new block
        }
        int s = 0; tot = k;
        for (int j = tot; j; --j) {              // rightmost block first
            s += l[j];                           // s = longest interval length with gcd A[j]
            Ans = max(Ans, 1ll * A[j] * s);
        }
    }
    printf("%lld\n", Ans);
    return 0;
}