index int64 0 0 | repo_id stringlengths 26 205 | file_path stringlengths 51 246 | content stringlengths 8 433k | __index_level_0__ int64 0 10k |
|---|---|---|---|---|
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/PARTITION_INTO_TWO_SUBARRAYS_OF_LENGTHS_K_AND_N_K_SUCH_THAT_THE_DIFFERENCE_OF_SUMS_IS_MAXIMUM.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class PARTITION_INTO_TWO_SUBARRAYS_OF_LENGTHS_K_AND_N_K_SUCH_THAT_THE_DIFFERENCE_OF_SUMS_IS_MAXIMUM{
static int f_gold ( int arr [ ] , int N , int k ) {
int M , S = 0 , S1 = 0 , max_difference = 0 ;
for ( int i = 0 ;
i < N ;
i ++ ) S += arr [ i ] ;
int temp ;
for ( int i = 0 ;
i < N ;
i ++ ) {
for ( int j = i + 1 ;
j < N ;
j ++ ) {
if ( arr [ i ] < arr [ j ] ) {
temp = arr [ i ] ;
arr [ i ] = arr [ j ] ;
arr [ j ] = temp ;
}
}
}
M = Math . max ( k , N - k ) ;
for ( int i = 0 ;
i < M ;
i ++ ) S1 += arr [ i ] ;
max_difference = S1 - ( S - S1 ) ;
return max_difference ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{1,5,5,9,9,19,22,24,27,33,39,39,40,41,42,43,44,45,48,52,52,53,53,55,55,56,57,57,60,60,61,62,65,66,67,70,71,72,73,77,78,79,84,87,89,91,95,98});
param0.add(new int[]{-22,-28});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{63,72,2,94,89,11,95,79,90,9,70,28,25,74,16,36,50,91,38,47,47,13,27,29,31,35});
param0.add(new int[]{-86,-78,-76,-76,-66,-62,-62,-38,-34,-32,-30,-26,-22,-4,-4,2,8,8,10,22,52,52,58,64,66,66,66,70,82,82});
param0.add(new int[]{0,0,0,1,1,1,1,1,1,0,1,1,1,1,1,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,1,0,1,1,0,0,0,1,0,0,1,0,0});
param0.add(new int[]{1,2,2,9,9,12,13,26,26,33,34,35,51,57,70,79,83});
param0.add(new int[]{98,-72,2,40,-20,-14,42,8,14,-58,-18,-70,-8,-66,-68,72,82,-38,-78,2,-66,-88,-34,52,12,84,72,-28,-34,60,-60,12,-28,-42,22,-66,88,-96});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{21,85,64,20,4,5,2});
List<Integer> param1 = new ArrayList<>();
param1.add(41);
param1.add(1);
param1.add(20);
param1.add(23);
param1.add(29);
param1.add(42);
param1.add(9);
param1.add(28);
param1.add(37);
param1.add(5);
List<Integer> param2 = new ArrayList<>();
param2.add(44);
param2.add(1);
param2.add(29);
param2.add(16);
param2.add(24);
param2.add(32);
param2.add(16);
param2.add(28);
param2.add(27);
param2.add(6);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,200 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/GIVEN_A_SORTED_AND_ROTATED_ARRAY_FIND_IF_THERE_IS_A_PAIR_WITH_A_GIVEN_SUM_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class GIVEN_A_SORTED_AND_ROTATED_ARRAY_FIND_IF_THERE_IS_A_PAIR_WITH_A_GIVEN_SUM_1{
static int f_gold ( int arr [ ] , int n , int x ) {
int i ;
for ( i = 0 ;
i < n - 1 ;
i ++ ) if ( arr [ i ] > arr [ i + 1 ] ) break ;
int l = ( i + 1 ) % n ;
int r = i ;
int cnt = 0 ;
while ( l != r ) {
if ( arr [ l ] + arr [ r ] == x ) {
cnt ++ ;
if ( l == ( r - 1 + n ) % n ) {
return cnt ;
}
l = ( l + 1 ) % n ;
r = ( r - 1 + n ) % n ;
}
else if ( arr [ l ] + arr [ r ] < x ) l = ( l + 1 ) % n ;
else r = ( n + r - 1 ) % n ;
}
return cnt ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{24,54});
param0.add(new int[]{68,-30,-18,-6,70,-40,86,98,-24,-48});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{84,44,40,45,2,41,52,17,50,41,5,52,48,90,13,55,34,55,94,44,41,2});
param0.add(new int[]{-92,-76,-74,-72,-68,-64,-58,-44,-44,-38,-26,-24,-20,-12,-8,-8,-4,10,10,10,20,20,26,26,28,50,52,54,60,66,72,74,78,78,78,80,86,88});
param0.add(new int[]{1,1,1,1,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,1});
param0.add(new int[]{5,5,15,19,22,24,26,27,28,32,37,39,40,43,49,52,55,56,58,58,59,62,67,68,77,79,79,80,81,87,95,95,96,98,98});
param0.add(new int[]{-98,28,54,44,-98,-70,48,-98,56,4,-18,26,-8,-58,30,82,4,-38,42,64,-28});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{26,72,74,86,98,86,22,6,95,36,11,82,34,3,50,36,81,94,55,30,62,53,50,95,32,83,9,16});
List<Integer> param1 = new ArrayList<>();
param1.add(1);
param1.add(8);
param1.add(33);
param1.add(18);
param1.add(29);
param1.add(19);
param1.add(28);
param1.add(17);
param1.add(24);
param1.add(19);
List<Integer> param2 = new ArrayList<>();
param2.add(1);
param2.add(8);
param2.add(28);
param2.add(16);
param2.add(30);
param2.add(10);
param2.add(34);
param2.add(14);
param2.add(24);
param2.add(16);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,201 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/PROGRAM_DISTANCE_TWO_POINTS_EARTH.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class PROGRAM_DISTANCE_TWO_POINTS_EARTH{
public static double f_gold ( double lat1 , double lat2 , double lon1 , double lon2 ) {
lon1 = Math . toRadians ( lon1 ) ;
lon2 = Math . toRadians ( lon2 ) ;
lat1 = Math . toRadians ( lat1 ) ;
lat2 = Math . toRadians ( lat2 ) ;
double dlon = lon2 - lon1 ;
double dlat = lat2 - lat1 ;
double a = Math . pow ( Math . sin ( dlat / 2 ) , 2 ) + Math . cos ( lat1 ) * Math . cos ( lat2 ) * Math . pow ( Math . sin ( dlon / 2 ) , 2 ) ;
double c = 2 * Math . asin ( Math . sqrt ( a ) ) ;
double r = 6371 ;
return ( c * r ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Double> param0 = new ArrayList<>();
param0.add(6578.099266893886);
param0.add(-9410.77783405426);
param0.add(6641.858718352012);
param0.add(-4142.202863100186);
param0.add(4181.4508741498075);
param0.add(-7745.655002884576);
param0.add(7725.024650172891);
param0.add(-5881.135770052704);
param0.add(8322.143446980337);
param0.add(-1772.9564288056765);
List<Double> param1 = new ArrayList<>();
param1.add(482.6430542568438);
param1.add(-1203.2861272633245);
param1.add(3498.0959749989424);
param1.add(-6286.669946106916);
param1.add(3845.247033476332);
param1.add(-8197.864556657836);
param1.add(9295.96418476119);
param1.add(-4802.015139707946);
param1.add(1841.108539911126);
param1.add(-8246.345733364455);
List<Double> param2 = new ArrayList<>();
param2.add(1342.7044674704348);
param2.add(-1947.91060583419);
param2.add(228.4572635598181);
param2.add(-2742.3608603803173);
param2.add(4909.334120366857);
param2.add(-3667.1524343381157);
param2.add(588.3703338670609);
param2.add(-35.713164290259726);
param2.add(9049.321929418034);
param2.add(-9716.057194373958);
List<Double> param3 = new ArrayList<>();
param3.add(3416.2819128903197);
param3.add(-781.7419983063755);
param3.add(5599.787943215038);
param3.add(-6584.987721971118);
param3.add(5159.242793722949);
param3.add(-8067.806767671396);
param3.add(1220.0418662747136);
param3.add(-4696.734461092275);
param3.add(4470.7365519306095);
param3.add(-8367.588380851601);
for(int i = 0; i < param0.size(); ++i)
{
if(Math.abs(1 - (0.0000001 + Math.abs(f_gold(param0.get(i),param1.get(i),param2.get(i),param3.get(i))) )/ (Math.abs(f_filled(param0.get(i),param1.get(i),param2.get(i),param3.get(i))) + 0.0000001)) < 0.001)
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,202 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_TOTAL_SET_BITS_IN_ALL_NUMBERS_FROM_1_TO_N.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_TOTAL_SET_BITS_IN_ALL_NUMBERS_FROM_1_TO_N{
static int f_gold ( int n ) {
int i = 0 ;
int ans = 0 ;
while ( ( 1 << i ) <= n ) {
boolean k = false ;
int change = 1 << i ;
for ( int j = 0 ;
j <= n ;
j ++ ) {
if ( k == true ) ans += 1 ;
else ans += 0 ;
if ( change == 1 ) {
k = ! k ;
change = 1 << i ;
}
else {
change -- ;
}
}
i ++ ;
}
return ans ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(90);
param0.add(56);
param0.add(43);
param0.add(31);
param0.add(77);
param0.add(35);
param0.add(43);
param0.add(66);
param0.add(15);
param0.add(95);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,203 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CHECK_GIVEN_SENTENCE_GIVEN_SET_SIMPLE_GRAMMER_RULES.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class CHECK_GIVEN_SENTENCE_GIVEN_SET_SIMPLE_GRAMMER_RULES{
static boolean f_gold ( char [ ] str ) {
int len = str . length ;
if ( str [ 0 ] < 'A' || str [ 0 ] > 'Z' ) return false ;
if ( str [ len - 1 ] != '.' ) return false ;
int prev_state = 0 , curr_state = 0 ;
int index = 1 ;
while ( index <= str . length ) {
if ( str [ index ] >= 'A' && str [ index ] <= 'Z' ) curr_state = 0 ;
else if ( str [ index ] == ' ' ) curr_state = 1 ;
else if ( str [ index ] >= 'a' && str [ index ] <= 'z' ) curr_state = 2 ;
else if ( str [ index ] == '.' ) curr_state = 3 ;
if ( prev_state == curr_state && curr_state != 2 ) return false ;
if ( prev_state == 2 && curr_state == 0 ) return false ;
if ( curr_state == 3 && prev_state != 1 ) return ( index + 1 == str . length ) ;
index ++ ;
prev_state = curr_state ;
}
return false ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = Arrays.asList("I love cinema.", "The vertex is S.",
"I am single.", "My name is KG.",
"I lovE cinema.", "GeeksQuiz. is a quiz site.",
"I love Geeksquiz and Geeksforgeeks.",
" You are my friend.", "I love cinema", "Hello world !");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i).toCharArray()) == f_gold(param0.get(i).toCharArray()))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,204 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CHECK_EXIST_TWO_ELEMENTS_ARRAY_WHOSE_SUM_EQUAL_SUM_REST_ARRAY.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class CHECK_EXIST_TWO_ELEMENTS_ARRAY_WHOSE_SUM_EQUAL_SUM_REST_ARRAY{
static boolean f_gold ( int arr [ ] , int n ) {
int sum = 0 ;
for ( int i = 0 ;
i < n ;
i ++ ) {
sum += arr [ i ] ;
}
if ( sum % 2 != 0 ) {
return false ;
}
sum = sum / 2 ;
HashSet < Integer > s = new HashSet < Integer > ( ) ;
for ( int i = 0 ;
i < n ;
i ++ ) {
int val = sum - arr [ i ] ;
if ( s . contains ( val ) && val == ( int ) s . toArray ( ) [ s . size ( ) - 1 ] ) {
System . out . printf ( "Pair elements are %d and %d\n" , arr [ i ] , val ) ;
return true ;
}
s . add ( arr [ i ] ) ;
}
return false ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{2, 11, 5, 1, 4, 7});
param0.add(new int[]{2, 4, 2, 1, 11, 15});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{69,6,24,30,75,37,61,76,19,18,90,9,49,24,58,97,18,85,24,93,71,98,92,59,75,75,75,70,35,58,50,1,64,66,33});
param0.add(new int[]{-94,-94,-92,-74,-60,-58,-56,-44,-42,-40,-28,-14,2,4,14,20,24,28,40,42,42,66,78,78,80,82,96});
param0.add(new int[]{1,0,1,1,0,0,1,1,0,0,1,1,0,1});
param0.add(new int[]{21,26,26,27,61,62,96});
param0.add(new int[]{-54,86,20,26});
param0.add(new int[]{0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{44,35,26,15,56,6,36,53,15,66,20,53,99,96,51,12,61,19,79,40,99,42,86,8,11,54,93,46,23,47,41,26,66,5,86,52,64,51,4,21,63,14,7,53,31,8,9,63});
List<Integer> param1 = new ArrayList<>();
param1.add(6);
param1.add(6);
param1.add(13);
param1.add(18);
param1.add(26);
param1.add(10);
param1.add(6);
param1.add(3);
param1.add(4);
param1.add(31);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,205 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CHECK_POSSIBLE_TRANSFORM_ONE_STRING_ANOTHER.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class CHECK_POSSIBLE_TRANSFORM_ONE_STRING_ANOTHER{
static boolean f_gold ( String s1 , String s2 ) {
int n = s1 . length ( ) ;
int m = s2 . length ( ) ;
boolean dp [ ] [ ] = new boolean [ n + 1 ] [ m + 1 ] ;
for ( int i = 0 ;
i <= n ;
i ++ ) {
for ( int j = 0 ;
j <= m ;
j ++ ) {
dp [ i ] [ j ] = false ;
}
}
dp [ 0 ] [ 0 ] = true ;
for ( int i = 0 ;
i < s1 . length ( ) ;
i ++ ) {
for ( int j = 0 ;
j <= s2 . length ( ) ;
j ++ ) {
if ( dp [ i ] [ j ] ) {
if ( j < s2 . length ( ) && ( Character . toUpperCase ( s1 . charAt ( i ) ) == s2 . charAt ( j ) ) ) dp [ i + 1 ] [ j + 1 ] = true ;
if ( ! Character . isUpperCase ( s1 . charAt ( i ) ) ) dp [ i + 1 ] [ j ] = true ;
}
}
}
return ( dp [ n ] [ m ] ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("daBcd");
param0.add("417514");
param0.add("010000");
param0.add("ZcKYguiMrdyn");
param0.add("argaju");
param0.add("1110101101");
param0.add("ySOCoSaygi");
param0.add("204");
param0.add("10011100000010");
param0.add("nMAioozPmY");
List<String> param1 = new ArrayList<>();
param1.add("ABC");
param1.add("9");
param1.add("1111011010");
param1.add("iz");
param1.add("RAJ");
param1.add("110101001");
param1.add("aRhxkYqh");
param1.add("6986871066");
param1.add("0");
param1.add("WZFdDKw");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,206 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/FIND_SUM_EVEN_FACTORS_NUMBER.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class FIND_SUM_EVEN_FACTORS_NUMBER{
public static int f_gold ( int n ) {
if ( n % 2 != 0 ) return 0 ;
int res = 1 ;
for ( int i = 2 ;
i <= Math . sqrt ( n ) ;
i ++ ) {
int count = 0 , curr_sum = 1 ;
int curr_term = 1 ;
while ( n % i == 0 ) {
count ++ ;
n = n / i ;
if ( i == 2 && count == 1 ) curr_sum = 0 ;
curr_term *= i ;
curr_sum += curr_term ;
}
res *= curr_sum ;
}
if ( n >= 2 ) res *= ( 1 + n ) ;
return res ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(71);
param0.add(78);
param0.add(39);
param0.add(36);
param0.add(49);
param0.add(17);
param0.add(53);
param0.add(66);
param0.add(92);
param0.add(71);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,207 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CALCULATE_AREA_TETRAHEDRON.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class CALCULATE_AREA_TETRAHEDRON{
static double f_gold ( int side ) {
double volume = ( Math . pow ( side , 3 ) / ( 6 * Math . sqrt ( 2 ) ) ) ;
return volume ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(58);
param0.add(56);
param0.add(35);
param0.add(99);
param0.add(13);
param0.add(45);
param0.add(40);
param0.add(92);
param0.add(7);
param0.add(13);
for(int i = 0; i < param0.size(); ++i)
{
if(Math.abs(1 - (0.0000001 + Math.abs(f_gold(param0.get(i))) )/ (Math.abs(f_filled(param0.get(i))) + 0.0000001)) < 0.001)
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,208 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/DYNAMIC_PROGRAMMING_SET_13_CUTTING_A_ROD.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class DYNAMIC_PROGRAMMING_SET_13_CUTTING_A_ROD{
static int f_gold ( int price [ ] , int n ) {
int val [ ] = new int [ n + 1 ] ;
val [ 0 ] = 0 ;
for ( int i = 1 ;
i <= n ;
i ++ ) {
int max_val = Integer . MIN_VALUE ;
for ( int j = 0 ;
j < i ;
j ++ ) max_val = Math . max ( max_val , price [ j ] + val [ i - j - 1 ] ) ;
val [ i ] = max_val ;
}
return val [ n ] ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{5,7,15,16,18,22,22,30,34,35,37,41,42,42,43,47,49,52,53,55,58,60,62,62,62,65,65,67,69,73,73,73,75,78,83,84,86,90,91,91,93,94,96});
param0.add(new int[]{50,-30,-84,-2,-96,-54,-14,56,-48,70,38,-86,16,-48,66,34,36,40,40,36,-16,-92,30});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{79,33,54,12,53,9,29,45,85,20,6,52,8,26,43,42,17,54,8,70,5,71,1,81,42,59,42,63,8,86,29,16,72});
param0.add(new int[]{-78,-64,-38,-22,2,8,28,32,58,72,72,90});
param0.add(new int[]{1,0,1,1,1,0,0,1,0,0,1,1,0,1,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0});
param0.add(new int[]{1,3,6,7,10,17,18,22,23,24,28,31,37,43,48,54,56,65,70,71,73,74,79,84,87,95,96});
param0.add(new int[]{-30,20,-72,-86,-8});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{96,99,20,87,17,13,45,65,33,13,59,77,35,79,20,51,69,71,55,37,23,35,82,70});
List<Integer> param1 = new ArrayList<>();
param1.add(37);
param1.add(19);
param1.add(29);
param1.add(22);
param1.add(11);
param1.add(20);
param1.add(21);
param1.add(3);
param1.add(21);
param1.add(19);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,209 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/SQUARED_TRIANGULAR_NUMBER_SUM_CUBES.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class SQUARED_TRIANGULAR_NUMBER_SUM_CUBES{
static int f_gold ( int s ) {
int sum = 0 ;
for ( int n = 1 ;
sum < s ;
n ++ ) {
sum += n * n * n ;
if ( sum == s ) return n ;
}
return - 1 ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(15);
param0.add(36);
param0.add(39);
param0.add(43);
param0.add(75);
param0.add(49);
param0.add(56);
param0.add(14);
param0.add(62);
param0.add(97);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,210 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/FREQUENT_ELEMENT_ARRAY.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class FREQUENT_ELEMENT_ARRAY{
static int f_gold ( int arr [ ] , int n ) {
Arrays . sort ( arr ) ;
int max_count = 1 , res = arr [ 0 ] ;
int curr_count = 1 ;
for ( int i = 1 ;
i < n ;
i ++ ) {
if ( arr [ i ] == arr [ i - 1 ] ) curr_count ++ ;
else {
if ( curr_count > max_count ) {
max_count = curr_count ;
res = arr [ i - 1 ] ;
}
curr_count = 1 ;
}
}
if ( curr_count > max_count ) {
max_count = curr_count ;
res = arr [ n - 1 ] ;
}
return res ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{1,1,3,11,11,11,18,20,26,26,27,30,33,39,39,42,42,48,51,51,51,51,60,66,66,68,68,69,71,72,73,76,76,77,77,77,78,90,96});
param0.add(new int[]{46,-8,64,-46,-38,92,-14,-22,-32,48,72,96,30,66,94,36,42,-18,14,-74,80,96,-4});
param0.add(new int[]{0,0,0,0,0,0,1});
param0.add(new int[]{93,32,3,31,67,96,52,80,70,49,45,23,58,87,31,56,21,71,55,97});
param0.add(new int[]{-98,-96,-84,-82,-72,-64,-62,-56,-52,-52,-48,-46,-42,-36,-32,-30,-30,-18,-16,-10,-2,0,6,18,22,22,40,42,50,54,64,68,68,72,80,82,84,96});
param0.add(new int[]{1,1,0,0,0,1,0,0,0,1,0,0,0,1,1,1,1,0,0,1,0,0,1,0,0,1,0,1,0,0,0,1,1,1,0,0,1,0,1,1,0});
param0.add(new int[]{9,12,13,28,43,46,64,66,68,89,92});
param0.add(new int[]{22,-8,-56,68,-12,-26,-40,-46,-42,-80,4,-42,-72,-22,36,22,-94,48,96,80,-52,46,90,94,36,92,-12,-24,-60,-32,92,18,76,40,-32,6,-22,86,86,-88,38,50,32,78,-82,54,-40,18});
param0.add(new int[]{0,0,0,0,0,0,1,1,1});
param0.add(new int[]{81,74,32,41,85,65,81,74,40,64,97,4,61,43,54,96,62,2,97,86,80,25,9,31,16,29,4,63,76,41,5,95});
List<Integer> param1 = new ArrayList<>();
param1.add(25);
param1.add(18);
param1.add(6);
param1.add(15);
param1.add(20);
param1.add(29);
param1.add(6);
param1.add(41);
param1.add(4);
param1.add(16);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,211 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/FIND_SUM_EVEN_INDEX_BINOMIAL_COEFFICIENTS_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class FIND_SUM_EVEN_INDEX_BINOMIAL_COEFFICIENTS_1{
static int f_gold ( int n ) {
return ( 1 << ( n - 1 ) ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(56);
param0.add(28);
param0.add(4);
param0.add(24);
param0.add(72);
param0.add(30);
param0.add(48);
param0.add(32);
param0.add(13);
param0.add(19);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,212 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/PYTHON_PROGRAM_FIND_PERIMETER_CIRCUMFERENCE_SQUARE_RECTANGLE_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class PYTHON_PROGRAM_FIND_PERIMETER_CIRCUMFERENCE_SQUARE_RECTANGLE_1{
static int f_gold ( int l , int w ) {
return ( 2 * ( l + w ) ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(58);
param0.add(37);
param0.add(56);
param0.add(22);
param0.add(77);
param0.add(34);
param0.add(74);
param0.add(37);
param0.add(21);
param0.add(75);
List<Integer> param1 = new ArrayList<>();
param1.add(39);
param1.add(49);
param1.add(52);
param1.add(43);
param1.add(12);
param1.add(31);
param1.add(54);
param1.add(52);
param1.add(37);
param1.add(30);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,213 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/LONGEST_REPEATING_AND_NON_OVERLAPPING_SUBSTRING.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class LONGEST_REPEATING_AND_NON_OVERLAPPING_SUBSTRING{
static String f_gold ( String str ) {
int n = str . length ( ) ;
int LCSRe [ ] [ ] = new int [ n + 1 ] [ n + 1 ] ;
String res = "" ;
int res_length = 0 ;
int i , index = 0 ;
for ( i = 1 ;
i <= n ;
i ++ ) {
for ( int j = i + 1 ;
j <= n ;
j ++ ) {
if ( str . charAt ( i - 1 ) == str . charAt ( j - 1 ) && LCSRe [ i - 1 ] [ j - 1 ] < ( j - i ) ) {
LCSRe [ i ] [ j ] = LCSRe [ i - 1 ] [ j - 1 ] + 1 ;
if ( LCSRe [ i ] [ j ] > res_length ) {
res_length = LCSRe [ i ] [ j ] ;
index = Math . max ( i , index ) ;
}
}
else {
LCSRe [ i ] [ j ] = 0 ;
}
}
}
if ( res_length > 0 ) {
for ( i = index - res_length + 1 ;
i <= index ;
i ++ ) {
res += str . charAt ( i - 1 ) ;
}
}
return res ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("fbfHTjE");
param0.add("09285256323");
param0.add("0011000101110");
param0.add("ue JkVZTt");
param0.add("48387612426300");
param0.add("010");
param0.add("ddRrUz");
param0.add("1049162633793");
param0.add("100011");
param0.add("iJfadiVaQqv");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)).equals(f_gold(param0.get(i))))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,214 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/NUMBER_N_DIGIT_STEPPING_NUMBERS.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class NUMBER_N_DIGIT_STEPPING_NUMBERS{
static long f_gold ( int n ) {
int dp [ ] [ ] = new int [ n + 1 ] [ 10 ] ;
if ( n == 1 ) return 10 ;
for ( int j = 0 ;
j <= 9 ;
j ++ ) dp [ 1 ] [ j ] = 1 ;
for ( int i = 2 ;
i <= n ;
i ++ ) {
for ( int j = 0 ;
j <= 9 ;
j ++ ) {
if ( j == 0 ) dp [ i ] [ j ] = dp [ i - 1 ] [ j + 1 ] ;
else if ( j == 9 ) dp [ i ] [ j ] = dp [ i - 1 ] [ j - 1 ] ;
else dp [ i ] [ j ] = dp [ i - 1 ] [ j - 1 ] + dp [ i - 1 ] [ j + 1 ] ;
}
}
long sum = 0 ;
for ( int j = 1 ;
j <= 9 ;
j ++ ) sum += dp [ n ] [ j ] ;
return sum ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(18);
param0.add(66);
param0.add(73);
param0.add(70);
param0.add(26);
param0.add(41);
param0.add(20);
param0.add(25);
param0.add(52);
param0.add(13);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,215 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/PROGRAM_FIND_STRING_START_END_GEEKS.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class PROGRAM_FIND_STRING_START_END_GEEKS{
static boolean f_gold ( String str , String corner ) {
int n = str . length ( ) ;
int cl = corner . length ( ) ;
if ( n < cl ) return false ;
return ( str . substring ( 0 , cl ) . equals ( corner ) && str . substring ( n - cl , n ) . equals ( corner ) ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("geeksmanishgeeks");
param0.add("shreyadhatwalia");
param0.add("10000100");
param0.add("abaa");
param0.add("30645530");
param0.add("0000011011001");
param0.add("dkqEd");
param0.add("48694119324654");
param0.add("1101010010");
param0.add("Ks");
List<String> param1 = new ArrayList<>();
param1.add("geeks");
param1.add("abc");
param1.add("100");
param1.add("a");
param1.add("30");
param1.add("001");
param1.add("d");
param1.add("654");
param1.add("11");
param1.add("KsFLmngGGOmHKs");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,216 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/FIND_NUMBER_ENDLESS_POINTS.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class FIND_NUMBER_ENDLESS_POINTS{
// Counts "endless" points in an n x n boolean grid: cells (r, c) with c >= 1
// from which every cell to the right edge AND every cell to the bottom edge
// is true. Two DP tables are filled by scanning each line from its far end.
static int f_gold ( boolean input [ ] [ ] , int n ) {
// down[r][c]: all of column c from row r to the bottom is true.
boolean down [ ] [ ] = new boolean [ n ] [ n ] ;
// right[r][c]: all of row r from column c to the right edge is true.
boolean right [ ] [ ] = new boolean [ n ] [ n ] ;
for ( int c = 0 ; c < n ; c ++ ) {
boolean clear = true ;
for ( int r = n - 1 ; r >= 0 ; r -- ) {
clear = clear && input [ r ] [ c ] ;
down [ r ] [ c ] = clear ;
}
}
for ( int r = 0 ; r < n ; r ++ ) {
boolean clear = true ;
for ( int c = n - 1 ; c >= 0 ; c -- ) {
clear = clear && input [ r ] [ c ] ;
right [ r ] [ c ] = clear ;
}
}
// Tally cells where both runs are unbroken; column 0 is excluded,
// matching the reference implementation's j = 1 starting point.
int count = 0 ;
for ( int r = 0 ; r < n ; r ++ ) {
for ( int c = 1 ; c < n ; c ++ ) {
if ( right [ r ] [ c ] && down [ r ] [ c ] ) {
count ++ ;
}
}
}
return count ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<boolean [ ] [ ]> param0 = new ArrayList<>();
param0.add(new boolean[][]{new boolean[]{false,false,false,true},new boolean[]{false,true,true,true},new boolean[]{false,false,true,true},new boolean[]{true,true,true,true}});
param0.add(new boolean[][]{new boolean[]{true,false,true,true,true,true,false,false,false},new boolean[]{false,true,true,true,true,true,false,true,true},new boolean[]{false,true,false,true,false,true,true,true,false},new boolean[]{false,false,false,false,true,true,false,false,true},new boolean[]{true,true,true,true,false,true,true,false,false},new boolean[]{false,false,true,true,false,true,false,false,true},new boolean[]{true,true,false,false,false,true,true,false,true},new boolean[]{false,true,true,false,false,false,false,false,false},new boolean[]{true,false,false,false,true,true,false,false,true}});
param0.add(new boolean[][]{new boolean[]{false,false,false,true},new boolean[]{false,false,true,true},new boolean[]{false,false,false,true},new boolean[]{false,false,true,true}});
param0.add(new boolean[][]{new boolean[]{false,true,true,false,false,true,false,false,true,false,false,false,true,false,false,true,false,true,true,false,false,true,true,true,true,true,true,false,false,true,false,false,false,true,false,true,true,true,false},new boolean[]{false,true,true,true,false,false,true,false,true,true,true,true,true,false,true,false,false,true,false,true,true,true,true,false,false,true,false,true,true,false,true,false,true,false,false,false,true,true,false},new boolean[]{false,true,true,false,true,false,false,true,false,true,true,false,true,true,false,true,false,true,false,true,false,true,true,false,true,false,true,false,true,false,false,false,false,true,false,false,true,true,true},new boolean[]{false,false,false,true,false,true,true,true,true,false,true,true,true,false,false,false,false,true,false,false,false,true,true,false,true,true,false,true,false,false,false,false,false,true,true,true,false,false,false},new boolean[]{true,false,false,true,false,false,false,true,false,true,true,false,false,true,true,true,false,false,false,false,false,true,true,false,true,false,false,true,false,true,false,false,false,true,false,true,false,true,false},new boolean[]{false,true,true,false,false,true,false,true,false,true,false,true,true,true,true,true,false,true,false,false,false,true,true,true,false,false,false,false,true,true,true,false,true,false,true,true,false,true,true},new boolean[]{false,false,true,false,true,true,true,true,false,true,true,true,true,false,false,true,false,true,false,false,false,true,true,true,false,true,true,true,false,false,false,false,false,true,true,false,true,true,false},new boolean[]{false,true,false,false,true,false,false,false,true,false,true,true,true,false,true,true,false,false,false,true,true,true,false,true,false,false,true,false,true,false,false,true,false,true,true,false,true,false,true},new 
boolean[]{true,true,true,false,true,true,true,false,false,false,false,true,true,false,false,false,true,false,false,true,false,false,false,true,true,false,false,false,true,true,false,true,false,true,false,false,false,true,false},new boolean[]{false,false,true,false,true,true,true,false,true,false,false,false,true,false,true,false,true,false,false,false,false,true,false,false,true,false,true,false,false,true,false,true,true,false,true,false,false,false,false},new boolean[]{true,false,true,true,true,false,true,true,false,true,false,true,false,false,false,true,true,true,true,true,false,true,true,false,true,true,true,true,false,false,true,false,false,false,false,true,false,false,false},new boolean[]{false,true,true,false,true,false,true,true,true,true,false,false,false,false,true,false,true,true,true,false,true,false,false,true,true,true,true,false,false,true,false,false,true,false,false,true,false,true,true},new boolean[]{false,false,false,false,true,false,false,true,true,true,false,true,true,false,true,false,false,false,true,true,true,true,true,false,false,true,false,false,true,false,true,false,false,false,true,true,true,false,false},new boolean[]{false,true,false,true,false,true,true,true,false,false,true,true,true,false,false,true,true,false,true,true,false,true,false,true,true,false,false,true,false,false,true,false,false,true,true,false,false,false,true},new boolean[]{false,false,true,false,true,true,false,false,false,true,true,true,true,true,false,true,false,false,false,false,false,false,true,false,false,false,false,false,true,true,false,false,false,true,false,true,true,false,false},new boolean[]{false,true,false,true,true,true,true,false,false,false,true,true,false,true,true,false,false,true,false,true,true,true,true,true,false,true,false,true,true,true,false,false,true,true,false,false,false,false,false},new 
boolean[]{true,true,false,false,true,true,true,false,false,false,true,true,true,true,false,true,false,false,true,true,false,true,true,true,false,true,true,false,false,false,true,true,false,false,false,false,true,false,true},new boolean[]{false,false,false,true,false,false,true,false,true,true,false,true,true,true,false,true,false,false,true,true,false,false,true,false,false,true,false,false,false,true,false,false,false,true,false,false,false,false,false},new boolean[]{false,true,false,false,true,false,true,true,true,false,true,true,true,true,true,false,false,false,true,false,true,true,true,false,true,false,true,false,false,true,true,true,true,true,false,true,true,true,true},new boolean[]{true,false,true,false,true,true,false,false,false,true,true,false,true,true,true,true,true,false,false,true,false,true,false,true,true,true,true,true,false,false,true,true,false,true,false,true,false,false,false},new boolean[]{true,true,false,false,false,false,false,true,true,true,false,true,false,true,true,true,false,true,false,true,true,false,true,true,true,false,false,true,true,true,false,true,false,true,true,false,true,false,true},new boolean[]{false,false,false,false,true,true,true,false,false,true,true,true,false,false,true,true,true,false,true,false,false,true,false,false,true,false,true,true,true,true,false,true,true,false,false,true,false,true,true},new boolean[]{false,true,true,false,true,true,true,true,false,false,true,false,false,true,true,true,false,false,false,true,true,true,false,true,true,true,true,false,true,false,true,false,false,false,true,false,false,true,true},new boolean[]{true,false,false,false,false,true,true,false,false,true,false,false,true,true,false,false,true,true,true,false,true,true,false,false,true,false,true,false,false,true,true,true,true,true,false,false,true,true,true},new 
boolean[]{true,true,true,false,false,true,false,true,false,true,true,true,true,false,false,true,true,true,false,false,false,true,false,false,false,false,false,true,true,true,false,true,true,false,false,false,true,true,true},new boolean[]{true,false,true,true,true,false,false,true,true,false,false,false,true,true,false,true,false,true,true,true,false,false,false,true,false,false,true,true,true,false,true,false,false,true,true,true,false,false,true},new boolean[]{false,false,false,true,true,false,false,false,true,true,false,false,false,true,false,true,false,false,false,false,true,true,true,true,true,true,true,false,false,false,false,false,false,false,false,true,false,false,true},new boolean[]{false,false,false,true,false,false,false,true,false,false,true,false,false,true,false,true,true,false,true,true,true,true,true,true,false,false,false,true,true,true,true,false,false,false,false,false,true,true,true},new boolean[]{false,true,false,true,true,false,true,true,true,true,true,true,false,false,true,true,true,true,false,false,true,false,true,false,true,true,true,true,true,true,false,true,true,true,true,false,true,true,false},new boolean[]{true,false,false,true,false,true,true,true,true,false,false,true,false,false,false,true,true,true,false,false,true,false,false,false,false,true,false,true,true,false,false,true,false,false,true,true,true,true,true},new boolean[]{false,true,true,true,false,false,true,false,false,true,false,false,true,true,true,false,false,true,false,false,false,true,false,true,true,true,false,true,false,false,true,true,false,false,false,true,false,true,false},new boolean[]{false,false,true,false,true,false,false,false,false,true,false,false,false,true,true,false,false,true,false,false,true,false,true,false,true,false,false,false,true,true,false,true,false,false,false,true,false,true,true},new 
boolean[]{false,true,false,false,true,true,true,true,true,true,false,false,true,false,true,false,false,true,true,true,true,false,false,true,false,true,false,true,true,true,true,true,true,false,true,false,false,true,true},new boolean[]{false,false,false,true,true,true,false,false,false,false,true,true,false,true,false,false,true,false,false,false,true,true,true,true,false,true,false,true,true,true,false,true,true,true,false,false,false,false,false},new boolean[]{false,false,true,true,true,false,true,false,true,true,true,true,false,true,false,true,false,false,true,false,false,true,false,true,false,true,false,true,true,false,false,false,true,false,false,false,true,false,true},new boolean[]{false,false,false,false,true,true,false,true,false,true,false,true,true,true,false,false,false,true,false,false,true,false,false,false,false,false,true,false,true,true,true,false,false,true,true,true,true,true,false},new boolean[]{true,true,true,true,false,false,false,true,false,false,false,true,false,false,true,false,false,false,false,false,true,true,false,false,false,false,false,true,true,true,true,true,true,true,true,false,true,true,true},new boolean[]{true,false,false,true,true,false,true,false,false,false,true,false,true,false,false,false,false,true,true,false,false,false,true,false,false,true,true,true,false,true,true,false,false,false,false,true,false,false,false},new boolean[]{true,true,false,true,true,false,true,true,false,false,true,true,true,false,true,false,true,false,true,false,true,false,true,true,true,true,false,false,false,false,false,true,true,false,false,true,true,false,false}});
param0.add(new boolean[][]{new boolean[]{false,false,false,false,false,true,true,true,true},new boolean[]{false,false,false,false,true,true,true,true,true},new boolean[]{false,false,false,false,false,true,true,true,true},new boolean[]{false,false,false,false,false,true,true,true,true},new boolean[]{false,false,false,false,false,false,true,true,true},new boolean[]{true,true,true,true,true,true,true,true,true},new boolean[]{false,false,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,true,true,true,true},new boolean[]{false,false,false,false,false,false,true,true,true}});
param0.add(new boolean[][]{new boolean[]{false,true,true,true,true,false,false,true,false,false,false,true,true,false,true,false,false,false,false,true,true,true,true,false,false},new boolean[]{false,true,false,false,false,false,true,true,true,true,false,true,true,false,true,true,true,false,true,false,true,true,false,false,true},new boolean[]{true,false,false,false,true,false,false,true,true,false,true,false,true,true,false,false,true,false,true,true,true,false,false,true,true},new boolean[]{false,true,true,false,true,true,true,true,false,true,false,false,false,true,false,false,false,false,true,false,true,true,false,true,false},new boolean[]{true,true,true,false,true,true,false,false,true,true,false,false,false,true,true,false,true,false,false,true,true,false,false,true,false},new boolean[]{true,false,false,true,false,false,true,false,true,true,true,false,false,true,false,true,true,false,false,false,false,false,true,true,false},new boolean[]{true,false,false,false,false,false,false,false,true,false,true,false,true,false,false,false,true,true,true,true,false,true,false,false,false},new boolean[]{true,false,true,false,false,false,false,false,true,false,true,false,true,false,true,false,false,true,true,false,false,true,false,true,false},new boolean[]{true,true,true,false,true,true,true,false,false,false,true,true,false,true,true,false,true,true,false,false,false,true,false,true,false},new boolean[]{true,false,true,false,true,false,true,true,false,true,true,false,false,true,true,true,true,true,false,false,true,false,true,true,false},new boolean[]{true,false,false,false,true,true,false,false,true,false,false,false,true,true,false,true,false,false,true,true,false,false,false,false,true},new boolean[]{false,true,false,true,true,false,true,false,false,true,false,false,false,false,false,true,false,true,true,true,false,true,true,false,false},new 
boolean[]{true,false,false,true,true,false,false,true,false,true,false,false,false,true,false,false,true,true,false,true,true,true,true,true,false},new boolean[]{false,true,true,true,true,false,false,false,false,true,true,true,true,false,true,true,false,false,true,true,true,true,true,true,false},new boolean[]{true,false,true,false,false,true,false,true,true,true,true,false,true,true,false,true,false,true,true,false,true,true,true,false,true},new boolean[]{true,true,true,false,false,false,true,false,true,false,true,false,true,true,false,false,true,true,true,false,false,true,true,false,true},new boolean[]{false,false,true,true,true,false,false,false,true,true,false,true,true,true,false,true,false,true,true,false,false,false,false,false,false},new boolean[]{false,false,false,true,true,true,true,false,false,true,true,true,false,true,true,false,true,true,true,false,false,true,false,true,false},new boolean[]{false,false,true,false,false,true,false,true,false,false,false,false,true,false,false,false,false,true,false,true,false,false,true,false,false},new boolean[]{false,false,true,true,false,false,false,true,true,true,false,false,true,false,false,true,true,false,false,false,false,true,false,true,false},new boolean[]{true,false,false,false,false,true,false,true,false,false,false,false,true,false,true,false,false,true,true,true,false,false,false,true,true},new boolean[]{false,true,false,false,true,false,false,true,false,true,true,true,true,false,true,false,true,true,false,true,true,false,false,false,false},new boolean[]{true,false,true,true,false,true,true,true,true,true,true,false,false,true,true,true,false,false,false,true,false,true,true,false,false},new boolean[]{true,true,true,false,true,false,true,true,true,false,true,true,true,false,false,false,false,true,false,true,true,true,true,false,true},new boolean[]{true,true,true,true,false,true,false,false,false,true,false,false,true,false,true,false,true,true,false,false,false,true,false,false,true}});
param0.add(new boolean[][]{new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new 
boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true},new 
boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true,true,true},new boolean[]{false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,true,true,true,true,true,true,true,true,true,true}});
param0.add(new boolean[][]{new boolean[]{true,false,true,false,false,false,true,true,false,true,false,true,true,true,false,false,false,false,true,false,true,false,false,true,false,false,true},new boolean[]{false,true,false,true,false,false,true,true,false,false,false,true,true,false,true,false,true,false,true,false,true,false,false,false,false,true,true},new boolean[]{true,false,true,true,true,false,false,true,true,true,true,true,true,true,true,false,false,false,true,false,true,true,false,false,true,false,true},new boolean[]{true,true,false,false,false,false,false,false,false,false,false,false,false,false,false,false,true,false,true,false,false,true,true,true,false,false,true},new boolean[]{true,false,true,false,false,true,true,false,false,true,true,true,false,false,false,true,false,true,false,true,true,true,true,true,true,false,false},new boolean[]{true,false,false,true,true,false,false,true,false,true,true,true,true,false,false,true,true,true,false,false,true,false,false,true,true,true,false},new boolean[]{true,false,true,true,true,true,false,true,false,false,false,true,true,false,true,true,false,true,true,false,true,false,false,false,false,false,false},new boolean[]{true,false,false,true,false,false,true,true,true,false,true,false,false,false,true,true,true,true,false,true,false,false,true,false,true,false,true},new boolean[]{false,true,false,true,false,true,true,true,false,true,true,false,false,false,true,false,false,false,false,true,true,false,true,false,true,false,true},new boolean[]{false,true,true,true,false,true,false,true,false,true,true,false,true,true,true,true,true,true,true,false,true,true,false,false,true,false,true},new boolean[]{false,true,false,false,true,true,false,false,false,false,true,true,false,false,true,false,false,true,true,true,true,false,false,true,false,false,true},new boolean[]{false,false,true,true,false,true,true,true,false,false,false,false,true,false,true,false,true,false,false,true,false,false,true,true,true,false,false},new 
boolean[]{true,true,false,false,true,true,false,false,true,true,true,false,false,true,true,false,false,false,true,false,false,false,true,false,false,false,true},new boolean[]{false,true,true,true,false,true,true,true,false,false,false,false,false,true,true,false,false,false,false,false,true,false,true,true,false,true,false},new boolean[]{true,true,true,true,true,true,true,true,true,false,true,true,true,true,false,false,false,false,false,true,false,false,false,false,false,false,true},new boolean[]{false,false,false,true,false,false,false,false,false,true,false,false,false,false,false,false,false,true,false,true,false,true,false,true,false,true,false},new boolean[]{true,true,false,true,true,true,true,true,true,false,false,true,true,false,true,true,false,false,false,false,false,true,true,false,false,false,false},new boolean[]{false,false,false,false,true,true,true,false,true,true,false,true,false,false,true,true,false,false,false,false,true,true,false,true,true,false,false},new boolean[]{true,false,true,true,false,true,false,false,false,false,false,false,false,false,true,false,true,true,false,true,true,true,true,false,false,false,true},new boolean[]{true,false,false,false,true,false,true,false,true,true,false,false,false,true,false,true,true,true,false,false,false,true,false,true,true,false,true},new boolean[]{true,false,true,true,true,true,false,true,true,false,true,true,true,false,false,true,true,false,false,false,false,false,true,false,true,true,true},new boolean[]{true,true,false,false,false,true,false,true,true,true,true,false,true,true,true,true,true,true,false,false,false,false,true,true,false,false,false},new boolean[]{true,false,false,false,false,false,false,true,true,true,false,true,false,false,false,false,true,false,false,false,true,true,false,true,true,true,true},new boolean[]{false,true,true,true,true,false,false,false,true,true,false,true,false,false,false,true,false,false,true,true,true,false,false,false,true,true,true},new 
boolean[]{false,false,true,true,false,true,true,false,false,true,true,true,false,false,true,false,true,true,true,true,false,true,true,true,true,false,false},new boolean[]{true,true,false,true,false,true,false,true,true,false,false,true,false,false,true,true,false,false,true,true,false,true,true,true,true,false,false},new boolean[]{true,false,true,false,true,true,true,true,true,false,false,false,false,false,false,true,true,false,false,false,false,false,false,true,false,true,true}});
param0.add(new boolean[][]{new boolean[]{false,false,false,true},new boolean[]{false,true,true,true},new boolean[]{false,false,false,true},new boolean[]{false,true,true,true}});
param0.add(new boolean[][]{new boolean[]{true,true,false,false,true,true,true,true,true,false,true,true,false,true,true,false,false,false,false,false,true,false,true,false,true,true,false,true},new boolean[]{false,false,true,true,false,false,false,true,true,false,false,true,false,true,false,false,true,true,false,false,true,true,true,true,false,true,false,false},new boolean[]{true,true,false,false,false,true,false,true,true,true,false,true,false,true,false,false,true,true,false,true,true,false,true,true,false,false,false,false},new boolean[]{true,false,true,false,true,false,true,false,false,true,true,true,true,true,true,false,true,false,false,true,false,false,false,true,false,true,false,true},new boolean[]{true,true,true,true,false,false,false,true,true,false,true,false,true,false,true,true,true,true,false,false,true,true,true,true,false,true,true,true},new boolean[]{true,false,true,true,true,true,true,true,false,true,false,false,false,false,false,true,false,true,true,false,true,true,false,true,false,false,false,true},new boolean[]{true,true,false,false,false,true,true,false,true,false,true,false,false,false,true,true,true,false,false,true,true,false,true,false,false,false,true,false},new boolean[]{false,true,true,true,true,false,false,true,false,false,false,false,false,false,false,false,true,false,true,false,false,true,false,true,true,true,false,true},new boolean[]{true,false,true,false,false,false,true,false,true,true,true,true,false,true,true,true,false,false,true,true,false,false,false,false,true,false,false,false},new boolean[]{false,false,true,true,false,true,false,false,true,true,true,true,false,false,true,false,false,true,true,false,true,false,true,true,false,true,true,true},new boolean[]{true,false,true,true,true,true,false,true,true,true,false,true,true,false,false,false,true,false,true,true,true,true,true,false,false,false,false,false},new 
boolean[]{false,false,false,false,true,false,true,true,true,false,false,false,false,true,false,false,true,true,false,true,true,true,true,true,true,true,true,false},new boolean[]{false,false,false,true,true,false,false,true,false,false,false,false,true,true,true,true,false,false,true,true,true,true,true,true,true,true,false,false},new boolean[]{false,true,true,true,true,true,true,true,true,true,true,true,true,true,false,true,true,true,true,true,false,false,false,false,true,false,true,false},new boolean[]{false,true,false,false,false,true,true,false,false,true,false,true,false,true,false,true,true,false,true,true,false,false,true,false,true,false,false,true},new boolean[]{true,true,false,true,true,true,true,true,false,false,false,true,true,false,false,true,true,true,false,false,false,false,true,false,true,true,false,true},new boolean[]{true,false,true,false,false,false,true,true,false,true,true,false,true,true,true,true,true,true,true,false,false,false,false,false,false,false,false,true},new boolean[]{true,false,true,false,true,false,false,false,true,true,true,false,true,true,true,false,false,false,false,false,true,true,true,true,true,true,false,false},new boolean[]{true,false,true,false,true,true,true,false,false,false,false,false,false,false,true,true,false,false,false,true,true,true,true,false,true,false,false,false},new boolean[]{false,false,true,false,true,false,true,false,true,true,false,true,true,true,false,false,true,true,true,false,false,false,false,false,false,false,false,false},new boolean[]{true,false,true,false,true,true,true,true,false,true,true,false,false,true,true,false,true,false,true,true,true,true,true,true,false,false,true,false},new boolean[]{true,false,false,true,false,false,false,false,false,true,true,false,false,true,false,false,true,false,true,false,true,false,true,true,false,true,false,false},new 
boolean[]{false,true,true,true,true,true,true,false,false,true,true,false,true,false,true,true,true,false,true,true,true,true,false,true,false,false,false,false},new boolean[]{true,true,false,false,true,true,false,false,true,false,false,false,true,false,false,false,false,false,true,true,true,false,true,true,false,false,true,false},new boolean[]{false,true,true,true,true,true,true,true,false,true,false,false,false,true,true,false,false,true,true,false,false,true,false,true,true,false,true,false},new boolean[]{true,true,true,true,true,true,true,true,false,true,false,false,true,false,true,false,true,true,true,true,false,false,true,false,true,false,true,true},new boolean[]{true,false,true,true,true,false,false,true,false,true,true,false,false,false,true,true,true,false,false,true,false,false,true,true,true,true,false,true},new boolean[]{false,true,true,false,false,false,true,true,true,true,false,true,true,false,false,false,true,true,true,true,false,true,true,true,true,false,true,false}});
List<Integer> param1 = new ArrayList<>();
param1.add(2);
param1.add(4);
param1.add(2);
param1.add(30);
param1.add(7);
param1.add(13);
param1.add(19);
param1.add(15);
param1.add(3);
param1.add(18);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,217 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CHECK_STRING_FOLLOWS_ANBN_PATTERN_NOT.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class CHECK_STRING_FOLLOWS_ANBN_PATTERN_NOT{
// Returns true iff s matches the pattern a^n b^n for some n >= 0:
// an even-length string whose first half is all 'a' and second half all 'b'.
// The empty string matches (n == 0), as in the reference implementation.
public static boolean f_gold ( String s ) {
int len = s . length ( ) ;
// Odd length can never split into equal halves.
if ( ( len & 1 ) != 0 ) {
return false ;
}
int half = len / 2 ;
// Pair position k with its mirror len-1-k: the front must be 'a',
// the back must be 'b'. Checking k < half covers every character once.
for ( int k = 0 ; k < half ; k ++ ) {
if ( s . charAt ( k ) != 'a' || s . charAt ( len - 1 - k ) != 'b' ) {
return false ;
}
}
return true ;
}
//TOFILL
// Evaluation harness: runs the candidate f_filled (spliced in at //TOFILL)
// against the reference f_gold on each fixture and prints how many agree.
public static void main(String args[]) {
int n_success = 0;
// param0: strings tested for the a^n b^n pattern.
List<String> param0 = new ArrayList<>();
param0.add("ba");
param0.add("aabb");
param0.add("abab");
param0.add("aaabb");
param0.add("aabbb");
param0.add("abaabbaa");
param0.add("abaababb");
param0.add("bbaa");
param0.add("11001000");
param0.add("ZWXv te");
// Count the cases where candidate and reference return the same result.
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
// Machine-readable summary line consumed by the evaluation driver.
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,218 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/MAXIMUM_SUBARRAY_SUM_USING_PREFIX_SUM.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class MAXIMUM_SUBARRAY_SUM_USING_PREFIX_SUM{
static int f_gold ( int arr [ ] , int n ) {
int min_prefix_sum = 0 ;
int res = Integer . MIN_VALUE ;
int prefix_sum [ ] = new int [ n ] ;
prefix_sum [ 0 ] = arr [ 0 ] ;
for ( int i = 1 ;
i < n ;
i ++ ) prefix_sum [ i ] = prefix_sum [ i - 1 ] + arr [ i ] ;
for ( int i = 0 ;
i < n ;
i ++ ) {
res = Math . max ( res , prefix_sum [ i ] - min_prefix_sum ) ;
min_prefix_sum = Math . min ( min_prefix_sum , prefix_sum [ i ] ) ;
}
return res ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{8,9,11,17,18,19,23,24,27,30,31,31,35,44,46,47,49,51,55,58,59,61,65,67,71,71,71,71,78,78,82,91,98});
param0.add(new int[]{-82,-28,-66,-52,-36,36,-88,52,-62,46,42,26,-60,18,-52,38,94,-68,44,-94,14,36,-70});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{28,36,42,42,5,52,74,86,55,82,59,81,4,90,24,34,20,99,86,25,52,48,62,5,67,83,60,72,80,73,38,55,8,70,95});
param0.add(new int[]{-92,-52,-24,36,56});
param0.add(new int[]{0,1,1,1,0,1,0,1,0,0,1,1,0,1,1,0,0,0});
param0.add(new int[]{1,1,4,4,7,7,17,18,20,26,26,32,37,38,42,44,44,46,50,53,57,58,58,60,61,61,64,74,75,77,83,83,84,84,85,87,88,90,95,96,97,98,99,99});
param0.add(new int[]{-86,2,26,54,-16,16,48,24,50,-10,-32,-62,48,-12,-66});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{58,14,79,11,31,28,61,86,25,27,75,78,32,55,86,48,15,51,6,78,23,82,16,62,35,51,91,16,79,38,97,30,23,58,95,57,82,35,57,43,22,41,58,69,25,65,13,79});
List<Integer> param1 = new ArrayList<>();
param1.add(20);
param1.add(15);
param1.add(19);
param1.add(19);
param1.add(3);
param1.add(13);
param1.add(25);
param1.add(13);
param1.add(14);
param1.add(39);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,219 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_INDEX_PAIRS_EQUAL_ELEMENTS_ARRAY_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_INDEX_PAIRS_EQUAL_ELEMENTS_ARRAY_1{
public static int f_gold ( int arr [ ] , int n ) {
HashMap < Integer , Integer > hm = new HashMap < > ( ) ;
for ( int i = 0 ;
i < n ;
i ++ ) {
if ( hm . containsKey ( arr [ i ] ) ) hm . put ( arr [ i ] , hm . get ( arr [ i ] ) + 1 ) ;
else hm . put ( arr [ i ] , 1 ) ;
}
int ans = 0 ;
for ( Map . Entry < Integer , Integer > it : hm . entrySet ( ) ) {
int count = it . getValue ( ) ;
ans += ( count * ( count - 1 ) ) / 2 ;
}
return ans ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{5,11,18,22,40,46,50,51,53,55,64,67,73,78,86});
param0.add(new int[]{14,-98,98,58,-82,90,-80,-56,-30,-36,-56,-30,-58,68,72,-76,38,-90,-72,4,-32,32,-28,2,12,-72,54,2,0,-74,8,12,46,72,-84,-66,70,18,26,72,-26,44,-8,20,-32,-56,28});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{93,23,62,64,31,78,99});
param0.add(new int[]{-94,-94,-92,-86,-84,-76,-76,-68,-66,-56,-56,-54,-50,-46,-38,-34,-34,-30,-26,-18,-16,2,8,42,52,54,56,64,68,82,82,82,94,96,98});
param0.add(new int[]{0});
param0.add(new int[]{3,18,18,20,21,23,24,27,35,36,38,40,46,50,50,51,52,53,59,61,63,63,65,66,68,68,70,71,74,75,96,98});
param0.add(new int[]{-68,40,16,50,36,42,-20,-46,-92,4,-18,-12,48,0,-46,64,-74,-50,42,44,-56,28,-10,78,62,70,-60,12,-44,-78});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{31,5});
List<Integer> param1 = new ArrayList<>();
param1.add(14);
param1.add(24);
param1.add(13);
param1.add(4);
param1.add(19);
param1.add(0);
param1.add(19);
param1.add(23);
param1.add(30);
param1.add(1);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,220 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/HOW_TO_COMPUTE_MOD_OF_A_BIG_NUMBER.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class HOW_TO_COMPUTE_MOD_OF_A_BIG_NUMBER{
static int f_gold ( String num , int a ) {
int res = 0 ;
for ( int i = 0 ;
i < num . length ( ) ;
i ++ ) res = ( res * 10 + ( int ) num . charAt ( i ) - '0' ) % a ;
return res ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("RElCP");
param0.add("0139035510");
param0.add("00011110");
param0.add("TwanZWwLNXhFN");
param0.add("6247009752778");
param0.add("0100001011011");
param0.add("NCh");
param0.add("00714746542");
param0.add("101000100");
param0.add("MSTkXmlbPkV");
List<Integer> param1 = new ArrayList<>();
param1.add(13);
param1.add(44);
param1.add(86);
param1.add(66);
param1.add(55);
param1.add(33);
param1.add(75);
param1.add(54);
param1.add(93);
param1.add(78);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,221 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/LARGEST_SUBARRAY_WITH_EQUAL_NUMBER_OF_0S_AND_1S.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class LARGEST_SUBARRAY_WITH_EQUAL_NUMBER_OF_0S_AND_1S{
static int f_gold ( int arr [ ] , int n ) {
int sum = 0 ;
int maxsize = - 1 , startindex = 0 ;
int endindex = 0 ;
for ( int i = 0 ;
i < n - 1 ;
i ++ ) {
sum = ( arr [ i ] == 0 ) ? - 1 : 1 ;
for ( int j = i + 1 ;
j < n ;
j ++ ) {
if ( arr [ j ] == 0 ) sum += - 1 ;
else sum += 1 ;
if ( sum == 0 && maxsize < j - i + 1 ) {
maxsize = j - i + 1 ;
startindex = i ;
}
}
}
endindex = startindex + maxsize - 1 ;
if ( maxsize == - 1 ) System . out . println ( "No such subarray" ) ;
else System . out . println ( startindex + " to " + endindex ) ;
return maxsize ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{56,8,67,35,19,82,81,66,10,24,82,2,42,48,18,63,48,74,60,64,64,95,95,20,95,55,63,96,54});
param0.add(new int[]{78,67,1,78,48,83,17,19,21,44,99,68,16,54,9});
param0.add(new int[]{3,69,97,21,12,67,45,53,77,70,26,43});
param0.add(new int[]{21,80,29,22,77,64,42,4,71,75,62,27,30,36,66,37,49,97});
param0.add(new int[]{18,66,9,90,21,95,74,48,44,9,43,17});
param0.add(new int[]{42,41,87,3,64,25,96,55,99,57,32,64,10,75,69,95,11,36,15,2,78,70,14,54,11,28,55,47,27,85,47,62,97,68,44,70,12,27,36,85,76,91,17,75,83,34,32,89,55});
param0.add(new int[]{44});
param0.add(new int[]{1,43,28,17,30,46,89,51,15,70,96,79,65,55,8});
param0.add(new int[]{25,91,68,4,35,49,33});
param0.add(new int[]{14,86,22,42,94,54,28,41,48,8,82,84,99,92,33,75,38,31,59,86,21,6,77,89,79,83,57,26,89,45,60,55,60,76,76,6,40,57,38,44,7,98,64,65,88,73,88,99});
List<Integer> param1 = new ArrayList<>();
param1.add(26);
param1.add(8);
param1.add(9);
param1.add(10);
param1.add(10);
param1.add(41);
param1.add(0);
param1.add(9);
param1.add(4);
param1.add(26);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,222 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_PAIRS_DIFFERENCE_EQUAL_K_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_PAIRS_DIFFERENCE_EQUAL_K_1{
static int f_gold ( int arr [ ] , int n , int k ) {
int count = 0 ;
Arrays . sort ( arr ) ;
int l = 0 ;
int r = 0 ;
while ( r < n ) {
if ( arr [ r ] - arr [ l ] == k ) {
count ++ ;
l ++ ;
r ++ ;
}
else if ( arr [ r ] - arr [ l ] > k ) l ++ ;
else r ++ ;
}
return count ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{5,5,10,19,29,32,40,60,65,70,72,89,92});
param0.add(new int[]{-38,40,8,64,-38,56,4,8,84,60,-48,-78,-82,-88,-30,58,-58,62,-52,-98,24,22,14,68,-74,48,-56,-72,-90,26,-10,58,40,36,-80,68,58,-74,-46,-62,-12,74,-58});
param0.add(new int[]{0,0,1});
param0.add(new int[]{16,80,59,29,14,44,13,76,7,65,62,1,34,49,70,96,73,71,42,73,66,96});
param0.add(new int[]{-98,-88,-58,-56,-48,-34,-22,-18,-14,-14,-8,-4,-2,2,18,38,42,46,54,68,70,90,94,96,98});
param0.add(new int[]{0,1,1});
param0.add(new int[]{11,43,50,58,60,68,75});
param0.add(new int[]{86,94,-80,0,52,-56,42,88,-10,24,6,8});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{54,99,4,14,9,34,81,36,80,50,34,9,7});
List<Integer> param1 = new ArrayList<>();
param1.add(7);
param1.add(24);
param1.add(1);
param1.add(12);
param1.add(23);
param1.add(2);
param1.add(4);
param1.add(11);
param1.add(29);
param1.add(9);
List<Integer> param2 = new ArrayList<>();
param2.add(12);
param2.add(36);
param2.add(1);
param2.add(16);
param2.add(22);
param2.add(1);
param2.add(4);
param2.add(9);
param2.add(30);
param2.add(8);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,223 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_ROTATIONS_DIVISIBLE_4.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_ROTATIONS_DIVISIBLE_4{
static int f_gold ( String n ) {
int len = n . length ( ) ;
if ( len == 1 ) {
int oneDigit = n . charAt ( 0 ) - '0' ;
if ( oneDigit % 4 == 0 ) return 1 ;
return 0 ;
}
int twoDigit , count = 0 ;
for ( int i = 0 ;
i < ( len - 1 ) ;
i ++ ) {
twoDigit = ( n . charAt ( i ) - '0' ) * 10 + ( n . charAt ( i + 1 ) - '0' ) ;
if ( twoDigit % 4 == 0 ) count ++ ;
}
twoDigit = ( n . charAt ( len - 1 ) - '0' ) * 10 + ( n . charAt ( 0 ) - '0' ) ;
if ( twoDigit % 4 == 0 ) count ++ ;
return count ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("MRRuQJvxe");
param0.add("87395768");
param0.add("10111100110111");
param0.add("aVDUEfzG");
param0.add("55794792");
param0.add("111010");
param0.add("cndMLMJVmzuH");
param0.add("487717559382");
param0.add("11110");
param0.add("dRMDPyr");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,224 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/DISTRIBUTING_ITEMS_PERSON_CANNOT_TAKE_TWO_ITEMS_TYPE_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class DISTRIBUTING_ITEMS_PERSON_CANNOT_TAKE_TWO_ITEMS_TYPE_1{
static boolean f_gold ( int arr [ ] , int n , int k ) {
HashMap < Integer , Integer > hash = new HashMap < > ( ) ;
for ( int i = 0 ;
i < n ;
i ++ ) {
if ( ! hash . containsKey ( arr [ i ] ) ) hash . put ( arr [ i ] , 0 ) ;
hash . put ( arr [ i ] , hash . get ( arr [ i ] ) + 1 ) ;
}
for ( Map . Entry x : hash . entrySet ( ) ) if ( ( int ) x . getValue ( ) > 2 * k ) return false ;
return true ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{1,1,2,3,1});
param0.add(new int[]{2,3,3,5,3,3});
param0.add(new int[]{0,0,1,1,1});
param0.add(new int[]{7,60,78,91,80,75,85,21,41,63,1,84,69,13,94,25,54,54,52,68,53,35,17,37,98,27,2,31});
param0.add(new int[]{-96,-94,-82,-80,-78,-66,-36,-24,-18,-12,-2,-2,6,8,10,12,36,38,42,58,64,68,82,84,86,88,94});
param0.add(new int[]{0,1,1,1,0,0,0,0,1,0,0,0,1,0,0,1,1,1,1,1,1,0,0,0,0,1,0,1,1,0,0,0,0,1,1,0,0,0,1,0,0,1,1,1,0});
param0.add(new int[]{16,19,25,25,32,37,48,59,60,60,71,74,77,81,91,94});
param0.add(new int[]{-62,-94,72,-22,86,-80,64,98,-82,-50,12,-4,56,46,-80,2,-86,-44,-26,68,-94,-82,74,26,94,40,50,-40,-42,-10});
param0.add(new int[]{0,0,0,0,0,1,1,1});
param0.add(new int[]{83,57,2,47,70,22,49,51,25,57,32,7,8,99,6,86,24,79,42,43,1,24,68,11,24,12,43,40,14,45,11,46,12,80,66});
List<Integer> param1 = new ArrayList<>();
param1.add(5);
param1.add(6);
param1.add(2);
param1.add(24);
param1.add(24);
param1.add(34);
param1.add(10);
param1.add(20);
param1.add(5);
param1.add(21);
List<Integer> param2 = new ArrayList<>();
param2.add(2);
param2.add(2);
param2.add(1);
param2.add(2);
param2.add(3);
param2.add(2);
param2.add(8);
param2.add(4);
param2.add(2);
param2.add(33);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,225 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/MINIMUM_NUMBER_SUBSETS_DISTINCT_ELEMENTS_1.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class MINIMUM_NUMBER_SUBSETS_DISTINCT_ELEMENTS_1{
static int f_gold ( int arr [ ] , int n ) {
HashMap < Integer , Integer > mp = new HashMap < > ( ) ;
for ( int i = 0 ;
i < n ;
i ++ ) mp . put ( arr [ i ] , mp . get ( arr [ i ] ) == null ? 1 : mp . get ( arr [ i ] ) + 1 ) ;
int res = 0 ;
for ( Map . Entry < Integer , Integer > entry : mp . entrySet ( ) ) res = Math . max ( res , entry . getValue ( ) ) ;
return res ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{2,6,9,12,15,19,21,23,24,24,25,27,29,35,36,37,41,44,44,47,48,51,56,59,59,59,60,64,64,66,67,68,68,69,73,74,77,78,81,82,83,85,89,94,95,96,98,99});
param0.add(new int[]{96,20,-40,74,-44,98,-24,92,58,-84,-76,-14,64,-2,-84,52,-8,38,-26,-10,-62,-30,-76,58});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{35,16,42,3,57,70,4,31,93,60,98,97,81,57,62,98,88,51,5,58,48,14,58,22,40,26,66,41,9,78,62,32,79,88,65,75,80,12,15,93,92,13,83,26});
param0.add(new int[]{-62,-44,-36,-18,-16,-6,4,14,22,42,68,90});
param0.add(new int[]{1,0,1,0,1,1,1,1,0,1,0,1,0,0,0,0});
param0.add(new int[]{20,25,27,29,47,47,49,53,59,66,74,82,86,86,94,94,97});
param0.add(new int[]{92,50,76,46,14,40,22});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{8,82,92,42,55,4,94,73,57,7,21,71,68,97});
List<Integer> param1 = new ArrayList<>();
param1.add(30);
param1.add(20);
param1.add(31);
param1.add(37);
param1.add(11);
param1.add(12);
param1.add(13);
param1.add(3);
param1.add(27);
param1.add(12);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,226 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_SORTED_ROWS_MATRIX.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_SORTED_ROWS_MATRIX{
static int f_gold ( int mat [ ] [ ] , int r , int c ) {
int result = 0 ;
for ( int i = 0 ;
i < r ;
i ++ ) {
int j ;
for ( j = 0 ;
j < c - 1 ;
j ++ ) if ( mat [ i ] [ j + 1 ] <= mat [ i ] [ j ] ) break ;
if ( j == c - 1 ) result ++ ;
}
for ( int i = 0 ;
i < r ;
i ++ ) {
int j ;
for ( j = c - 1 ;
j > 0 ;
j -- ) if ( mat [ i ] [ j - 1 ] <= mat [ i ] [ j ] ) break ;
if ( c > 1 && j == 0 ) result ++ ;
}
return result ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ] [ ]> param0 = new ArrayList<>();
param0.add(new int[][]{new int[]{4,12,13,24,25,26,27,35,41,60,69,71,73,78,85,86,95,99},new int[]{1,13,18,25,41,42,44,45,49,49,51,52,59,63,64,67,78,97},new int[]{1,2,11,18,23,26,30,31,41,42,45,71,75,90,91,92,95,97},new int[]{26,30,44,46,46,54,56,60,67,68,75,77,77,83,87,87,94,98},new int[]{19,20,27,31,33,34,37,41,42,49,60,60,64,67,71,73,77,92},new int[]{2,6,9,11,20,29,37,41,42,44,49,58,62,76,87,89,94,97},new int[]{7,8,9,14,20,45,49,54,63,63,64,71,72,73,73,89,94,95},new int[]{2,3,7,16,17,23,23,25,44,50,58,58,59,78,83,87,90,99},new int[]{4,16,18,22,23,33,34,43,43,46,51,56,62,75,79,85,97,97},new int[]{16,18,29,32,39,53,54,55,67,70,72,72,76,76,86,87,96,96},new int[]{6,30,34,37,38,42,52,54,59,67,71,71,72,81,85,87,91,93},new int[]{2,6,6,16,18,20,21,31,40,42,50,56,62,80,80,83,91,96},new int[]{2,5,6,14,16,21,23,37,52,59,72,86,86,87,87,89,90,91},new int[]{1,10,17,20,22,25,27,32,37,37,44,49,65,78,80,81,85,95},new int[]{1,13,14,21,43,50,52,58,62,64,65,66,66,66,67,70,81,82},new int[]{1,2,9,16,17,23,25,29,30,31,42,65,73,74,82,87,92,92},new int[]{1,5,9,13,21,28,32,33,34,38,46,60,80,86,93,94,96,98},new int[]{11,18,23,24,25,26,28,48,59,59,67,72,82,83,86,89,92,96}});
param0.add(new int[][]{new int[]{82,82,2,8,-32,90,-76,-64,-66,-46,-72,-58,-28,-86,-8,-96,-62,-32,54,-16,96,28,76,90,-40,98,88,-90,4,-50,70,32,-74,-72,-72,10,36,50,-16,-36},new int[]{-52,-6,12,-6,-64,6,38,-14,-86,74,-74,82,54,2,46,-94,88,86,-32,-72,72,88,90,-8,-58,32,-90,-68,-70,72,34,74,-30,92,90,-88,82,-54,42,94},new int[]{-4,-32,-12,-96,16,-32,32,52,2,-6,2,-10,40,-64,4,-56,-50,46,54,-6,-14,-40,-98,-4,-20,98,94,60,-70,-94,52,-4,32,20,-30,-94,-50,50,-86,-66},new int[]{10,84,2,-44,-54,-82,-64,70,-20,-40,-50,10,26,-14,-88,10,-80,-48,10,16,-14,-52,74,-60,48,-60,-62,38,56,-34,86,20,74,-20,28,-46,-44,96,-58,-8},new int[]{-48,-36,-18,-66,-20,60,-36,34,-94,44,-14,-34,-84,-26,38,48,14,12,72,-76,26,50,-58,40,90,14,-40,22,-26,-24,66,-62,-34,16,-34,-30,54,-76,-26,4},new int[]{-26,56,74,-82,58,-42,-98,96,-24,-36,-86,-80,42,78,-2,-90,-8,-52,46,-20,-16,64,-36,-8,-16,-60,96,40,66,98,14,-36,-78,-40,52,60,-20,38,26,-98},new int[]{-12,60,-56,-66,68,-20,-74,30,14,-36,-22,-54,50,62,-44,14,90,66,80,76,-86,92,-80,-6,48,44,24,40,94,-42,68,28,-20,98,40,50,-18,90,6,2},new int[]{-98,4,-32,-34,-64,58,16,48,82,10,36,32,-60,-40,2,-14,-58,28,-44,60,-28,-6,-68,46,-50,62,10,44,-4,76,60,-26,52,40,-88,-56,-36,-70,-66,-22},new int[]{18,-66,-82,52,34,-86,-50,-64,18,10,-14,8,80,-76,20,76,96,-12,-36,86,-10,16,-14,66,-4,14,-82,0,2,90,78,-48,42,-60,90,-16,80,16,-64,-58},new int[]{12,8,-74,78,46,-84,20,14,-2,-42,-80,-66,-64,34,58,0,28,-8,34,92,-14,-54,82,68,64,6,30,78,-50,-28,-74,-12,-18,82,-50,-86,-2,-78,94,-66},new int[]{10,-76,58,32,-44,60,-14,24,-92,24,16,80,90,-60,-6,8,-50,90,60,82,6,84,74,-48,-98,-2,-38,74,64,52,8,-32,-58,-58,70,-14,68,46,32,74},new int[]{84,98,78,34,-94,84,10,84,10,-58,-70,-30,98,-28,-80,56,-36,96,82,38,2,-38,28,18,82,60,-16,-64,90,34,-10,98,36,40,-6,-32,-32,-24,92,12},new int[]{54,92,-30,-12,40,48,8,34,-20,-58,8,-14,0,-34,98,-32,-98,40,-44,34,94,-56,-90,64,4,-76,-34,-68,48,28,84,-4,-46,-54,72,-82,0,-82,38,-6},new 
int[]{44,-66,-86,54,-4,36,62,88,-16,-88,-26,-50,-84,-90,38,14,62,14,-92,64,-50,-2,-96,-4,94,-84,26,-86,-68,6,-18,-66,-56,-88,-92,-86,64,-6,-92,-12},new int[]{-36,80,-28,-42,58,-12,-66,-38,-76,34,-52,-32,-80,66,54,-2,-40,78,14,-54,6,-92,68,-40,72,-80,52,-60,98,-60,-92,26,-24,26,46,34,80,-92,16,16},new int[]{-4,60,-72,-6,46,76,-8,82,42,-68,-86,10,20,80,-22,64,-40,22,-6,-58,-74,-86,-16,-14,-76,-54,-98,-50,-74,80,-44,18,-70,-80,58,-48,-70,44,46,88},new int[]{-80,-76,-46,-92,-78,-72,-56,72,-52,-86,-48,6,84,38,-14,66,48,86,36,-80,-54,-44,-88,-18,-50,-56,-20,-14,-52,-98,-44,-76,-42,-66,-20,62,0,-54,-82,-70},new int[]{44,98,78,56,-14,-70,-24,62,88,70,-42,72,80,42,22,-90,-50,-22,14,40,42,34,66,-58,70,22,-86,58,-82,54,-20,72,20,32,8,30,52,-6,-12,-62},new int[]{-4,70,-76,22,22,44,-84,-74,34,-36,64,-78,50,72,-40,-78,-26,-66,-84,-28,-40,-96,66,36,-28,-18,4,0,20,18,78,-74,-58,-64,-68,68,-84,20,-56,-16},new int[]{0,24,64,-50,-36,70,-88,-34,70,68,-68,80,88,12,-50,74,32,18,-14,74,58,68,-62,-30,20,94,-68,96,-32,-94,-70,-44,-76,-94,34,54,-74,62,-80,-10},new int[]{-64,-26,-26,44,14,-72,-74,36,-8,-64,-34,6,18,14,74,-90,66,-12,-6,-6,-12,-58,72,18,62,-44,12,-56,66,34,44,0,-98,96,-94,-60,76,52,48,-6},new int[]{6,-58,14,82,-72,0,92,8,-6,-18,74,-66,68,-24,-20,90,-48,54,18,-24,-8,-48,72,-78,-54,84,18,-52,-36,-30,-82,-34,8,-94,-34,-78,-28,44,92,-78},new int[]{-50,-84,-82,-12,62,-72,-36,84,-36,-82,12,-52,12,-34,36,8,-24,58,6,-34,0,-22,46,98,62,80,-88,-24,98,30,22,94,-38,-24,78,62,0,-10,2,52},new int[]{94,-10,-88,-12,-10,56,-86,18,54,-20,22,-18,76,-88,-38,38,-88,-20,82,88,-80,-34,14,54,28,-46,-88,-84,-86,38,86,26,98,-28,14,-24,-22,-80,-98,58},new int[]{60,52,12,-86,-54,-30,10,-2,-54,-74,56,74,-74,92,86,-92,-28,-54,30,-56,40,96,92,16,82,-70,-80,92,-80,14,56,-6,8,-92,20,10,-50,-64,-34,50},new int[]{64,70,-74,-72,78,46,42,44,-96,-18,-62,56,-90,-14,38,82,8,-58,52,92,-90,22,-60,62,60,-64,-56,-74,92,-2,-90,-14,-56,-64,38,18,-52,-92,30,-36},new 
int[]{50,84,82,36,60,34,-50,-64,-72,30,8,84,48,-24,78,80,-10,-90,82,-80,-4,-94,24,92,92,-16,-80,68,60,98,-92,52,60,8,-72,12,-60,-84,-44,-34},new int[]{-98,-30,30,36,96,74,-82,-2,-72,-38,-40,10,92,30,98,-28,56,70,-84,66,40,92,42,-86,-58,-90,-10,98,-12,-80,94,4,-84,60,94,-90,74,-68,64,-76},new int[]{2,94,38,-6,64,4,-42,92,-12,54,82,90,-64,32,0,-24,-16,-68,78,54,28,-86,-56,4,16,98,32,-18,-76,90,-6,72,40,20,6,-90,52,-62,4,30},new int[]{22,90,54,-34,-30,0,-72,-6,36,28,-96,86,-2,-48,-30,8,-60,-32,24,-50,-76,-86,32,28,-66,-88,24,86,72,96,22,-32,-92,-26,48,-52,-12,4,-94,2},new int[]{-44,70,38,36,-36,46,-68,-44,-36,34,-32,-44,-22,-80,-64,28,60,92,-52,14,42,-80,-70,50,24,-34,16,64,62,-94,18,-48,-68,16,76,-42,30,-88,46,-12},new int[]{46,46,44,16,-70,-6,-78,-46,70,30,70,88,66,56,-12,4,76,-50,-28,-98,-16,-86,-68,36,28,-92,-46,-86,-2,90,6,36,-62,-30,-26,-38,22,-60,-20,-70},new int[]{80,38,-94,-42,70,-20,42,-62,-30,54,82,-94,-78,74,60,54,-52,-56,66,86,-30,-14,0,-6,-22,56,70,-86,50,82,72,-10,54,24,-46,-26,-20,-54,-96,30},new int[]{-48,94,54,-16,70,20,-20,-2,-8,84,-60,30,-18,-14,32,42,24,26,-12,-62,2,-94,26,36,-88,-22,-64,46,36,74,-44,-56,-36,-98,70,72,-68,68,76,-32},new int[]{-4,36,0,14,-42,-38,-98,-2,-44,-90,82,80,-66,38,62,34,52,44,-22,80,-74,-88,-74,24,98,8,18,-26,-4,-82,-60,44,-2,30,20,52,26,-22,-54,96},new int[]{98,-54,-12,-12,-74,34,-6,-36,-94,40,96,42,-32,-46,-46,88,-90,26,-98,30,92,-34,74,-94,36,-68,-66,74,-2,6,94,-12,82,90,-2,78,-80,-84,18,74},new int[]{-42,30,56,-74,-16,-44,4,-62,-12,-62,-22,64,56,96,-16,40,10,88,-66,54,56,96,74,-6,-36,-70,-82,74,-14,-18,-32,-70,60,26,-88,-78,-8,32,-84,90},new int[]{-44,-14,-44,96,0,54,2,74,36,-56,-98,-16,-70,68,-88,26,-18,30,62,-88,-28,-58,62,-38,-62,28,-80,-6,88,-16,64,-58,14,94,-40,2,-12,-16,-24,-64},new int[]{20,18,-94,94,-2,-74,-56,-46,62,-88,-16,-30,-10,-54,38,22,-42,32,28,-42,44,64,46,66,-96,70,-32,10,-14,72,-42,98,-54,36,76,24,-96,86,54,-88},new 
int[]{74,-48,90,78,-44,0,76,-16,-28,-92,10,-32,-30,-78,-8,40,-90,74,-40,16,-78,22,-42,36,68,44,42,6,-60,36,-74,-92,92,-44,40,-92,-46,56,-36,-94}});
param0.add(new int[][]{new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1},new 
int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1}});
param0.add(new int[][]{new int[]{91,8,34,7,66,59,90,78,54,77,55,29,90,69,85,42,39,49,83,59,3,41,65,60,4,45,65,29,47,40,96,11,21,74,34,83,12,3,6,67,30,29,40,87,35,73,17,13,20},new int[]{38,36,55,16,85,38,67,15,37,25,81,61,31,68,31,11,23,39,35,21,66,66,52,49,55,35,40,47,99,25,91,6,50,3,62,11,46,88,95,17,40,70,35,76,59,84,4,99,84},new int[]{61,2,63,5,81,77,7,32,74,17,53,17,86,5,86,15,80,84,94,64,86,94,64,7,90,64,15,94,56,51,64,84,77,70,49,2,46,96,64,25,18,54,39,73,77,23,46,14,23},new int[]{48,22,2,60,46,8,3,70,58,6,27,23,71,92,10,45,48,85,81,86,61,27,85,75,1,49,47,82,8,74,92,40,61,27,12,30,37,66,84,36,86,40,36,96,60,96,70,27,41},new int[]{13,6,54,10,54,19,24,61,87,77,14,45,37,15,74,4,47,61,78,91,68,99,67,70,70,26,72,19,75,93,56,66,76,80,49,45,62,85,50,51,48,40,48,13,69,62,82,13,13},new int[]{25,75,45,24,38,4,19,83,38,61,21,59,71,72,76,59,36,31,72,23,16,22,68,40,28,60,89,87,87,89,16,11,45,89,75,25,43,67,69,41,66,91,38,62,73,29,13,45,68},new int[]{30,1,39,11,69,4,8,3,52,59,24,47,88,62,30,96,38,80,62,86,81,12,72,65,10,64,95,58,60,95,51,60,89,35,54,85,67,38,58,85,12,40,5,47,35,95,26,60,33},new int[]{47,58,24,5,76,9,56,45,32,69,14,63,7,2,55,36,29,59,15,64,65,80,99,2,99,23,18,98,26,38,58,52,92,53,18,40,86,93,18,26,71,65,29,91,80,91,29,44,31},new int[]{63,5,55,56,10,58,53,43,89,30,98,71,20,94,28,27,65,65,54,66,69,28,82,30,2,13,71,16,31,55,65,62,76,66,36,70,42,66,82,73,63,21,27,89,44,99,70,75,96},new int[]{6,19,62,34,59,79,75,95,84,64,95,81,81,77,83,62,24,4,18,97,33,43,57,40,90,65,10,88,84,54,68,58,40,46,88,32,1,97,4,36,41,57,30,13,43,77,88,99,29},new int[]{23,37,24,76,53,11,28,95,2,89,27,47,2,3,12,67,25,66,7,38,45,63,15,93,2,12,44,28,68,27,52,23,85,4,59,92,35,17,27,7,91,20,84,22,26,34,63,87,54},new int[]{97,74,14,36,43,72,69,25,78,13,46,10,88,50,49,98,55,43,22,78,13,78,46,9,24,32,61,91,51,53,58,95,54,47,11,21,18,60,10,27,82,66,90,40,45,52,98,85,16},new 
int[]{34,59,78,37,11,87,79,40,58,33,82,33,96,86,94,40,71,85,59,22,65,73,20,63,76,91,24,29,68,27,45,97,69,33,43,86,92,31,19,32,15,39,37,19,14,38,5,53,20},new int[]{44,25,58,89,40,99,34,90,26,87,63,16,43,84,77,25,48,55,7,47,43,84,3,41,28,65,34,9,43,39,76,8,52,12,75,43,16,94,18,93,12,83,54,15,27,81,46,89,24},new int[]{67,92,60,34,46,5,80,64,53,65,94,65,36,66,56,52,82,54,32,55,69,88,43,41,11,8,33,95,32,48,71,9,89,7,2,33,29,76,33,38,99,48,99,92,68,22,70,19,14},new int[]{90,32,71,27,57,73,87,90,40,24,15,27,70,87,74,29,8,30,17,87,13,93,46,87,12,30,43,80,14,3,23,75,67,51,23,49,69,69,69,54,57,46,60,43,47,70,14,30,95},new int[]{69,58,48,20,45,70,13,66,65,42,62,76,9,8,17,28,22,2,60,6,73,54,24,32,15,11,75,62,8,99,51,36,83,15,55,18,17,78,80,82,97,70,60,46,78,16,1,26,43},new int[]{34,59,69,68,91,5,24,72,81,23,64,19,72,6,66,72,91,96,65,11,28,27,27,87,87,61,29,52,86,14,41,86,59,5,42,91,22,50,9,6,99,37,24,4,8,67,62,38,99},new int[]{62,48,96,3,14,75,47,80,50,61,51,77,82,37,31,49,87,48,94,4,92,94,99,26,65,29,18,4,9,14,35,60,54,33,52,49,44,31,53,95,28,3,14,97,53,19,80,73,5},new int[]{18,14,24,76,93,33,55,40,65,59,45,3,29,17,12,4,60,72,23,82,14,94,65,19,24,50,91,80,96,78,41,37,75,77,4,94,69,80,48,5,55,85,43,58,36,3,8,40,87},new int[]{92,18,42,47,28,4,55,10,46,52,75,20,48,62,7,14,78,95,49,58,14,2,43,29,57,98,83,90,56,62,92,91,2,69,79,44,1,5,43,54,34,88,67,60,42,37,56,51,3},new int[]{28,31,22,14,75,56,68,57,39,10,73,69,72,27,79,2,99,99,10,24,48,56,19,9,21,80,36,43,11,49,85,49,84,84,28,48,13,80,39,94,8,19,97,73,3,12,29,34,34},new int[]{99,50,58,74,49,22,2,84,94,89,94,38,68,86,42,41,43,69,49,17,17,96,78,18,93,48,18,32,87,16,6,70,97,72,55,20,40,56,51,54,3,57,69,71,74,18,64,31,39},new int[]{23,18,26,32,12,65,32,90,98,14,8,79,44,56,52,33,34,31,92,95,99,11,90,65,59,95,49,27,77,64,21,33,2,69,11,67,65,89,40,12,66,60,65,10,62,48,32,84,43},new int[]{87,26,33,4,89,44,32,68,19,61,35,74,56,55,82,66,79,76,10,64,95,33,87,89,88,67,11,14,85,99,56,78,72,51,43,44,76,11,77,14,83,70,44,58,2,46,75,61,31},new 
int[]{93,73,8,30,6,84,16,28,43,47,80,29,89,86,91,83,98,42,91,65,20,77,34,1,24,57,77,96,66,61,55,63,7,1,52,67,85,47,32,74,88,34,94,73,7,59,78,47,42},new int[]{90,35,30,1,10,96,62,91,53,13,6,33,44,6,62,49,40,35,55,30,96,98,51,57,83,45,52,51,64,70,92,99,91,2,7,95,50,77,82,23,2,56,39,97,86,55,72,69,92},new int[]{45,12,56,49,85,32,64,91,3,47,10,82,50,33,71,53,94,32,57,63,59,65,83,85,73,94,28,95,76,11,51,17,87,12,69,65,58,31,76,94,13,42,15,43,34,14,60,88,24},new int[]{75,34,12,19,35,60,73,5,33,74,27,12,68,58,69,94,31,99,86,32,35,78,56,6,43,71,30,56,88,14,46,41,12,6,52,15,84,52,6,13,60,49,61,45,42,72,51,82,99},new int[]{95,81,81,39,93,29,96,7,99,11,94,42,1,16,99,74,68,49,15,6,15,80,68,25,86,69,76,6,64,96,87,57,94,99,39,71,3,92,68,30,5,91,49,40,5,26,58,82,90},new int[]{4,57,97,16,67,90,23,89,24,84,90,66,76,51,21,44,41,52,54,71,14,64,80,49,88,2,94,76,10,71,78,1,59,39,18,56,45,43,95,13,30,93,86,78,21,14,31,98,76},new int[]{40,86,5,71,50,83,56,89,56,6,75,48,16,31,65,10,90,63,84,63,1,81,6,21,89,58,70,18,72,49,10,68,2,99,10,51,86,63,55,77,90,32,53,48,99,76,45,31,52},new int[]{99,19,61,12,65,15,53,96,50,46,9,32,91,55,84,30,59,58,92,99,37,68,94,78,59,47,51,4,89,10,84,84,43,83,95,2,54,81,22,60,11,30,98,59,57,37,88,43,9},new int[]{14,75,98,81,61,53,54,7,97,68,98,21,92,20,12,26,14,69,52,59,36,37,89,82,13,57,26,34,12,72,12,63,91,10,21,73,46,60,8,17,5,50,30,10,83,53,97,90,39},new int[]{64,61,79,7,82,31,35,88,41,39,61,54,15,67,50,86,79,58,54,9,51,83,47,8,43,6,53,61,51,45,90,42,38,35,70,7,1,18,26,87,51,76,34,82,76,66,10,66,7},new int[]{62,86,31,83,51,75,40,72,22,4,42,47,56,77,36,55,36,36,74,55,67,3,96,88,38,68,2,34,92,83,16,97,70,13,36,65,73,20,49,53,49,13,32,47,42,29,26,81,44},new int[]{44,18,97,11,67,31,23,89,39,31,82,62,55,55,15,83,66,6,13,58,88,97,62,21,37,75,27,18,78,11,52,47,33,9,87,49,38,67,12,14,3,5,60,63,13,22,2,31,45},new int[]{55,47,20,4,13,45,34,25,95,4,13,19,1,36,74,85,51,23,35,95,23,65,63,58,67,12,18,51,21,23,38,87,92,65,69,14,48,62,86,73,41,52,12,55,85,46,88,44,38},new 
int[]{83,29,86,98,92,66,4,69,74,50,78,75,3,44,78,34,12,54,17,90,23,97,21,96,6,3,73,5,58,93,45,64,2,97,33,93,14,62,68,19,53,66,78,5,52,94,84,60,54},new int[]{15,44,11,54,64,99,91,94,57,73,95,25,24,4,66,11,84,83,50,89,31,83,27,75,98,49,15,3,59,20,67,67,4,67,23,97,87,17,67,57,91,34,81,99,90,29,55,88,28},new int[]{18,89,80,81,71,51,19,14,63,18,10,40,7,64,41,55,51,75,30,89,7,18,18,89,46,98,25,1,71,6,43,89,88,30,90,30,37,57,99,3,37,91,45,69,46,32,19,51,83},new int[]{11,5,99,30,60,57,35,66,16,60,93,22,7,20,58,29,91,80,59,81,52,1,51,79,88,26,92,40,12,59,9,57,42,94,24,17,79,36,48,71,83,48,88,50,69,12,62,27,22},new int[]{50,91,58,61,4,65,8,12,10,67,97,24,59,37,57,29,58,43,66,25,7,97,93,73,98,24,86,31,8,30,64,93,66,4,91,78,70,67,33,5,63,41,16,39,7,42,21,22,75},new int[]{2,16,31,71,84,77,39,36,83,7,14,43,53,3,76,98,29,68,75,3,5,94,73,21,2,97,73,48,6,66,45,85,27,99,62,67,34,66,13,39,18,11,4,35,62,55,91,86,63},new int[]{1,57,15,25,30,61,83,28,24,17,60,56,58,7,68,10,76,6,35,18,28,55,82,52,19,18,63,40,49,95,82,76,78,85,61,79,31,48,49,40,60,67,65,86,71,44,45,58,33},new int[]{64,70,88,84,20,95,73,14,2,56,94,73,83,25,93,58,49,91,76,72,10,42,73,35,49,88,12,87,78,87,78,38,57,81,12,19,14,75,71,24,78,32,23,61,8,68,61,54,4},new int[]{22,20,70,20,61,33,74,38,14,2,88,96,31,86,10,34,61,59,92,47,92,70,52,1,39,47,62,17,92,95,7,5,56,73,86,36,25,73,10,90,38,25,42,88,3,75,44,71,61},new int[]{90,36,14,93,21,25,23,58,5,43,65,53,93,76,93,25,48,20,73,42,28,2,92,13,24,28,20,88,53,90,52,86,33,31,39,58,19,80,54,24,19,48,11,17,41,13,63,56,48},new int[]{87,89,92,89,55,51,31,4,3,3,8,39,23,32,25,74,83,66,79,54,45,97,33,22,89,1,7,91,97,2,55,18,32,69,12,71,94,85,56,47,16,27,99,80,32,15,50,79,25}});
param0.add(new int[][]{new int[]{-94,-78,-30,-16,-14,22,44,44,54,60,68,72,92,94,98},new int[]{-92,-68,-52,-40,-30,-28,-20,-16,14,38,42,54,60,72,86},new int[]{-78,-68,-58,-36,-10,-10,42,48,52,52,58,68,72,78,96},new int[]{-94,-86,-84,-60,-40,0,0,22,48,56,70,72,80,90,96},new int[]{-98,-92,-80,-68,-58,38,50,52,58,60,62,62,72,86,90},new int[]{-94,-92,-70,-64,-46,-38,-32,-14,-10,-6,18,30,32,74,98},new int[]{-72,-60,-52,-50,-26,-24,-6,4,10,40,46,86,88,98,98},new int[]{-94,-72,-40,-36,-36,-28,0,18,34,36,38,44,50,54,98},new int[]{-72,-60,-40,-38,-36,-26,-18,-8,-2,2,30,34,50,76,80},new int[]{-96,-74,-46,-38,-26,-16,-10,2,2,20,28,48,48,60,90},new int[]{-86,-60,-58,-58,-46,-40,-4,2,16,18,26,62,64,78,98},new int[]{-98,-50,-12,-10,-2,12,20,40,60,66,76,78,84,90,92},new int[]{-72,-68,-68,-52,-8,-6,10,20,42,52,54,56,72,86,90},new int[]{-80,-74,-32,10,18,54,62,74,76,78,86,86,88,94,96},new int[]{-98,-78,-76,-72,-56,-30,-26,0,36,42,44,76,84,88,94}});
param0.add(new int[][]{new int[]{0,0,0,1,0,1,1,1,1,1,0,0,1,0,1,0,0,1,1,1,1,0,0,0,1,1,0},new int[]{0,0,1,0,0,0,0,0,1,0,1,0,1,1,1,0,0,1,1,1,1,1,1,0,0,0,1},new int[]{1,0,0,1,1,0,0,1,0,1,0,0,1,1,1,1,0,1,0,1,1,1,0,0,0,1,0},new int[]{1,1,1,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,1,1,1,1,1,0,0,1,0},new int[]{1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0},new int[]{1,0,1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,1,1,1,0,1,1,0,0},new int[]{1,0,1,1,0,0,0,1,1,0,0,0,1,0,1,1,0,0,1,0,1,0,0,0,1,1,1},new int[]{1,0,0,0,0,1,0,1,0,0,0,0,1,0,1,1,0,1,1,1,0,0,0,1,0,0,0},new int[]{0,1,1,0,1,1,0,0,1,0,1,0,1,1,1,1,1,0,1,1,1,1,1,0,0,1,1},new int[]{0,1,0,0,1,0,1,1,1,0,0,1,1,0,0,0,1,1,0,0,1,1,0,1,1,0,1},new int[]{1,1,1,0,1,0,1,1,1,1,0,0,0,0,1,0,1,0,0,0,1,0,0,1,0,1,1},new int[]{0,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,1,0,0,0,1,1,1,0,0,1},new int[]{1,1,0,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,0,0,0,1,0,1,1,0,0},new int[]{1,1,1,0,1,1,1,0,0,1,0,0,1,1,1,0,0,1,1,1,0,0,0,0,0,1,0},new int[]{1,1,1,0,1,1,1,0,0,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,1},new int[]{0,1,1,0,0,1,0,0,1,0,1,0,1,1,1,0,1,1,0,1,0,0,0,0,1,1,0},new int[]{1,1,1,0,1,0,0,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,1,1,0,1,0},new int[]{0,0,0,0,0,0,1,1,1,0,0,1,0,0,1,0,0,0,1,1,0,1,1,1,0,0,1},new int[]{1,0,0,0,0,1,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1,1,0,0,0,0,1},new int[]{0,0,0,0,0,1,0,1,1,0,0,1,0,0,1,0,0,0,0,1,0,1,0,1,1,0,1},new int[]{1,0,1,0,0,1,0,0,0,1,1,1,0,0,1,1,1,0,1,1,0,1,0,0,0,0,0},new int[]{1,1,0,0,1,0,1,1,1,0,0,0,1,0,0,0,0,1,1,0,1,1,1,0,1,0,0},new int[]{1,1,0,0,0,1,1,0,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,1,0,0},new int[]{0,1,0,1,0,0,0,1,0,1,0,1,1,1,1,0,1,0,0,1,0,1,0,1,0,0,1},new int[]{1,1,1,1,1,0,1,1,0,1,0,1,1,1,1,1,0,0,1,1,0,1,0,0,1,1,1},new int[]{1,1,0,1,1,0,1,1,1,1,1,1,1,1,0,0,0,0,1,0,1,1,0,1,1,0,1},new int[]{0,0,0,1,0,1,1,1,1,1,1,0,0,0,1,0,1,0,1,1,1,1,1,1,0,0,0}});
param0.add(new int[][]{new int[]{2,21,39,67,70,73,83,86,87,93},new int[]{31,42,53,56,64,65,85,89,94,98},new int[]{3,15,17,50,52,67,73,82,91,94},new int[]{12,15,16,21,23,30,33,38,50,89},new int[]{5,7,25,28,38,43,43,58,64,86},new int[]{24,26,29,33,46,47,52,71,86,96},new int[]{7,10,23,24,36,39,47,61,77,89},new int[]{1,10,26,27,61,62,64,80,85,94},new int[]{3,8,16,32,37,48,54,58,77,82},new int[]{43,52,70,76,81,84,84,85,95,99}});
param0.add(new int[][]{new int[]{62,-24,-62,-18,46,14,90,-42,-98,-52,36,96,26,-26,38,-88,88,-98,-86},new int[]{-24,58,-70,-56,68,-66,-24,30,-86,-74,98,-24,-48,-28,24,-64,22,46,40},new int[]{2,-30,-94,6,-24,-42,-70,-20,-80,14,74,72,-68,58,36,40,88,-80,54},new int[]{-24,-50,-96,-36,36,30,-58,64,98,-86,-74,-18,-64,74,-46,-24,68,34,24},new int[]{-34,96,14,-50,-68,-72,-38,-52,56,4,60,-90,-70,16,-4,0,-82,2,-16},new int[]{22,10,54,-86,14,12,64,-54,92,2,88,50,-24,-86,-32,46,-66,-26,-90},new int[]{-22,26,44,2,70,-94,-78,32,-30,-64,90,-16,68,-60,-10,-18,-64,20,-18},new int[]{72,-14,-98,-54,72,18,24,4,-16,-26,78,-80,26,-10,18,20,22,68,20},new int[]{-32,74,14,-18,88,42,6,-6,-16,-30,80,-16,24,-96,-96,-52,-38,-34,-46},new int[]{-12,-72,-48,52,-64,-30,26,64,0,34,52,-66,98,-96,-52,-96,38,-56,-32},new int[]{-2,18,-60,-52,-46,62,-10,82,-24,34,72,50,-98,-96,78,86,6,32,-60},new int[]{-44,-52,-66,-46,24,80,-68,92,-32,26,-44,30,72,-56,-56,28,-26,22,-92},new int[]{82,-58,-60,-30,-68,-18,-72,98,92,-28,-30,44,78,10,54,56,2,-92,24},new int[]{4,96,-84,68,14,-86,6,22,-6,-60,2,-38,-48,48,-74,-52,-44,-68,-96},new int[]{46,4,16,20,-12,86,-56,88,8,-68,56,14,2,-38,-20,-42,-64,86,30},new int[]{96,68,-74,14,66,-20,72,60,56,-78,-14,2,60,16,-2,-90,-46,24,68},new int[]{-80,40,72,-88,-2,12,-96,-34,-88,94,46,-62,84,-68,14,-62,-26,-94,-66},new int[]{24,-60,-30,-22,-42,-2,-52,76,-16,26,-82,64,88,6,-42,-46,36,50,98},new int[]{-30,-16,-80,-16,-42,-6,60,-78,-94,-42,-20,44,-78,70,48,-84,-52,-22,46}});
param0.add(new int[][]{new int[]{0,0,0,0,0,0,0,0,0,1,1,1,1,1},new int[]{0,0,0,0,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,1,1,1,1,1},new int[]{0,0,0,0,0,0,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,1,1,1,1,1,1},new int[]{0,0,0,0,1,1,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,1,1,1,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,0,0,1,1,1,1,1},new int[]{0,0,0,0,0,0,0,1,1,1,1,1,1,1},new int[]{0,0,0,1,1,1,1,1,1,1,1,1,1,1}});
param0.add(new int[][]{new int[]{58,85,97,21,67,89,63,21,3,59,28,4,57,94,75,40,26,76,91,6,64,58,31,26,69,56},new int[]{61,73,86,49,29,98,33,19,25,73,53,43,38,38,35,8,76,31,86,93,82,13,22,28,38,88},new int[]{36,22,61,11,68,82,29,74,11,31,71,46,70,47,91,56,26,34,52,41,82,3,21,59,15,3},new int[]{67,75,36,39,7,71,38,63,36,73,77,63,61,19,58,96,24,71,76,5,92,80,56,51,57,11},new int[]{81,94,93,62,55,71,63,25,30,12,82,98,12,57,44,59,67,18,56,20,37,80,66,57,34,64},new int[]{69,90,68,50,46,79,27,12,24,37,33,24,2,33,50,3,21,20,30,30,27,8,82,99,71,83},new int[]{4,52,66,74,99,99,10,51,25,84,50,37,10,56,36,42,92,89,70,67,17,89,44,63,1,34},new int[]{78,19,58,40,15,68,31,14,96,72,74,34,10,64,69,91,12,65,82,30,20,76,73,22,49,65},new int[]{11,46,64,46,13,96,43,95,47,18,45,16,69,36,53,50,24,68,43,91,31,48,47,1,91,44},new int[]{86,37,91,17,78,5,39,37,62,68,26,91,19,64,42,55,65,56,85,33,90,70,97,51,61,42},new int[]{47,84,97,98,53,58,83,86,30,42,4,72,67,32,50,37,43,92,40,6,1,98,25,16,36,18},new int[]{5,15,23,78,81,92,74,55,30,59,43,27,48,24,33,90,79,61,16,76,13,75,13,91,86,97},new int[]{50,81,63,53,30,92,83,19,43,90,40,66,2,92,72,35,87,11,26,55,26,92,80,79,68,73},new int[]{2,55,80,76,99,98,8,31,23,87,99,75,72,45,79,70,84,36,9,78,44,45,38,96,66,39},new int[]{78,28,1,62,38,69,48,57,89,60,15,7,67,99,63,37,65,27,1,8,17,15,1,39,11,49},new int[]{20,70,15,29,42,31,49,87,50,11,66,55,21,35,77,7,65,3,92,86,52,36,16,55,25,59},new int[]{24,90,55,67,66,96,58,49,21,1,39,30,65,55,57,64,98,27,90,65,43,26,10,77,86,9},new int[]{40,44,98,40,1,40,6,30,39,41,10,55,44,38,44,86,95,80,86,41,40,94,35,46,87,36},new int[]{30,21,73,92,41,17,19,71,53,19,80,65,93,1,69,48,95,54,81,52,50,72,91,9,73,74},new int[]{42,87,8,31,39,47,35,29,70,42,94,53,27,53,67,51,28,86,27,77,8,84,48,34,71,2},new int[]{84,68,18,85,35,63,98,68,95,24,85,10,23,88,15,70,15,46,46,52,4,72,21,75,11,21},new int[]{21,1,28,27,46,61,52,56,43,9,88,19,41,40,12,90,49,56,92,65,3,46,16,46,45,64},new 
int[]{65,27,31,4,16,63,97,48,45,39,37,7,89,99,19,93,57,16,25,43,80,27,70,63,50,69},new int[]{97,69,6,27,72,96,13,62,99,28,63,5,85,45,67,97,60,65,21,24,85,46,21,6,31,19},new int[]{89,76,25,93,74,3,97,44,8,25,95,57,65,17,32,72,31,85,38,53,76,1,58,41,87,76},new int[]{42,30,40,72,77,45,71,43,39,3,8,52,99,92,80,1,83,60,29,93,9,96,50,73,32,92}});
List<Integer> param1 = new ArrayList<>();
param1.add(14);
param1.add(28);
param1.add(28);
param1.add(48);
param1.add(14);
param1.add(19);
param1.add(6);
param1.add(11);
param1.add(8);
param1.add(25);
List<Integer> param2 = new ArrayList<>();
param2.add(17);
param2.add(27);
param2.add(16);
param2.add(37);
param2.add(7);
param2.add(20);
param2.add(5);
param2.add(18);
param2.add(10);
param2.add(14);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,227 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_SUBSTRINGS_WITH_SAME_FIRST_AND_LAST_CHARACTERS.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_SUBSTRINGS_WITH_SAME_FIRST_AND_LAST_CHARACTERS{
static int f_gold ( String s ) {
int result = 0 ;
int n = s . length ( ) ;
for ( int i = 0 ;
i < n ;
i ++ ) for ( int j = i ;
j < n ;
j ++ ) if ( s . charAt ( i ) == s . charAt ( j ) ) result ++ ;
return result ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("LZIKA");
param0.add("0556979952");
param0.add("110010");
param0.add("kGaYfd");
param0.add("413567670657");
param0.add("01001");
param0.add("EQPuFa");
param0.add("48848378");
param0.add("110");
param0.add("PLehNeP");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,228 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/DIFFERENCE_BETWEEN_HIGHEST_AND_LEAST_FREQUENCIES_IN_AN_ARRAY.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class DIFFERENCE_BETWEEN_HIGHEST_AND_LEAST_FREQUENCIES_IN_AN_ARRAY{
// Returns the difference between the largest and smallest per-run counters
// over runs of equal values in the (sorted) array, considering only the
// first n elements' adjacent pairs.
// NOTE(review): sorts arr in place, so the caller's array is mutated.
// NOTE(review): `count` accumulates (run length - 1), not the run length,
// the final run is never folded into max_count/min_count, and min_count
// starts at n.  These quirks are part of the gold reference's observable
// behavior, so they are documented rather than changed.
static int f_gold ( int arr [ ] , int n ) {
Arrays . sort ( arr ) ;
int count = 0 , max_count = 0 , min_count = n ;
for ( int i = 0 ;
i < ( n - 1 ) ;
i ++ ) {
// Equal neighbours extend the current run.
if ( arr [ i ] == arr [ i + 1 ] ) {
count += 1 ;
continue ;
}
else {
// Run ended: fold it into the running max/min, then reset.
max_count = Math . max ( max_count , count ) ;
min_count = Math . min ( min_count , count ) ;
count = 0 ;
}
}
return ( max_count - min_count ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{5,15,19,22,28,29,39,46,46,49,51,55,62,69,72,72,72,74,79,92,92,93,95,96});
param0.add(new int[]{-26,-54,92,76,-92,-14,-24,-70,-78,-50,-48,-22,12,2,-34,-60,4,-32,-10,52,-92,-74,18,34,6,-66,42,-10,-6,56,92});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{59,35,13,79,61,97,92,48,98,38,65,54,31,49,81,22,96,29,65,48,92,66,25,21,26,1,32,73,46,5,40,17,53,93,83,29});
param0.add(new int[]{-70,-34,-32,-30,-14,80,86,90});
param0.add(new int[]{0,1,0,1,1,0,0,0,1,1,1,0,0,0,1,1,1,1,0,0,0,0,1,1,1,1,1,1,0,0,1,1,0,1,1,0});
param0.add(new int[]{9});
param0.add(new int[]{94,10,70,42});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{64,76,49,55,92,15,4,8,95,60,90,3,7,79,84,17,96,10,80,26,22,15});
List<Integer> param1 = new ArrayList<>();
param1.add(15);
param1.add(30);
param1.add(24);
param1.add(29);
param1.add(4);
param1.add(23);
param1.add(0);
param1.add(2);
param1.add(24);
param1.add(20);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,229 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CHECK_WHETHER_NUMBER_DUCK_NUMBER_NOT.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class CHECK_WHETHER_NUMBER_DUCK_NUMBER_NOT{
static int f_gold ( String num ) {
int len = num . length ( ) ;
int count_zero = 0 ;
char ch ;
for ( int i = 1 ;
i < len ;
i ++ ) {
ch = num . charAt ( i ) ;
if ( ch == '0' ) count_zero ++ ;
}
return count_zero ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<String> param0 = new ArrayList<>();
param0.add("HLlQWSphZcIC");
param0.add("080287724");
param0.add("0000100000");
param0.add(" Q");
param0.add("4247040983");
param0.add("00001011101");
param0.add("LbNsnYTHmLbCf");
param0.add("24");
param0.add("110");
param0.add("ie");
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,230 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/COUNT_SINGLE_NODE_ISOLATED_SUB_GRAPHS_DISCONNECTED_GRAPH.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class COUNT_SINGLE_NODE_ISOLATED_SUB_GRAPHS_DISCONNECTED_GRAPH{
// Counts the entries graph[1] .. graph[6] that are zero, i.e. the
// single-node (isolated) components of a graph whose nodes are numbered
// 1..6 and whose degree/marker values live in `graph`.
// NOTE(review): the upper bound 7 is hardcoded and the parameter N is
// never read -- presumably intentional for the original fixed 7-slot
// graph representation; confirm before reusing with other sizes.
// graph[0] is also skipped (nodes appear to be 1-indexed).
static int f_gold ( int [ ] graph , int N ) {
int count = 0 ;
for ( int i = 1 ;
i < 7 ;
i ++ ) {
if ( graph [ i ] == 0 ) count ++ ;
}
return count ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{18,26,39,43,46,57,63,76,84,88});
param0.add(new int[]{76,-92,-40,48,84,8,28,64,84,-58,40,48,-8,22,84,-14,-32,-66,84,-74,10,50,96,92,-60,70,0,2,16,-26});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{15,76,11,70,34,54,4,33,20,93,51,9,58,50,23,97,42,28,98,3,21,39,20,11,38});
param0.add(new int[]{-96,-84,-74,-58,-52,-52,-28,-24,-22,-12,-12,-8,-6,-2,-2,8,10,20,24,32,36,36,46,54,66,88,94});
param0.add(new int[]{0,1,1,1,1,0,0,0,0,0,0,1,1,0,1,1,0,1,1,1,0,1,1,1,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,1,0,1,1,1,0,1});
param0.add(new int[]{1,1,4,9,13,18,18,21,22,32,33,39,41,44,51,55,56,59,60,61,63,68,69,71,72,73,74,74,75,81,83,87,88,92,94,97});
param0.add(new int[]{10,54,-64,30,-50,-4,14,-96,-22,80,-36,-36,-92,58,28,10,32,-82,-6,-40,0,-46,-68,-18,-16,-38,-22,-68,-82,76,70,-48,10,50,82,98,-22,-74,22,-60,-70,46,84,88,-34,-30,88,26});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{25,39,1,6,86,45,19,76,65,29,9});
List<Integer> param1 = new ArrayList<>();
param1.add(8);
param1.add(15);
param1.add(31);
param1.add(12);
param1.add(20);
param1.add(24);
param1.add(22);
param1.add(35);
param1.add(41);
param1.add(7);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i)) == f_gold(param0.get(i),param1.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,231 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/AREA_SQUARE_CIRCUMSCRIBED_CIRCLE.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class AREA_SQUARE_CIRCUMSCRIBED_CIRCLE{
static int f_gold ( int r ) {
return ( 2 * r * r ) ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(14);
param0.add(78);
param0.add(45);
param0.add(66);
param0.add(18);
param0.add(32);
param0.add(60);
param0.add(16);
param0.add(99);
param0.add(65);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,232 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/NUMBER_NON_NEGATIVE_INTEGRAL_SOLUTIONS_B_C_N.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
public class NUMBER_NON_NEGATIVE_INTEGRAL_SOLUTIONS_B_C_N{
static int f_gold ( int n ) {
int result = 0 ;
for ( int i = 0 ;
i <= n ;
i ++ ) for ( int j = 0 ;
j <= n - i ;
j ++ ) for ( int k = 0 ;
k <= ( n - i - j ) ;
k ++ ) if ( i + j + k == n ) result ++ ;
return result ;
}
//TOFILL
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(62);
param0.add(44);
param0.add(37);
param0.add(81);
param0.add(14);
param0.add(20);
param0.add(76);
param0.add(72);
param0.add(96);
param0.add(52);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i)) == f_gold(param0.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
} | 6,233 |
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/CHECK_TWO_GIVEN_CIRCLES_TOUCH_INTERSECT.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
// Evaluation harness: classify the relation of two circles
// (center (x1,y1) radius r1 vs. center (x2,y2) radius r2).
public class CHECK_TWO_GIVEN_CIRCLES_TOUCH_INTERSECT{
// Reference ("gold") implementation. Compares squared center distance with
// the squared sum of radii to avoid sqrt. Returns:
//   1  -> circles touch externally (distance == r1 + r2)
//  -1  -> circles are separate (distance > r1 + r2)
//   0  -> circles intersect (or one contains the other; internal tangency
//         also lands here since only the external case is singled out)
// Caution: the squared terms are computed in int and can overflow for
// large coordinates/radii; fine for the driver values below.
static int f_gold ( int x1 , int y1 , int x2 , int y2 , int r1 , int r2 ) {
int distSq = ( x1 - x2 ) * ( x1 - x2 ) + ( y1 - y2 ) * ( y1 - y2 ) ;
int radSumSq = ( r1 + r2 ) * ( r1 + r2 ) ;
if ( distSq == radSumSq ) return 1 ;
else if ( distSq > radSumSq ) return - 1 ;
else return 0 ;
}
//TOFILL
// Driver: compares the candidate f_filled against f_gold on the fixed
// (x1, y1, x2, y2, r1, r2) tuples below.
public static void main(String args[]) {
int n_success = 0;
List<Integer> param0 = new ArrayList<>();
param0.add(11);
param0.add(87);
param0.add(51);
param0.add(89);
param0.add(64);
param0.add(57);
param0.add(65);
param0.add(32);
param0.add(73);
param0.add(3);
List<Integer> param1 = new ArrayList<>();
param1.add(36);
param1.add(1);
param1.add(1);
param1.add(67);
param1.add(10);
param1.add(86);
param1.add(90);
param1.add(23);
param1.add(61);
param1.add(99);
List<Integer> param2 = new ArrayList<>();
param2.add(62);
param2.add(62);
param2.add(47);
param2.add(9);
param2.add(79);
param2.add(99);
param2.add(42);
param2.add(28);
param2.add(63);
param2.add(6);
List<Integer> param3 = new ArrayList<>();
param3.add(64);
param3.add(64);
param3.add(90);
param3.add(52);
param3.add(45);
param3.add(43);
param3.add(82);
param3.add(26);
param3.add(77);
param3.add(19);
List<Integer> param4 = new ArrayList<>();
param4.add(50);
param4.add(54);
param4.add(14);
param4.add(94);
param4.add(67);
param4.add(83);
param4.add(77);
param4.add(60);
param4.add(92);
param4.add(21);
List<Integer> param5 = new ArrayList<>();
param5.add(4);
param5.add(41);
param5.add(71);
param5.add(21);
param5.add(78);
param5.add(63);
param5.add(32);
param5.add(45);
param5.add(76);
param5.add(28);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i),param3.get(i),param4.get(i),param5.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i),param3.get(i),param4.get(i),param5.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
}
0 | Create_ds/CodeGen/data/transcoder_evaluation_gfg | Create_ds/CodeGen/data/transcoder_evaluation_gfg/java/MINIMUM_NUMBER_PLATFORMS_REQUIRED_RAILWAYBUS_STATION.java | // Copyright (c) 2019-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
//
import java.util. *;
import java.util.stream.*;
import java.lang.*;
import javafx.util.Pair;
// Evaluation harness: minimum number of platforms needed at a station so
// that no train waits, given arrival and departure times.
public class MINIMUM_NUMBER_PLATFORMS_REQUIRED_RAILWAYBUS_STATION{
// Reference ("gold") implementation: classic two-pointer sweep over the
// sorted arrival/departure times, tracking the running number of trains
// simultaneously in the station and its maximum.
// Side effect: sorts both input arrays in place (mutates caller's arrays).
// NOTE(review): the whole arrays are sorted but only the first n entries
// are consumed by the sweep -- harmless here, where n matches the data.
static int f_gold ( int arr [ ] , int dep [ ] , int n ) {
Arrays . sort ( arr ) ;
Arrays . sort ( dep ) ;
int plat_needed = 1 , result = 1 ;
int i = 1 , j = 0 ;
while ( i < n && j < n ) {
// Next event is an arrival (ties count as overlap): one more platform.
if ( arr [ i ] <= dep [ j ] ) {
plat_needed ++ ;
i ++ ;
if ( plat_needed > result ) result = plat_needed ;
}
// Next event is a departure: one platform frees up.
else {
plat_needed -- ;
j ++ ;
}
}
return result ;
}
//TOFILL
// Driver: compares the candidate f_filled against f_gold. Note f_filled is
// evaluated first, so it sees the arrays before f_gold sorts them in place.
public static void main(String args[]) {
int n_success = 0;
List<int [ ]> param0 = new ArrayList<>();
param0.add(new int[]{8,24,28,64,75,86,93,95});
param0.add(new int[]{2,-30,-8,-78,58,-42,-94,84,-58,14,78,34,30,6,-18,-92,0,94,-54,58,0,-86,66,86,8,-26,50,16,-30,-68,98,-28,-4,-6});
param0.add(new int[]{0,0,0,0,0,0,1});
param0.add(new int[]{51,5,48,61,71,2,4,35,50,76,59,64,81,5,21,95});
param0.add(new int[]{-64,-52,44,52,90});
param0.add(new int[]{0,0,1,0,1,0,1,1,0,1,1,1,0,1,0,1,0,1,0,0,0,1,1,1,0,1,0,1,1,1});
param0.add(new int[]{2,15,25,55,72,96,98});
param0.add(new int[]{-60,30,-58,52,40,74,74,76,-72,-48,8,-56,-24,-40,-98,-76,-56,-20,30,-30,-34,4,-34});
param0.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param0.add(new int[]{37,84,20,34,56,1,87,72});
List<int [ ]> param1 = new ArrayList<>();
param1.add(new int[]{19,30,41,51,62,68,85,96});
param1.add(new int[]{40,22,-24,80,-76,-4,-8,-34,96,-98,16,28,14,52,10,-10,-62,64,-48,10,-64,-90,-52,46,34,50,50,-84,68,-12,-44,28,-22,78});
param1.add(new int[]{0,0,0,0,0,1,1});
param1.add(new int[]{67,84,86,43,50,90,49,8,40,67,5,51,40,28,31,47});
param1.add(new int[]{-62,-16,22,26,58});
param1.add(new int[]{0,0,1,1,1,0,1,1,0,0,0,0,1,1,0,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0});
param1.add(new int[]{3,6,11,19,26,37,39});
param1.add(new int[]{-96,-40,-76,52,-20,-28,-64,-72,36,56,52,34,14,8,-50,6,-82,-98,-8,18,-76,-66,-22});
param1.add(new int[]{0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1});
param1.add(new int[]{68,62,84,54,15,29,70,96});
List<Integer> param2 = new ArrayList<>();
param2.add(6);
param2.add(18);
param2.add(6);
param2.add(8);
param2.add(3);
param2.add(17);
param2.add(6);
param2.add(20);
param2.add(22);
param2.add(6);
for(int i = 0; i < param0.size(); ++i)
{
if(f_filled(param0.get(i),param1.get(i),param2.get(i)) == f_gold(param0.get(i),param1.get(i),param2.get(i)))
{
n_success+=1;
}
}
System.out.println("#Results:" + n_success + ", " + param0.size());
}
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/floats.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 21 13:16:53 GMT 2021
*/
import org.junit.Test;
import static org.junit.Assert.*;
import org.evosuite.runtime.EvoRunner;
import org.evosuite.runtime.EvoRunnerParameters;
import org.junit.runner.RunWith;
@RunWith(EvoRunner.class) @EvoRunnerParameters(mockJVMNonDeterminism = true, useVFS = true, useVNET = true, resetStaticState = true, separateClassLoader = true)
// EvoSuite-generated regression tests for the static sqr(float) method of the
// hashed class under test. Do not hand-edit expectations without re-running
// the generator.
public class CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb_ESTest extends CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb_ESTest_scaffolding {
// sqr of a large positive float; tolerance 0.01 absorbs float rounding.
@Test(timeout = 4000)
public void test0() throws Throwable {
float float0 = CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb.sqr(31337.701F);
assertEquals(9.8205152E8F, float0, 0.01F);
}
// Edge case: sqr(0) == 0.
@Test(timeout = 4000)
public void test1() throws Throwable {
float float0 = CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb.sqr(0.0F);
assertEquals(0.0F, float0, 0.01F);
}
// Default-constructor smoke test.
@Test(timeout = 4000)
public void test2() throws Throwable {
CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb cLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb0 = new CLASS_9167f62308cfc555ab31a6e4dcdcc95ca2bdcab48016d16bd5b42146ef1977eb();
}
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/doubles.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 21 13:17:21 GMT 2021
*/
import org.junit.Test;
import static org.junit.Assert.*;
import org.evosuite.runtime.EvoRunner;
import org.evosuite.runtime.EvoRunnerParameters;
import org.junit.runner.RunWith;
@RunWith(EvoRunner.class) @EvoRunnerParameters(mockJVMNonDeterminism = true, useVFS = true, useVNET = true, resetStaticState = true, separateClassLoader = true)
public class CLASS_4819651a89a417bce7b2158d1101004f26892e6022f6d1e6348175e23666ec38_ESTest extends CLASS_4819651a89a417bce7b2158d1101004f26892e6022f6d1e6348175e23666ec38_ESTest_scaffolding {
// EvoSuite-generated regression: simplep(3051.0) returns its input unchanged
// for this value (tolerance 1e-4 absorbs double rounding).
@Test(timeout = 4000)
public void test0() throws Throwable {
Double double0 = new Double(3051.0);
double double1 = CLASS_4819651a89a417bce7b2158d1101004f26892e6022f6d1e6348175e23666ec38.simplep(double0);
assertEquals(3051.0, double1, 1.0E-4);
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/java_list.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 21 13:41:06 GMT 2021
*/
import org.junit.Test;
import static org.junit.Assert.*;
import java.util.ArrayList;
import org.evosuite.runtime.EvoRunner;
import org.evosuite.runtime.EvoRunnerParameters;
import org.junit.runner.RunWith;
@RunWith(EvoRunner.class) @EvoRunnerParameters(mockJVMNonDeterminism = true, useVFS = true, useVNET = true, resetStaticState = true, separateClassLoader = true)
// EvoSuite-generated regression tests for ExpandArrayByLastE_Double.
// NOTE(review): from the assertions, the method appears to grow the list by
// repeating its last element up to a size derived from that element's value
// (e.g. last element 9000.554 -> size 9001) -- inferred, confirm against the
// class under test before relying on it.
public class CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1_ESTest extends CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1_ESTest_scaffolding {
// Passing a value not already in the list does not insert it.
@Test(timeout = 4000)
public void test0() throws Throwable {
ArrayList<Double> arrayList0 = new ArrayList<Double>();
Double double0 = new Double(0.0);
arrayList0.add(double0);
Double double1 = new Double(1.0);
CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1.ExpandArrayByLastE_Double(arrayList0, double1);
assertFalse(arrayList0.contains(double1));
}
// Existing element survives the expansion.
@Test(timeout = 4000)
public void test1() throws Throwable {
ArrayList<Double> arrayList0 = new ArrayList<Double>();
Double double0 = new Double(0.0);
arrayList0.add(double0);
CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1.ExpandArrayByLastE_Double(arrayList0, double0);
assertTrue(arrayList0.contains(0.0));
}
// Expansion with last element 9000.554 yields 9001 entries.
@Test(timeout = 4000)
public void test2() throws Throwable {
ArrayList<Double> arrayList0 = new ArrayList<Double>();
Double double0 = new Double(9000.554);
arrayList0.add(double0);
CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1.ExpandArrayByLastE_Double(arrayList0, double0);
assertEquals(9001, arrayList0.size());
}
// Empty input list stays empty (nothing to repeat).
@Test(timeout = 4000)
public void test3() throws Throwable {
ArrayList<Double> arrayList0 = new ArrayList<Double>();
Double double0 = new Double(0.0);
CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1.ExpandArrayByLastE_Double(arrayList0, double0);
assertFalse(arrayList0.contains(double0));
}
// Default-constructor smoke test.
@Test(timeout = 4000)
public void test4() throws Throwable {
CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1 cLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1_0 = new CLASS_db35bde703321c750c7134d5769b704c9ab7f9841c6654abb814683a361f9de1();
}
}
| 6,238 |
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/strings.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 21 13:16:49 GMT 2021
*/
import org.junit.Test;
import static org.junit.Assert.*;
import org.evosuite.runtime.EvoRunner;
import org.evosuite.runtime.EvoRunnerParameters;
import org.junit.runner.RunWith;
@RunWith(EvoRunner.class) @EvoRunnerParameters(mockJVMNonDeterminism = true, useVFS = true, useVNET = true, resetStaticState = true, separateClassLoader = true)
// EvoSuite-generated regression tests for the static getProperty method of
// the hashed class under test.
public class CLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace_ESTest extends CLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace_ESTest_scaffolding {
// Default-constructor smoke test.
@Test(timeout = 4000)
public void test0() throws Throwable {
CLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace cLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace0 = new CLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace();
}
// Regression: getProperty("", "") returns null for the empty key/default pair.
@Test(timeout = 4000)
public void test1() throws Throwable {
String string0 = CLASS_27c05c7602b81c3bcf7ac99abf940ebc4c909da67935bb59bc30e51ac3933ace.getProperty("", "");
assertNull(string0);
}
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/strings_null_casting.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 21 13:16:57 GMT 2021
*/
public class CLASS_c2a773c670339b0d7be430a133f7f597ae56ad8ebb7f7209c0fe9edbd248fd04_ESTest extends CLASS_c2a773c670339b0d7be430a133f7f597ae56ad8ebb7f7209c0fe9edbd248fd04_ESTest_scaffolding {
// EvoSuite-generated regression: isExtension("", null) is false; the explicit
// (String) cast disambiguates the overload resolved for the null argument.
@Test(timeout = 4000)
public void test2() throws Throwable {
boolean boolean0 = CLASS_c2a773c670339b0d7be430a133f7f597ae56ad8ebb7f7209c0fe9edbd248fd04.isExtension("", (String) null);
assertFalse(boolean0);
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/integer_array_check.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 14 18:54:05 GMT 2021
*/
import org.junit.Test;
import static org.junit.Assert.*;
import org.evosuite.runtime.EvoRunner;
import org.evosuite.runtime.EvoRunnerParameters;
import org.junit.runner.RunWith;
@RunWith(EvoRunner.class) @EvoRunnerParameters(mockJVMNonDeterminism = true, useVFS = true, useVNET = true, resetStaticState = true, separateClassLoader = true)
// EvoSuite-generated regression tests for
// PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K.isPossible(Integer[], int[], n, k).
// The inline assertEquals/assertTrue calls on freshly-built Integers are
// generator noise pinning autoboxing behaviour; the meaningful checks are the
// isPossible result and the final array contents.
public class PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K_ESTest extends PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K_ESTest_scaffolding {
// Mixed-sign pair with n=1, k=0: pairing is possible; arrays unchanged.
@Test(timeout = 4000)
public void test0() throws Throwable {
Integer[] integerArray0 = new Integer[2];
int int0 = (-1);
Integer integer0 = new Integer((-1));
assertEquals((-1), (int)integer0);
assertTrue(integer0.equals((Object)int0));
assertNotNull(integer0);
integerArray0[0] = integer0;
Integer integer1 = new Integer(1);
assertEquals(1, (int)integer1);
assertFalse(integer1.equals((Object)int0));
assertFalse(integer1.equals((Object)integer0));
assertNotNull(integer1);
integerArray0[1] = integer1;
int[] intArray0 = new int[3];
intArray0[2] = int0;
boolean boolean0 = PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K.isPossible(integerArray0, intArray0, 1, 0);
assertTrue(boolean0);
assertArrayEquals(new int[] {(-1), 0, 0}, intArray0);
assertEquals(2, integerArray0.length);
assertEquals(3, intArray0.length);
}
// Strongly negative k: every pair trivially satisfies the threshold.
@Test(timeout = 4000)
public void test1() throws Throwable {
Integer[] integerArray0 = new Integer[2];
int int0 = (-1);
Integer integer0 = new Integer((-1));
assertEquals((-1), (int)integer0);
assertTrue(integer0.equals((Object)int0));
assertNotNull(integer0);
integerArray0[0] = integer0;
int int1 = 1;
integerArray0[1] = integer0;
int[] intArray0 = new int[3];
intArray0[2] = int0;
boolean boolean0 = PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K.isPossible(integerArray0, intArray0, int1, (-50146));
assertTrue(boolean0);
assertArrayEquals(new int[] {(-1), 0, 0}, intArray0);
assertFalse(int1 == int0);
assertEquals(2, integerArray0.length);
assertEquals(3, intArray0.length);
}
// Negative n: the pairing loop never runs, so the result is vacuously true.
@Test(timeout = 4000)
public void test2() throws Throwable {
Integer[] integerArray0 = new Integer[2];
Integer integer0 = new Integer((-1));
assertEquals((-1), (int)integer0);
assertNotNull(integer0);
integerArray0[0] = integer0;
integerArray0[1] = integer0;
int[] intArray0 = new int[3];
boolean boolean0 = PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K.isPossible(integerArray0, intArray0, (-54229), 1);
assertTrue(boolean0);
assertArrayEquals(new int[] {0, 0, 0}, intArray0);
assertEquals(2, integerArray0.length);
assertEquals(3, intArray0.length);
}
// Pair (-1, 0) cannot reach threshold k=1: isPossible is false.
@Test(timeout = 4000)
public void test3() throws Throwable {
Integer[] integerArray0 = new Integer[2];
Integer integer0 = new Integer((-1));
assertEquals((-1), (int)integer0);
assertNotNull(integer0);
integerArray0[0] = integer0;
int int0 = 1;
integerArray0[1] = integerArray0[0];
int[] intArray0 = new int[3];
boolean boolean0 = PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K.isPossible(integerArray0, intArray0, 1, int0);
assertFalse(boolean0);
assertArrayEquals(new int[] {0, 0, 0}, intArray0);
assertEquals(2, integerArray0.length);
assertEquals(3, intArray0.length);
}
// Default-constructor smoke test.
@Test(timeout = 4000)
public void test4() throws Throwable {
PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K pERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K0 = new PERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K();
assertNotNull(pERMUTE_TWO_ARRAYS_SUM_EVERY_PAIR_GREATER_EQUAL_K0);
}
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/integer_array_casting.java | /*
* This file was automatically generated by EvoSuite
* Wed Apr 21 13:17:52 GMT 2021
*/
public class CLASS_196a45f8932c033f06f6a086488b268404e77353d16c9bc6407a417f237da6db_ESTest extends CLASS_196a45f8932c033f06f6a086488b268404e77353d16c9bc6407a417f237da6db_ESTest_scaffolding {
// EvoSuite-generated regression: missingNumber(null) returns 0; the explicit
// (int[]) cast disambiguates the overload resolved for the null argument.
@Test(timeout = 4000)
public void test3() throws Throwable {
int int0 = CLASS_196a45f8932c033f06f6a086488b268404e77353d16c9bc6407a417f237da6db.missingNumber((int[]) null);
assertEquals(0, int0);
}
0 | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources | Create_ds/CodeGen/codegen_sources/test_generation/evosuite_tests_translators/tests/resources/java_evosuite_tests/different_object_name.java | /*
* This file was automatically generated by EvoSuite
* Fri Jun 18 14:55:38 GMT 2021
*/
import org.junit.Test;
import static org.junit.Assert.*;
import static org.evosuite.runtime.EvoAssertions.*;
import org.evosuite.runtime.EvoRunner;
import org.evosuite.runtime.EvoRunnerParameters;
import org.junit.runner.RunWith;
@RunWith(EvoRunner.class) @EvoRunnerParameters(mockJVMNonDeterminism = true, useVFS = true, useVNET = true, resetStaticState = true, separateClassLoader = true)
// EvoSuite-generated regression test for the instance method getOddOccurrence.
public class CLASS_FIND_THE_NUMBER_OCCURRING_ODD_NUMBER_OF_TIMES_2_ESTest extends CLASS_FIND_THE_NUMBER_OCCURRING_ODD_NUMBER_OF_TIMES_2_ESTest_scaffolding {
// A single-element zero array yields 0 regardless of the (negative) size arg.
@Test(timeout = 4000)
public void test0() throws Throwable {
CLASS_FIND_THE_NUMBER_OCCURRING_ODD_NUMBER_OF_TIMES_2 cLASS_FIND_THE_NUMBER_OCCURRING_ODD_NUMBER_OF_TIMES_2_0 = new CLASS_FIND_THE_NUMBER_OCCURRING_ODD_NUMBER_OF_TIMES_2();
int[] intArray0 = new int[1];
int int0 = cLASS_FIND_THE_NUMBER_OCCURRING_ODD_NUMBER_OF_TIMES_2_0.getOddOccurrence(intArray0, (-39131));
assertEquals(0, int0);
}
}
0 | Create_ds/sputnik/src/test/java/com/airbnb/sputnik | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools/BeansDataset.java | package com.airbnb.sputnik.tools;
import com.airbnb.sputnik.tools.beans.JsonData;
import com.airbnb.sputnik.tools.beans.ParsedData;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import java.util.Collections;
/** Test fixture: builds a one-row Spark {@code Dataset} of {@link ParsedData} beans. */
public class BeansDataset {

  /**
   * Creates a single-element Dataset holding one ParsedData bean whose nested
   * JsonData field is populated, encoded with a JavaBean encoder.
   */
  public static Dataset<ParsedData> getDataset(SparkSession sparkSession) {
    final JsonData nested = new JsonData();
    nested.setInnerFieldOne("hi");

    final ParsedData row = new ParsedData();
    row.setFieldOne("someValue1");
    row.setJsonFieldOne(nested);

    final Encoder<ParsedData> encoder = Encoders.bean(ParsedData.class);
    return sparkSession.createDataset(Collections.singletonList(row), encoder);
  }
}
| 6,244 |
0 | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools/beans/MapData.java | package com.airbnb.sputnik.tools.beans;
/**
 * Plain JavaBean used in tests as the value type behind a {@code @MapField}
 * (see {@link ParsedDataMap}); its two string properties become map entries.
 */
public class MapData {
private String innerFieldOne;
private String innerFieldTwo;
public String getInnerFieldOne() {
return innerFieldOne;
}
public void setInnerFieldOne(String innerFieldOne) {
this.innerFieldOne = innerFieldOne;
}
public String getInnerFieldTwo() {
return innerFieldTwo;
}
public void setInnerFieldTwo(String innerFieldTwo) {
this.innerFieldTwo = innerFieldTwo;
}
}
| 6,245 |
0 | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools/beans/ParsedData.java | package com.airbnb.sputnik.tools.beans;
import com.airbnb.sputnik.annotations.Comment;
import com.airbnb.sputnik.annotations.FieldsFormatting;
import com.airbnb.sputnik.annotations.JsonField;
import com.airbnb.sputnik.annotations.TableName;
import com.google.common.base.CaseFormat;
import java.io.Serializable;
/**
 * Test bean mapped to Hive table "default.someTableParsedData" with
 * lower_underscore column names; exercises Sputnik's table annotations.
 * fieldOne carries a column {@code @Comment}; jsonFieldOne is marked
 * {@code @JsonField} so its nested bean is handled as a JSON column.
 */
@TableName("default.someTableParsedData")
@FieldsFormatting(CaseFormat.LOWER_UNDERSCORE)
public class ParsedData implements Serializable {
@Comment("Some comment")
private String fieldOne;
@JsonField private JsonData jsonFieldOne;
public String getFieldOne() {
return fieldOne;
}
public void setFieldOne(String fieldOne) {
this.fieldOne = fieldOne;
}
public JsonData getJsonFieldOne() {
return jsonFieldOne;
}
public void setJsonFieldOne(JsonData jsonFieldOne) {
this.jsonFieldOne = jsonFieldOne;
}
}
| 6,246 |
0 | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools/beans/JsonData.java | package com.airbnb.sputnik.tools.beans;
import java.io.Serializable;
/**
 * Plain serializable JavaBean used in tests as the payload of a
 * {@code @JsonField}-annotated property (see {@link ParsedData}).
 */
public class JsonData implements Serializable {
private String innerFieldOne;
public String getInnerFieldOne() {
return innerFieldOne;
}
public void setInnerFieldOne(String innerFieldOne) {
this.innerFieldOne = innerFieldOne;
}
}
| 6,247 |
0 | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools | Create_ds/sputnik/src/test/java/com/airbnb/sputnik/tools/beans/ParsedDataMap.java | package com.airbnb.sputnik.tools.beans;
import com.airbnb.sputnik.annotations.MapField;
/**
 * Test bean whose {@code mapData} property is marked {@code @MapField},
 * i.e. its {@link MapData} bean is to be materialised as a map column.
 */
public class ParsedDataMap {
private String fieldOne;
@MapField private MapData mapData;
public String getFieldOne() {
return fieldOne;
}
public void setFieldOne(String fieldOne) {
this.fieldOne = fieldOne;
}
public MapData getMapData() {
return mapData;
}
public void setMapData(MapData mapData) {
this.mapData = mapData;
}
}
| 6,248 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/enums/TableFileFormat.java | package com.airbnb.sputnik.enums;
/** File formats Sputnik supports for the Hive tables it writes (see {@code @TableFormat}). */
public enum TableFileFormat {
  ORC,
  PARQUET,
  RCFILE
}
| 6,249 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/JsonField.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a bean field whose nested object is to be handled as a JSON-encoded
 * column. NOTE(review): semantics inferred from usage on
 * {@code ParsedData.jsonFieldOne} -- confirm against the field-processing code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface JsonField {
}
| 6,250 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/TableParallelism.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Class-level setting holding an integer parallelism value for the annotated
 * table bean. NOTE(review): presumably the partition/write parallelism used
 * when materialising the table -- confirm against the writer code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface TableParallelism {
/** Parallelism value for this table. */
int value();
}
| 6,251 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/TableName.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the fully-qualified Hive table name ("db.table") that the
 * annotated bean class maps to, e.g. {@code @TableName("default.someTableParsedData")}.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface TableName {
/** Fully-qualified table name, e.g. "default.my_table". */
String value();
}
| 6,252 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/FieldsFormatting.java | package com.airbnb.sputnik.annotations;
import com.google.common.base.CaseFormat;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the Guava {@link com.google.common.base.CaseFormat} used to render
 * the bean's camelCase field names as column names (e.g. LOWER_UNDERSCORE on
 * {@code ParsedData}). NOTE(review): exact conversion source format assumed
 * to be lowerCamel -- confirm in the schema code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface FieldsFormatting {
/** Target case format for column names. */
CaseFormat value();
}
| 6,253 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/MapField.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a bean field whose nested bean is to be materialised as a map column.
 * NOTE(review): semantics inferred from usage on {@code ParsedDataMap.mapData}
 * -- confirm against the field-processing code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface MapField {}
| 6,254 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/Comment.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Attaches a free-text comment to the annotated field's column --
 * presumably emitted into the generated Hive column DDL; confirm in the
 * table-creation code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface Comment {
/** Column comment text. */
String value();
}
| 6,255 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/FieldsSubsetIsAllowed.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Class-level flag. NOTE(review): the name suggests it controls whether the
 * bean may cover only a subset of the target table's columns -- inferred
 * from the identifier only; confirm against the schema-validation code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface FieldsSubsetIsAllowed {
/** True if a subset of the table's fields is acceptable. */
boolean value();
}
| 6,256 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/TableFormat.java | package com.airbnb.sputnik.annotations;
import com.airbnb.sputnik.enums.TableFileFormat;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Declares the file format ({@link TableFileFormat}: ORC, PARQUET or RCFILE)
 * for the table the annotated bean class maps to.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface TableFormat {
/** File format for this table. */
TableFileFormat value();
}
| 6,257 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/TableDescription.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Attaches a free-text description to the annotated table bean --
 * presumably emitted as the Hive table-level comment; confirm in the
 * table-creation code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface TableDescription {
/** Table description text. */
String value();
}
| 6,258 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/PartitioningField.java | package com.airbnb.sputnik.annotations;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Marks a bean field as a partition column of the target table.
 * NOTE(review): inferred from the identifier; confirm against the
 * table-creation/partitioning code.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface PartitioningField {
}
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/checks/NotEmptyCheck.java | package com.airbnb.sputnik.annotations.checks;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Class-level data-quality check marker. NOTE(review): the name suggests the
 * produced dataset/table must contain at least one record -- inferred from
 * the identifier; confirm against the checks runner.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface NotEmptyCheck {
}
| 6,260 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/checks/RecordMinCount.java | package com.airbnb.sputnik.annotations.checks;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Class-level data-quality check carrying a minimum record count for the
 * produced dataset/table. NOTE(review): threshold semantics (>= vs >)
 * inferred from the name; confirm against the checks runner.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.TYPE)
public @interface RecordMinCount {
/** Minimum acceptable number of records. */
int minCount();
}
| 6,261 |
0 | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations | Create_ds/sputnik/src/main/java/com/airbnb/sputnik/annotations/checks/NotNull.java | package com.airbnb.sputnik.annotations.checks;
import java.lang.annotation.ElementType;
import java.lang.annotation.Retention;
import java.lang.annotation.RetentionPolicy;
import java.lang.annotation.Target;
/**
 * Field-level data-quality check marker. NOTE(review): the name suggests the
 * annotated column must contain no null values -- inferred from the
 * identifier; confirm against the checks runner.
 */
@Retention(RetentionPolicy.RUNTIME)
@Target(ElementType.FIELD)
public @interface NotNull {
}
| 6,262 |
0 | Create_ds/iceberg/pig/src/test/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/test/java/com/netflix/iceberg/pig/SchemaUtilTest.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types.BinaryType;
import com.netflix.iceberg.types.Types.BooleanType;
import com.netflix.iceberg.types.Types.DecimalType;
import com.netflix.iceberg.types.Types.DoubleType;
import com.netflix.iceberg.types.Types.FloatType;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StringType;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.pig.ResourceSchema;
import org.apache.pig.impl.logicalLayer.FrontendException;
import org.junit.Test;
import java.io.IOException;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
import static org.junit.Assert.*;
public class SchemaUtilTest {
// Verifies that every Iceberg primitive type converts to the expected Pig
// field type. Field ids are now unique: the original reused ids 1 and 5
// (copy/paste), and Iceberg schemas require unique field ids -- newer
// versions reject duplicates at construction. Ids do not appear in the Pig
// schema, so the expected string is unchanged.
@Test
public void testPrimitive() throws IOException {
  Schema icebergSchema = new Schema(
      optional(1, "b", BooleanType.get()),
      optional(2, "i", IntegerType.get()),
      optional(3, "l", LongType.get()),
      optional(4, "f", FloatType.get()),
      optional(5, "d", DoubleType.get()),
      optional(6, "dec", DecimalType.of(0, 2)),
      optional(7, "s", StringType.get()),
      optional(8, "bi", BinaryType.get())
  );
  ResourceSchema pigSchema = SchemaUtil.convert(icebergSchema);
  assertEquals("b:boolean,i:int,l:long,f:float,d:double,dec:bigdecimal,s:chararray,bi:bytearray", pigSchema.toString());
}
@Test
public void testComplex() throws IOException {
convertToPigSchema(
new Schema(
optional(1, "bag", ListType.ofOptional(2, BooleanType.get())),
optional(3, "map", MapType.ofOptional(4,5, StringType.get(), DoubleType.get())),
optional(6, "tuple", StructType.of(optional(7, "i", IntegerType.get()), optional(8,"f", FloatType.get())))
),"bag:{(boolean)},map:[double],tuple:(i:int,f:float)", null
);
}
@Test(expected = FrontendException.class)
public void invalidMap() throws IOException {
convertToPigSchema(new Schema(
optional(1, "invalid", MapType.ofOptional(2,3, IntegerType.get(), DoubleType.get()))
), "", "");
}
@Test
public void nestedMaps() throws IOException {
convertToPigSchema(new Schema(
optional(1, "nested",
MapType.ofOptional(2,3, StringType.get(),
MapType.ofOptional(4,5, StringType.get(),
MapType.ofOptional(6,7, StringType.get(), DecimalType.of(10,2)))))
),"nested:[[[bigdecimal]]]", "");
}
@Test
public void nestedBags() throws IOException {
convertToPigSchema(new Schema(
optional(1, "nested",
ListType.ofOptional(2,
ListType.ofOptional(3,
ListType.ofOptional(4, DoubleType.get()))))
), "nested:{({({(double)})})}", "");
}
@Test
public void nestedTuples() throws IOException {
convertToPigSchema(new Schema(
optional(1,"first", StructType.of(
optional(2, "second", StructType.of(
optional(3, "third", StructType.of(
optional(4, "val", StringType.get())
))
))
))
), "first:(second:(third:(val:chararray)))", "");
}
@Test
public void complexNested() throws IOException {
convertToPigSchema(new Schema(
optional(1,"t", StructType.of(
optional(2, "b", ListType.ofOptional(3,StructType.of(
optional(4, "i", IntegerType.get()),
optional(5,"s", StringType.get())
)))
)),
optional(6, "m1", MapType.ofOptional(7,8, StringType.get(), StructType.of(
optional(9, "b", ListType.ofOptional(10, BinaryType.get()) ),
optional(11, "m2", MapType.ofOptional(12,13, StringType.get(), IntegerType.get()))
))),
optional(14, "b1", ListType.ofOptional(15,
MapType.ofOptional(16,17, StringType.get(),
ListType.ofOptional(18, FloatType.get()))))
), "t:(b:{(i:int,s:chararray)}),m1:[(b:{(bytearray)},m2:[int])],b1:{([{(float)}])}", "");
}
@Test
public void mapConversions() throws IOException {
// consistent behavior for maps conversions. The below test case, correctly does not specify map key types
convertToPigSchema(
new Schema(
required(
1, "a",
MapType.ofRequired(
2, 3,
StringType.get(),
ListType.ofRequired(
4, StructType.of(
required(5, "b", LongType.get()),
required(6, "c", StringType.get())))))),
"a:[{(b:long,c:chararray)}]",
"We do not specify the map key type here");
// struct<a:map<string,map<string,double>>> -> (a:[[double]])
// As per https://pig.apache.org/docs/latest/basic.html#map-schema. It seems that
// we only need to specify value type as keys are always of type chararray
convertToPigSchema(
new Schema(
StructType.of(
required(1, "a", MapType.ofRequired(
2, 3,
StringType.get(),
MapType.ofRequired(4, 5, StringType.get(), DoubleType.get())))
).fields()),
"a:[[double]]",
"A map key type does not need to be specified");
}
@Test
public void testTupleInMap() throws IOException {
Schema icebergSchema = new Schema(
optional(
1, "nested_list",
MapType.ofOptional(
2, 3,
StringType.get(),
ListType.ofOptional(
4, StructType.of(
required(5, "id", LongType.get()),
optional(6, "data", StringType.get()))))));
ResourceSchema pigSchema = SchemaUtil.convert(icebergSchema);
assertEquals("nested_list:[{(id:long,data:chararray)}]", pigSchema.toString()); // The output should contain a nested struct within a list within a map, I think.
}
@Test
public void testLongInBag() throws IOException {
Schema icebergSchema = new Schema(
optional(
1, "nested_list",
MapType.ofOptional(
2, 3,
StringType.get(),
ListType.ofRequired(5, LongType.get()))));
SchemaUtil.convert(icebergSchema);
}
@Test
public void doubleWrappingTuples() throws IOException {
// struct<a:array<struct<b:string>>> -> (a:{(b:chararray)})
convertToPigSchema(
new Schema(
StructType.of(
required(1, "a", ListType.ofRequired(2, StructType.of(required(3, "b", StringType.get()))))
).fields()),
"a:{(b:chararray)}",
"A tuple inside a bag should not be double wrapped");
// struct<a:array<boolean>> -> "(a:{(boolean)})
convertToPigSchema(
new Schema(StructType.of(required(1, "a", ListType.ofRequired(2, BooleanType.get()))).fields()),
"a:{(boolean)}",
"boolean (or anything non-tuple) element inside a bag should be wrapped inside a tuple"
);
}
private static void convertToPigSchema(Schema icebergSchema, String expectedPigSchema, String assertMessage) throws IOException {
ResourceSchema pigSchema = SchemaUtil.convert(icebergSchema);
assertEquals(assertMessage, expectedPigSchema, pigSchema.toString());
}
} | 6,263 |
0 | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg/pig/PigParquetReader.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.parquet.ParquetValueReader;
import com.netflix.iceberg.parquet.ParquetValueReaders;
import com.netflix.iceberg.parquet.ParquetValueReaders.BinaryAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.FloatAsDoubleReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntAsLongReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.IntegerAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.LongAsDecimalReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.PrimitiveReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.RepeatedKeyValueReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.RepeatedReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.ReusableEntry;
import com.netflix.iceberg.parquet.ParquetValueReaders.StringReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.StructReader;
import com.netflix.iceberg.parquet.ParquetValueReaders.UnboxedReader;
import com.netflix.iceberg.parquet.TypeWithSchemaVisitor;
import com.netflix.iceberg.types.Type.TypeID;
import com.netflix.iceberg.types.Types;
import org.apache.parquet.column.ColumnDescriptor;
import org.apache.parquet.schema.DecimalMetadata;
import org.apache.parquet.schema.GroupType;
import org.apache.parquet.schema.MessageType;
import org.apache.parquet.schema.PrimitiveType;
import org.apache.parquet.schema.Type;
import org.apache.pig.backend.executionengine.ExecException;
import org.apache.pig.data.BagFactory;
import org.apache.pig.data.DataBag;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.data.Tuple;
import org.apache.pig.data.TupleFactory;
import java.time.Instant;
import java.time.OffsetDateTime;
import java.time.ZoneOffset;
import java.time.temporal.ChronoUnit;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.convert;
import static com.netflix.iceberg.parquet.ParquetSchemaUtil.hasIds;
import static com.netflix.iceberg.parquet.ParquetValueReaders.option;
import static java.lang.String.format;
/**
 * Builds {@link ParquetValueReader}s that materialize Parquet rows as Pig {@link Tuple}s,
 * matching file columns to an expected Iceberg read schema by field ID (with a fallback
 * name/position-based path for files written without IDs).
 */
public class PigParquetReader {
  // NOTE(review): raw type — ParquetValueReader<Tuple> would be more precise; left as-is.
  private final ParquetValueReader reader;
  public PigParquetReader(Schema readSchema, MessageType fileSchema, Map<Integer, Object> partitionValues) {
    this.reader = buildReader(convert(readSchema, fileSchema.getName()), readSchema, partitionValues);
  }
  /**
   * Builds a tuple reader for {@code fileSchema}. When the file schema carries Iceberg
   * field IDs, columns are matched by ID; otherwise the fallback builder matches the
   * top level by ID and nested fields positionally.
   *
   * @param fileSchema      the Parquet schema of the file being read
   * @param expectedSchema  the Iceberg schema the caller expects (projection)
   * @param partitionValues constant values to splice into each tuple, keyed by position
   */
  @SuppressWarnings("unchecked")
  public static ParquetValueReader<Tuple> buildReader(MessageType fileSchema, Schema expectedSchema, Map<Integer, Object> partitionValues) {
    if (hasIds(fileSchema)) {
      return (ParquetValueReader<Tuple>)
          TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
              new ReadBuilder(fileSchema, partitionValues));
    } else {
      return (ParquetValueReader<Tuple>)
          TypeWithSchemaVisitor.visit(expectedSchema.asStruct(), fileSchema,
              new FallbackReadBuilder(fileSchema, partitionValues));
    }
  }
  /** Builder for files without field IDs: nested structs are read positionally. */
  private static class FallbackReadBuilder extends ReadBuilder {
    FallbackReadBuilder(MessageType type, Map<Integer, Object> partitionValues) {
      super(type, partitionValues);
    }
    @Override
    public ParquetValueReader<?> message(Types.StructType expected, MessageType message, List<ParquetValueReader<?>> fieldReaders) {
      // the top level matches by ID, but the remaining IDs are missing
      return super.struct(expected, message, fieldReaders);
    }
    @Override
    public ParquetValueReader<?> struct(Types.StructType ignored, GroupType struct, List<ParquetValueReader<?>> fieldReaders) {
      // the expected struct is ignored because nested fields are never found when the
      // file schema has no field IDs; readers are built from the file's own field order.
      List<ParquetValueReader<?>> newFields = Lists.newArrayListWithExpectedSize(
          fieldReaders.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(fieldReaders.size());
      List<Type> fields = struct.getFields();
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = fields.get(i);
        // definition level of the field's parent (hence the -1): used to detect nulls
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        newFields.add(option(fieldType, fieldD, fieldReaders.get(i)));
        types.add(fieldType);
      }
      return new TupleReader(types, newFields, partitionValues);
    }
  }
  /** Default builder: matches file columns to expected fields by Iceberg field ID. */
  private static class ReadBuilder extends TypeWithSchemaVisitor<ParquetValueReader<?>> {
    final MessageType type;
    final Map<Integer, Object> partitionValues;
    ReadBuilder(MessageType type, Map<Integer, Object> partitionValues) {
      this.type = type;
      this.partitionValues = partitionValues;
    }
    @Override
    public ParquetValueReader<?> message(Types.StructType expected, MessageType message, List<ParquetValueReader<?>> fieldReaders) {
      return struct(expected, message.asGroupType(), fieldReaders);
    }
    @Override
    public ParquetValueReader<?> struct(Types.StructType expected, GroupType struct, List<ParquetValueReader<?>> fieldReaders) {
      // match the expected struct's order
      Map<Integer, ParquetValueReader<?>> readersById = Maps.newHashMap();
      Map<Integer, Type> typesById = Maps.newHashMap();
      List<Type> fields = struct.getFields();
      for (int i = 0; i < fields.size(); i += 1) {
        Type fieldType = fields.get(i);
        int fieldD = type.getMaxDefinitionLevel(path(fieldType.getName())) - 1;
        int id = fieldType.getId().intValue();
        readersById.put(id, option(fieldType, fieldD, fieldReaders.get(i)));
        typesById.put(id, fieldType);
      }
      List<Types.NestedField> expectedFields = expected != null ?
          expected.fields() : ImmutableList.of();
      List<ParquetValueReader<?>> reorderedFields = Lists.newArrayListWithExpectedSize(
          expectedFields.size());
      List<Type> types = Lists.newArrayListWithExpectedSize(expectedFields.size());
      for (Types.NestedField field : expectedFields) {
        int id = field.fieldId();
        ParquetValueReader<?> reader = readersById.get(id);
        if (reader != null) {
          reorderedFields.add(reader);
          types.add(typesById.get(id));
        } else {
          // expected field is missing from the file: fill the position with nulls
          reorderedFields.add(ParquetValueReaders.nulls());
          types.add(null);
        }
      }
      return new TupleReader(types, reorderedFields, partitionValues);
    }
    @Override
    public ParquetValueReader<?> list(Types.ListType expectedList, GroupType array, ParquetValueReader<?> elementReader) {
      GroupType repeated = array.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();
      // levels are for the repeated group's parent, hence -1
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
      Type elementType = repeated.getType(0);
      int elementD = type.getMaxDefinitionLevel(path(elementType.getName())) - 1;
      return new ArrayReader<>(repeatedD, repeatedR, option(elementType, elementD, elementReader));
    }
    @Override
    public ParquetValueReader<?> map(Types.MapType expectedMap, GroupType map, ParquetValueReader<?> keyReader, ParquetValueReader<?> valueReader) {
      GroupType repeatedKeyValue = map.getFields().get(0).asGroupType();
      String[] repeatedPath = currentPath();
      int repeatedD = type.getMaxDefinitionLevel(repeatedPath) - 1;
      int repeatedR = type.getMaxRepetitionLevel(repeatedPath) - 1;
      Type keyType = repeatedKeyValue.getType(0);
      int keyD = type.getMaxDefinitionLevel(path(keyType.getName())) - 1;
      Type valueType = repeatedKeyValue.getType(1);
      int valueD = type.getMaxDefinitionLevel(path(valueType.getName())) - 1;
      return new MapReader<>(repeatedD, repeatedR,
          option(keyType, keyD, keyReader), option(valueType, valueD, valueReader));
    }
    @Override
    public ParquetValueReader<?> primitive(com.netflix.iceberg.types.Type.PrimitiveType expected, PrimitiveType primitive) {
      ColumnDescriptor desc = type.getColumnDescription(currentPath());
      // Original (logical) type takes precedence over the physical primitive type.
      if (primitive.getOriginalType() != null) {
        switch (primitive.getOriginalType()) {
          case ENUM:
          case JSON:
          case UTF8: return new StringReader(desc);
          case DATE: return new DateReader(desc);
          case INT_8:
          case INT_16:
          case INT_32:
            if (expected != null && expected.typeId() == Types.LongType.get().typeId()) {
              // expected type was promoted int -> long
              return new IntAsLongReader(desc);
            } else {
              // NOTE(review): raw UnboxedReader here vs. UnboxedReader<>(desc) elsewhere — inconsistent but harmless
              return new UnboxedReader(desc);
            }
          case INT_64: return new UnboxedReader<>(desc);
          case TIMESTAMP_MILLIS: return new TimestampMillisReader(desc);
          case TIMESTAMP_MICROS: return new TimestampMicrosReader(desc);
          case DECIMAL:
            DecimalMetadata decimal = primitive.getDecimalMetadata();
            switch (primitive.getPrimitiveTypeName()) {
              case BINARY:
              case FIXED_LEN_BYTE_ARRAY: return new BinaryAsDecimalReader(desc, decimal.getScale());
              case INT32: return new IntegerAsDecimalReader(desc, decimal.getScale());
              case INT64: return new LongAsDecimalReader(desc, decimal.getScale());
              default:
                throw new UnsupportedOperationException(
                    "Unsupported base type for decimal: " + primitive.getPrimitiveTypeName());
            }
          default:
            throw new UnsupportedOperationException("Unsupported type: " + primitive.getOriginalType());
        }
      }
      switch (primitive.getPrimitiveTypeName()) {
        case FIXED_LEN_BYTE_ARRAY:
        case BINARY:
          return new BytesReader(desc);
        case INT32:
          if (expected != null && expected.typeId() == TypeID.LONG) {
            return new IntAsLongReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case FLOAT:
          if (expected != null && expected.typeId() == TypeID.DOUBLE) {
            return new FloatAsDoubleReader(desc);
          } else {
            return new UnboxedReader<>(desc);
          }
        case BOOLEAN:
        case INT64:
        case DOUBLE:
          return new UnboxedReader<>(desc);
        default:
          throw new UnsupportedOperationException("Unsupported type: " + primitive);
      }
    }
    /** Current field path from the visitor's name stack (outermost first). */
    private String[] currentPath() {
      String[] path = new String[fieldNames.size()];
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }
    /** Current path extended with one more field name. */
    protected String[] path(String name) {
      String[] path = new String[fieldNames.size() + 1];
      path[fieldNames.size()] = name;
      if (!fieldNames.isEmpty()) {
        Iterator<String> iter = fieldNames.descendingIterator();
        for (int i = 0; iter.hasNext(); i += 1) {
          path[i] = iter.next();
        }
      }
      return path;
    }
  }
  /** Reads a Parquet DATE (days since epoch) as an ISO "yyyy-MM-dd" string. */
  private static class DateReader extends PrimitiveReader<String> {
    private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
    DateReader(ColumnDescriptor desc) {
      super(desc);
    }
    @Override
    public String read(String reuse) {
      OffsetDateTime day = EPOCH.plusDays(column.nextInteger());
      return format("%04d-%02d-%02d", day.getYear(), day.getMonth().getValue(), day.getDayOfMonth());
    }
  }
  /** Reads binary values as Pig {@link DataByteArray}s (always copies). */
  private static class BytesReader extends PrimitiveReader<DataByteArray> {
    BytesReader(ColumnDescriptor desc) {
      super(desc);
    }
    @Override
    public DataByteArray read(DataByteArray reuse) {
      byte[] bytes = column.nextBinary().getBytes();
      return new DataByteArray(bytes);
    }
  }
  /** Reads TIMESTAMP_MICROS as an ISO-8601 UTC string. */
  private static class TimestampMicrosReader extends UnboxedReader<String> {
    private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
    TimestampMicrosReader(ColumnDescriptor desc) {
      super(desc);
    }
    @Override
    public String read(String ignored) {
      return ChronoUnit.MICROS.addTo(EPOCH, column.nextLong()).toString();
    }
  }
  /** Reads TIMESTAMP_MILLIS as an ISO-8601 UTC string. */
  private static class TimestampMillisReader extends UnboxedReader<String> {
    private static final OffsetDateTime EPOCH = Instant.ofEpochSecond(0).atOffset(ZoneOffset.UTC);
    TimestampMillisReader(ColumnDescriptor desc) {
      super(desc);
    }
    @Override
    public String read(String ignored) {
      return ChronoUnit.MILLIS.addTo(EPOCH, column.nextLong()).toString();
    }
  }
  /** Materializes repeated key/value pairs as a {@link LinkedHashMap} (preserves order). */
  private static class MapReader<K, V> extends RepeatedKeyValueReader<Map<K, V>, Map<K, V>, K, V> {
    ReusableEntry<K, V> nullEntry = new ReusableEntry<>();
    MapReader(int definitionLevel, int repetitionLevel,
              ParquetValueReader<K> keyReader, ParquetValueReader<V> valueReader) {
      super(definitionLevel, repetitionLevel, keyReader, valueReader);
    }
    @Override
    protected Map<K, V> newMapData(Map<K, V> reuse) {
      return new LinkedHashMap<>();
    }
    @Override
    protected Map.Entry<K, V> getPair(Map<K, V> reuse) {
      // no per-entry reuse: always hand back the shared placeholder entry
      return nullEntry;
    }
    @Override
    protected void addPair(Map<K, V> map, K key, V value) {
      map.put(key, value);
    }
    @Override
    protected Map<K, V> buildMap(Map<K, V> map) {
      return map;
    }
  }
  /** Materializes repeated elements as a Pig {@link DataBag}, wrapping each element in a tuple. */
  private static class ArrayReader<T> extends RepeatedReader<DataBag, DataBag, T> {
    private final BagFactory BF = BagFactory.getInstance();
    private final TupleFactory TF = TupleFactory.getInstance();
    ArrayReader(int definitionLevel, int repetitionLevel, ParquetValueReader<T> reader) {
      super(definitionLevel, repetitionLevel, reader);
    }
    @Override
    protected DataBag newListData(DataBag reuse) {
      return BF.newDefaultBag();
    }
    @Override
    protected T getElement(DataBag list) {
      // no element reuse for bags
      return null;
    }
    @Override
    protected void addElement(DataBag bag, T element) {
      // Pig bags contain tuples, so each element is wrapped in a single-field tuple
      bag.add(TF.newTuple(element));
    }
    @Override
    protected DataBag buildList(DataBag bag) {
      return bag;
    }
  }
  /**
   * Materializes a struct as a Pig {@link Tuple}, appending constant partition
   * values at the positions given by {@code partitionValues}.
   */
  private static class TupleReader extends StructReader<Tuple, Tuple> {
    private static final TupleFactory TF = TupleFactory.getInstance();
    private final Map<Integer, Object> partitionValues;
    private final int columns;
    protected TupleReader(List<Type> types, List<ParquetValueReader<?>> readers, Map<Integer, Object> partitionValues) {
      super(types, readers);
      this.partitionValues = partitionValues;
      // total tuple width = file columns + injected partition columns
      this.columns = types.size() + partitionValues.size();
    }
    @Override
    protected Tuple newStructData(Tuple reuse) {
      return TF.newTuple(columns);
    }
    @Override
    protected Object getField(Tuple tuple, int pos) {
      // no field reuse for tuples
      return null;
    }
    @Override
    protected Tuple buildStruct(Tuple tuple) {
      // splice in partition values after the data columns are set
      for (Map.Entry<Integer, Object> e : partitionValues.entrySet()) {
        try {
          tuple.set(e.getKey(), e.getValue());
        } catch (ExecException ex) {
          throw new RuntimeException("Error setting value for key" + e.getKey(), ex);
        }
      }
      return tuple;
    }
    @Override
    protected void set(Tuple tuple, int pos, Object value) {
      try {
        tuple.set(pos, value);
      } catch (ExecException e) {
        throw new RuntimeException(format("Error setting tuple value for pos: %d, value: %s", pos, value), e);
      }
    }
  }
}
| 6,264 |
0 | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg/pig/IcebergStorage.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.fs.Path;
import org.apache.pig.impl.util.ObjectSerializer;
import org.apache.pig.impl.util.UDFContext;
import org.mortbay.log.Log;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.netflix.iceberg.Tables;
import com.netflix.iceberg.hadoop.HadoopTables;
import com.netflix.iceberg.pig.IcebergPigInputFormat.IcebergRecordReader;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.pig.Expression;
import org.apache.pig.Expression.*;
import org.apache.pig.LoadFunc;
import org.apache.pig.LoadMetadata;
import org.apache.pig.LoadPredicatePushdown;
import org.apache.pig.LoadPushDown;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceStatistics;
import org.apache.pig.backend.hadoop.executionengine.mapReduceLayer.PigSplit;
import org.apache.pig.data.Tuple;
import org.apache.pig.impl.logicalLayer.FrontendException;
import java.io.IOException;
import java.io.Serializable;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.stream.Collectors;
import static java.lang.String.format;
import static java.util.Arrays.asList;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.isNull;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.not;
import static com.netflix.iceberg.expressions.Expressions.notEqual;
import static com.netflix.iceberg.expressions.Expressions.or;
import static com.netflix.iceberg.pig.IcebergPigInputFormat.ICEBERG_FILTER_EXPRESSION;
import static com.netflix.iceberg.pig.IcebergPigInputFormat.ICEBERG_PROJECTED_FIELDS;
import static com.netflix.iceberg.pig.IcebergPigInputFormat.ICEBERG_SCHEMA;
import static org.apache.pig.Expression.OpType.OP_AND;
import static org.apache.pig.Expression.OpType.OP_BETWEEN;
import static org.apache.pig.Expression.OpType.OP_EQ;
import static org.apache.pig.Expression.OpType.OP_GE;
import static org.apache.pig.Expression.OpType.OP_GT;
import static org.apache.pig.Expression.OpType.OP_IN;
import static org.apache.pig.Expression.OpType.OP_LE;
import static org.apache.pig.Expression.OpType.OP_LT;
import static org.apache.pig.Expression.OpType.OP_NE;
import static org.apache.pig.Expression.OpType.OP_NOT;
import static org.apache.pig.Expression.OpType.OP_NULL;
import static org.apache.pig.Expression.OpType.OP_OR;
/**
 * Pig {@link LoadFunc} for reading Iceberg tables, with support for schema
 * discovery, projection pushdown, and predicate pushdown. State that must
 * survive Pig's front-end/back-end serialization round trips is keyed by the
 * UDF context signature.
 */
public class IcebergStorage extends LoadFunc implements LoadMetadata, LoadPredicatePushdown, LoadPushDown {
  private static final Logger LOG = LoggerFactory.getLogger(IcebergStorage.class);
  public static final String PIG_ICEBERG_TABLES_IMPL = "pig.iceberg.tables.impl";
  private static Tables iceberg;
  // caches shared across instances; keyed by UDF signature / table location
  private static Map<String, Table> tables = Maps.newConcurrentMap();
  private static Map<String, String> locations = Maps.newConcurrentMap();
  private String signature;
  private IcebergRecordReader reader;
  @Override
  public void setLocation(String location, Job job) {
    LOG.info(format("[%s]: setLocation() -> %s ", signature, location));
    locations.put(signature, location);
    Configuration conf = job.getConfiguration();
    // propagate front-end state (schema, projection, filter) to the back end
    copyUDFContextToConfiguration(conf, ICEBERG_SCHEMA);
    copyUDFContextToConfiguration(conf, ICEBERG_PROJECTED_FIELDS);
    copyUDFContextToConfiguration(conf, ICEBERG_FILTER_EXPRESSION);
  }
  @Override
  public InputFormat getInputFormat() {
    LOG.info(format("[%s]: getInputFormat()", signature));
    String location = locations.get(signature);
    return new IcebergPigInputFormat(tables.get(location));
  }
  @Override
  public Tuple getNext() throws IOException {
    if (!reader.nextKeyValue()) {
      return null; // end of input
    }
    return (Tuple) reader.getCurrentValue();
  }
  @Override
  public void prepareToRead(RecordReader reader, PigSplit split) {
    LOG.info(format("[%s]: prepareToRead() -> %s", signature, split));
    this.reader = (IcebergRecordReader) reader;
  }
  @Override
  public ResourceSchema getSchema(String location, Job job) throws IOException {
    LOG.info(format("[%s]: getSchema() -> %s", signature, location));
    Schema schema = load(location, job).schema();
    storeInUDFContext(ICEBERG_SCHEMA, schema);
    return SchemaUtil.convert(schema);
  }
  @Override
  public ResourceStatistics getStatistics(String location, Job job) {
    LOG.info(format("[%s]: getStatistics() -> : %s", signature, location));
    return null; // statistics are not supported
  }
  @Override
  public String[] getPartitionKeys(String location, Job job) {
    LOG.info(format("[%s]: getPartitionKeys()", signature));
    return new String[0]; // partition filtering is handled via predicate pushdown
  }
  @Override
  public void setPartitionFilter(Expression partitionFilter) {
    LOG.info(format("[%s]: setPartitionFilter() -> %s", signature, partitionFilter));
  }
  /**
   * Returns the top-level primitive columns that predicates may be pushed down on;
   * nested types (map, list, struct) are excluded.
   */
  @Override
  public List<String> getPredicateFields(String location, Job job) throws IOException {
    LOG.info(format("[%s]: getPredicateFields() -> %s", signature, location));
    Schema schema = load(location, job).schema();
    List<String> result = Lists.newArrayList();
    for (Types.NestedField nf : schema.columns()) {
      switch (nf.type().typeId()) {
        case MAP:
        case LIST:
        case STRUCT:
          continue;
        default:
          result.add(nf.name());
      }
    }
    return result;
  }
  @Override
  public List<Expression.OpType> getSupportedExpressionTypes() {
    LOG.info(format("[%s]: getSupportedExpressionTypes()", signature));
    return asList(OP_AND, OP_OR, OP_EQ, OP_NE, OP_NOT, OP_GE, OP_GT, OP_LE, OP_LT, OP_BETWEEN, OP_IN, OP_NULL);
  }
  @Override
  public void setPushdownPredicate(Expression predicate) throws IOException {
    LOG.info(format("[%s]: setPushdownPredicate()", signature));
    LOG.info(format("[%s]: Pig predicate expression: %s", signature, predicate));
    com.netflix.iceberg.expressions.Expression icebergExpression = convert(predicate);
    LOG.info(format("[%s]: Iceberg predicate expression: %s", signature, icebergExpression));
    storeInUDFContext(ICEBERG_FILTER_EXPRESSION, icebergExpression);
  }
  /**
   * Recursively converts a Pig predicate expression to an Iceberg expression.
   * BETWEEN is rewritten as GE AND LE; IN as an OR of equality predicates.
   *
   * @throws FrontendException if the expression shape is unsupported
   */
  private com.netflix.iceberg.expressions.Expression convert(Expression e) throws IOException {
    OpType op = e.getOpType();
    if (e instanceof BinaryExpression) {
      Expression lhs = ((BinaryExpression) e).getLhs();
      Expression rhs = ((BinaryExpression) e).getRhs();
      switch (op) {
        case OP_AND:
          return and(convert(lhs), convert(rhs));
        case OP_OR:
          return or(convert(lhs), convert(rhs));
        case OP_BETWEEN:
          BetweenExpression between = (BetweenExpression) rhs;
          return and(
              convert(OP_GE, (Column) lhs, (Const) between.getLower()),
              convert(OP_LE, (Column) lhs, (Const) between.getUpper())
          );
        case OP_IN:
          return ((InExpression) rhs).getValues().stream()
              .map((value) -> convert(OP_EQ, (Column) lhs, (Const) value))
              .reduce(Expressions.alwaysFalse(), (m, v) -> (or(m, v)));
        default:
          // only column-on-the-left comparisons are supported
          if (lhs instanceof Column && rhs instanceof Const) {
            return convert(op, (Column) lhs, (Const) rhs);
          } else if (lhs instanceof Const && rhs instanceof Column) {
            throw new FrontendException("Invalid expression ordering " + e);
          }
      }
    } else if (e instanceof UnaryExpression) {
      Expression unary = ((UnaryExpression) e).getExpression();
      switch (op) {
        case OP_NOT: return not(convert(unary));
        case OP_NULL: return isNull(((Column) unary).getName());
        default:
          throw new FrontendException("Unsupported unary operator" + op);
      }
    }
    throw new FrontendException("Failed to pushdown expression " + e);
  }
  /** Converts a single {@code column op constant} comparison to an Iceberg predicate. */
  private com.netflix.iceberg.expressions.Expression convert(OpType op, Column col, Const constant) {
    String name = col.getName();
    Object value = constant.getValue();
    switch (op) {
      case OP_GE: return greaterThanOrEqual(name, value);
      case OP_GT: return greaterThan(name, value);
      case OP_LE: return lessThanOrEqual(name, value);
      case OP_LT: return lessThan(name, value);
      case OP_EQ: return equal(name, value);
      case OP_NE: return notEqual(name, value);
    }
    throw new RuntimeException(format("[%s]: Failed to pushdown expression: %s %s %s", signature, col, op, constant));
  }
  @Override
  public List<OperatorSet> getFeatures() {
    return Collections.singletonList(OperatorSet.PROJECTION);
  }
  @Override
  public RequiredFieldResponse pushProjection(RequiredFieldList requiredFieldList) {
    LOG.info(format("[%s]: pushProjection() -> %s", signature, requiredFieldList));
    try {
      List<String> projection = requiredFieldList.getFields().stream().map(RequiredField::getAlias).collect(Collectors.toList());
      storeInUDFContext(ICEBERG_PROJECTED_FIELDS, (Serializable) projection);
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
    return new RequiredFieldResponse(true);
  }
  @Override
  public void setUDFContextSignature(String signature) {
    this.signature = signature;
  }
  /** Serializes {@code value} into this loader's UDF-context properties under {@code key}. */
  private void storeInUDFContext(String key, Serializable value) throws IOException {
    Properties properties = UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{signature});
    properties.setProperty(key, ObjectSerializer.serialize(value));
  }
  /** Copies one serialized UDF-context property into the job configuration, if present. */
  private void copyUDFContextToConfiguration(Configuration conf, String key) {
    String value = UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{signature}).getProperty(key);
    if (value != null) {
      conf.set(key, value);
    }
  }
  @Override
  public String relativeToAbsolutePath(String location, Path curDir) throws IOException {
    // Iceberg table locations are already absolute identifiers
    return location;
  }
  @SuppressWarnings("unchecked")
  public <T extends Serializable> T getFromUDFContext(String key, Class<T> clazz) throws IOException {
    Properties properties = UDFContext.getUDFContext().getUDFProperties(this.getClass(), new String[]{signature});
    return (T) ObjectSerializer.deserialize(properties.getProperty(key));
  }
  /**
   * Loads (and caches) the {@link Table} for a location, lazily instantiating the
   * {@link Tables} implementation configured via {@link #PIG_ICEBERG_TABLES_IMPL}.
   */
  private Table load(String location, Job job) throws IOException {
    if (iceberg == null) {
      Class<?> tablesImpl = job.getConfiguration().getClass(PIG_ICEBERG_TABLES_IMPL, HadoopTables.class);
      // use the class's SLF4J logger (previously logged via org.mortbay.log.Log)
      LOG.info("Initializing iceberg tables implementation: " + tablesImpl);
      iceberg = (Tables) ReflectionUtils.newInstance(tablesImpl, job.getConfiguration());
    }
    Table result = tables.get(location);
    if (result == null) {
      try {
        LOG.info(format("[%s]: Loading table for location: %s", signature, location));
        result = iceberg.load(location);
        tables.put(location, result);
      } catch (Exception e) {
        throw new FrontendException("Failed to instantiate tables implementation", e);
      }
    }
    return result;
  }
}
| 6,265 |
0 | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg/pig/IcebergPigInputFormat.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.netflix.iceberg.CombinedScanTask;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.FileScanTask;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableScan;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.hadoop.HadoopInputFile;
import com.netflix.iceberg.io.CloseableIterable;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.parquet.Parquet;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.commons.lang.SerializationUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.InputFormat;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.pig.data.DataByteArray;
import org.apache.pig.impl.util.ObjectSerializer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static com.netflix.iceberg.pig.SchemaUtil.project;
/**
 * Hadoop {@link InputFormat} that plans Iceberg scan tasks for a table and
 * reads them as Pig records. Scan state (schema, projected field names, and
 * an optional filter expression) is passed through the job configuration
 * under the ICEBERG_* keys below.
 */
public class IcebergPigInputFormat<T> extends InputFormat<Void, T> {
  private static final Logger LOG = LoggerFactory.getLogger(IcebergPigInputFormat.class);

  // Configuration keys used to serialize scan state into the job configuration.
  static final String ICEBERG_SCHEMA = "iceberg.schema";
  static final String ICEBERG_PROJECTED_FIELDS = "iceberg.projected.fields";
  static final String ICEBERG_FILTER_EXPRESSION = "iceberg.filter.expression";

  private Table table;
  private List<InputSplit> splits;

  IcebergPigInputFormat(Table table) {
    this.table = table;
  }

  @Override
  @SuppressWarnings("unchecked")
  public List<InputSplit> getSplits(JobContext context) throws IOException {
    // Split planning is cached: repeated calls return the same list.
    if (splits != null) {
      LOG.info("Returning cached splits: " + splits.size());
      return splits;
    }

    splits = Lists.newArrayList();

    TableScan scan = table.newScan();

    // Apply the filter expression, if one was serialized into the configuration.
    Expression filterExpression = (Expression) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_FILTER_EXPRESSION));

    if (filterExpression != null) {
      LOG.info("Filter Expression: " + filterExpression);
      scan = scan.filter(filterExpression);
    }

    // Wrap each planned combined scan task in an InputSplit.
    try (CloseableIterable<CombinedScanTask> tasks = scan.planTasks()) {
      tasks.forEach((scanTask) -> splits.add(new IcebergSplit(scanTask)));
    }

    return splits;
  }

  @Override
  public RecordReader<Void, T> createRecordReader(InputSplit split, TaskAttemptContext context) {
    return new IcebergRecordReader<>();
  }

  /** Split carrying one {@link CombinedScanTask}, serialized via Java serialization. */
  private static class IcebergSplit extends InputSplit implements Writable {
    private CombinedScanTask task;

    IcebergSplit(CombinedScanTask task) {
      this.task = task;
    }

    // No-arg constructor required by the Writable deserialization contract.
    public IcebergSplit() {
    }

    @Override
    public long getLength() {
      // Total bytes across all file scan tasks in this combined task.
      return task.files().stream().mapToLong(FileScanTask::length).sum();
    }

    @Override
    public String[] getLocations() {
      // No locality hints are provided for Iceberg data files here.
      return new String[0];
    }

    @Override
    public void write(DataOutput out) throws IOException {
      byte[] data = SerializationUtils.serialize(this.task);
      out.writeInt(data.length);
      out.write(data);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
      byte[] data = new byte[in.readInt()];
      in.readFully(data);

      this.task = (CombinedScanTask) SerializationUtils.deserialize(data);
    }
  }

  /**
   * Reads the records of one {@link IcebergSplit}, advancing through its file
   * scan tasks one reader at a time.
   */
  public class IcebergRecordReader<T> extends RecordReader<Void, T> {
    private TaskAttemptContext context;

    private Iterator<FileScanTask> tasks;

    private FileScanTask currentTask;
    private CloseableIterable reader;
    private Iterator<T> recordIterator;
    private T currentRecord;

    @Override
    public void initialize(InputSplit split, TaskAttemptContext context) throws IOException {
      this.context = context;

      CombinedScanTask task = ((IcebergSplit) split).task;
      tasks = task.files().iterator();

      advance();
    }

    /**
     * Closes the current reader (if any) and opens one for the next file task.
     *
     * @return true if a new reader was opened, false when no tasks remain
     */
    @SuppressWarnings("unchecked")
    private boolean advance() throws IOException {
      if (reader != null) {
        reader.close();
      }

      if (!tasks.hasNext()) {
        return false;
      }

      currentTask = tasks.next();

      Schema tableSchema = (Schema) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_SCHEMA));
      List<String> projectedFields = (List<String>) ObjectSerializer.deserialize(context.getConfiguration().get(ICEBERG_PROJECTED_FIELDS));
      Schema projectedSchema = projectedFields != null ? project(tableSchema, projectedFields) : tableSchema;

      PartitionSpec spec = currentTask.asFileScanTask().spec();
      DataFile file = currentTask.file();
      InputFile inputFile = HadoopInputFile.fromLocation(file.path(), context.getConfiguration());

      Set<Integer> idColumns = spec.identitySourceIds();

      // schema needed for the projection and filtering
      boolean hasJoinedPartitionColumns = !idColumns.isEmpty();

      switch (file.format()) {
        case PARQUET:
          Map<Integer, Object> partitionValueMap = Maps.newHashMap();

          if (hasJoinedPartitionColumns) {
            // Identity-partitioned columns are not stored in the data file: read the
            // remaining columns and join the partition values back in by tuple index.
            Schema readSchema = TypeUtil.selectNot(projectedSchema, idColumns);
            Schema partitionSchema = TypeUtil.select(tableSchema, idColumns);
            Schema projectedPartitionSchema = TypeUtil.select(projectedSchema, idColumns);

            for (Types.NestedField field : projectedPartitionSchema.columns()) {
              int tupleIndex = projectedSchema.columns().indexOf(field);
              int partitionIndex = partitionSchema.columns().indexOf(field);

              Object partitionValue = file.partition().get(partitionIndex, Object.class);
              partitionValueMap.put(tupleIndex, convertPartitionValue(field.type(), partitionValue));
            }

            reader = Parquet.read(inputFile)
                .project(readSchema)
                .split(currentTask.start(), currentTask.length())
                .filter(currentTask.residual())
                .createReaderFunc(fileSchema -> PigParquetReader.buildReader(fileSchema, readSchema, partitionValueMap))
                .build();
          } else {
            reader = Parquet.read(inputFile)
                .project(projectedSchema)
                .split(currentTask.start(), currentTask.length())
                .filter(currentTask.residual())
                .createReaderFunc(fileSchema -> PigParquetReader.buildReader(fileSchema, projectedSchema, partitionValueMap))
                .build();
          }

          recordIterator = reader.iterator();

          break;
        default:
          throw new UnsupportedOperationException("Unsupported file format: " + file.format());
      }

      return true;
    }

    private Object convertPartitionValue(Type type, Object value) {
      if (type.typeId() == Types.BinaryType.get().typeId()) {
        ByteBuffer buffer = (ByteBuffer) value;
        // Fix: copy only the buffer's remaining bytes. The previous code used
        // buffer.get(new byte[n]).array(), but ByteBuffer.get(byte[]) returns the
        // buffer itself, so .array() returned the buffer's backing array (wrong
        // offset/length, and unsupported for direct or read-only buffers).
        byte[] bytes = new byte[buffer.remaining()];
        buffer.duplicate().get(bytes);
        return new DataByteArray(bytes);
      }

      return value;
    }

    @Override
    public boolean nextKeyValue() throws IOException {
      // Loop so that an exhausted or empty reader falls through to the next file
      // task instead of calling next() on an empty iterator (the previous single
      // `if` threw NoSuchElementException when a freshly opened reader had no rows).
      while (true) {
        if (recordIterator.hasNext()) {
          currentRecord = recordIterator.next();
          return true;
        }

        if (!advance()) {
          return false;
        }
      }
    }

    @Override
    public Void getCurrentKey() {
      // Keys are unused; records are delivered as values only.
      return null;
    }

    @Override
    public T getCurrentValue() {
      return currentRecord;
    }

    @Override
    public float getProgress() {
      // Progress reporting is not implemented.
      return 0;
    }

    @Override
    public void close() throws IOException {
      // Fix: release the underlying file reader if one is still open; the previous
      // empty close() leaked the reader when the task ended early.
      if (reader != null) {
        reader.close();
      }
    }
  }
}
| 6,266 |
0 | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg | Create_ds/iceberg/pig/src/main/java/com/netflix/iceberg/pig/SchemaUtil.java | /*
* Copyright 2018 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.pig;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.pig.ResourceSchema;
import org.apache.pig.ResourceSchema.ResourceFieldSchema;
import org.apache.pig.data.DataType;
import org.apache.pig.impl.logicalLayer.FrontendException;
import java.io.IOException;
import java.util.List;
import static java.lang.String.format;
import static com.netflix.iceberg.types.Types.ListType;
import static com.netflix.iceberg.types.Types.MapType;
import static com.netflix.iceberg.types.Types.NestedField;
import static com.netflix.iceberg.types.Types.StructType;
/**
 * Utilities for converting Iceberg schemas into Pig {@link ResourceSchema}s and
 * for projecting an Iceberg {@link Schema} down to a set of required columns.
 */
public class SchemaUtil {
  // Static utility holder; not meant to be instantiated.
  private SchemaUtil() {
  }

  /** Converts an Iceberg schema into the equivalent Pig resource schema. */
  public static ResourceSchema convert(Schema icebergSchema) throws IOException {
    ResourceSchema result = new ResourceSchema();
    result.setFields(convertFields(icebergSchema.columns()));
    return result;
  }

  private static ResourceFieldSchema [] convertFields(List<Types.NestedField> fields) throws IOException {
    List<ResourceFieldSchema> result = Lists.newArrayList();

    for (Types.NestedField nf : fields) {
      result.add(convert(nf));
    }

    return result.toArray(new ResourceFieldSchema[0]);
  }

  // Converts one named field; the Iceberg field id is recorded in the description.
  private static ResourceFieldSchema convert(Types.NestedField field) throws IOException {
    ResourceFieldSchema result = convert(field.type());
    result.setName(field.name());
    result.setDescription(format("FieldId: %s", field.fieldId()));

    return result;
  }

  private static ResourceFieldSchema convert(Type type) throws IOException {
    ResourceFieldSchema result = new ResourceFieldSchema();
    result.setType(convertType(type));

    // Complex types (tuple/bag/map) additionally carry a nested schema.
    if (!type.isPrimitiveType()) {
      result.setSchema(convertComplex(type));
    }

    return result;
  }

  /** Maps an Iceberg type id to the corresponding Pig {@link DataType} constant. */
  private static byte convertType(Type type) throws IOException {
    switch (type.typeId()) {
      case BOOLEAN:   return DataType.BOOLEAN;
      case INTEGER:   return DataType.INTEGER;
      case LONG:      return DataType.LONG;
      case FLOAT:     return DataType.FLOAT;
      case DOUBLE:    return DataType.DOUBLE;
      case TIMESTAMP: return DataType.CHARARRAY;
      case DATE:      return DataType.CHARARRAY;
      case STRING:    return DataType.CHARARRAY;
      case FIXED:     return DataType.BYTEARRAY;
      case BINARY:    return DataType.BYTEARRAY;
      case DECIMAL:   return DataType.BIGDECIMAL;
      case STRUCT:    return DataType.TUPLE;
      case LIST:      return DataType.BAG;
      case MAP:       return DataType.MAP;
      default:
        throw new FrontendException("Unsupported primitive type:" + type);
    }
  }

  // Builds the nested resource schema for struct, list, and map types.
  private static ResourceSchema convertComplex(Type type) throws IOException {
    ResourceSchema result = new ResourceSchema();

    switch (type.typeId()) {
      case STRUCT:
        StructType structType = type.asStructType();

        List<ResourceFieldSchema> fields = Lists.newArrayList();

        for (Types.NestedField f : structType.fields()) {
          fields.add(convert(f));
        }

        result.setFields(fields.toArray(new ResourceFieldSchema[0]));

        return result;
      case LIST:
        ListType listType = type.asListType();

        ResourceFieldSchema [] elementFieldSchemas = new ResourceFieldSchema[]{convert(listType.elementType())};

        if (listType.elementType().isStructType()) {
          result.setFields(elementFieldSchemas);
        } else {
          // Wrap non-struct types in tuples, as Pig bags contain tuples.
          ResourceSchema elementSchema = new ResourceSchema();
          elementSchema.setFields(elementFieldSchemas);

          ResourceFieldSchema tupleSchema = new ResourceFieldSchema();
          tupleSchema.setType(DataType.TUPLE);
          tupleSchema.setSchema(elementSchema);

          result.setFields(new ResourceFieldSchema[]{tupleSchema});
        }

        return result;
      case MAP:
        MapType mapType = type.asMapType();

        // Pig map keys are always chararrays; reject anything else.
        if (mapType.keyType().typeId() != Type.TypeID.STRING) {
          throw new FrontendException("Unsupported map key type: " + mapType.keyType());
        }
        result.setFields(new ResourceFieldSchema[]{convert(mapType.valueType())});

        return result;
      default:
        throw new FrontendException("Unsupported complex type: " + type);
    }
  }

  /**
   * Projects the schema down to the given field names, preserving the order of
   * {@code requiredFields}.
   *
   * @throws IllegalArgumentException if a requested field is not in the schema
   */
  public static Schema project(Schema schema, List<String> requiredFields) {
    List<NestedField> columns = Lists.newArrayList();

    for (String column : requiredFields) {
      NestedField field = schema.findField(column);
      // Fail fast with a clear message instead of a later NullPointerException
      // when findField returns null for an unknown column name.
      if (field == null) {
        throw new IllegalArgumentException("Cannot find column in schema: " + column);
      }
      columns.add(field);
    }

    return new Schema(columns);
  }
}
| 6,267 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TableTestBase.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterators;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.netflix.iceberg.types.Types;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.util.Iterator;
import java.util.List;
import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Base class for table operation tests: creates a fresh test table before each
 * test, cleans up after, and provides shared fixtures (schema, partition spec,
 * data files) plus helpers for validating snapshot and manifest contents.
 */
public class TableTestBase {
  // Schema passed to create tables
  static final Schema SCHEMA = new Schema(
      required(3, "id", Types.IntegerType.get()),
      required(4, "data", Types.StringType.get())
  );

  // Partition spec used to create tables
  static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
      .bucket("data", 16)
      .build();

  // Fixture data files; each lands in a different data_bucket partition.
  static final DataFile FILE_A = DataFiles.builder(SPEC)
      .withPath("/path/to/data-a.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=0") // easy way to set partition data for now
      .withRecordCount(0)
      .build();
  static final DataFile FILE_B = DataFiles.builder(SPEC)
      .withPath("/path/to/data-b.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=1") // easy way to set partition data for now
      .withRecordCount(0)
      .build();
  static final DataFile FILE_C = DataFiles.builder(SPEC)
      .withPath("/path/to/data-c.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=2") // easy way to set partition data for now
      .withRecordCount(0)
      .build();
  static final DataFile FILE_D = DataFiles.builder(SPEC)
      .withPath("/path/to/data-d.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("data_bucket=3") // easy way to set partition data for now
      .withRecordCount(0)
      .build();

  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  File tableDir = null;
  File metadataDir = null;
  TestTables.TestTable table = null;

  @Before
  public void setupTable() throws Exception {
    this.tableDir = temp.newFolder();
    tableDir.delete(); // created by table create
    this.metadataDir = new File(tableDir, "metadata");
    this.table = create(SCHEMA, SPEC);
  }

  @After
  public void cleanupTables() {
    TestTables.clearTables();
  }

  // Lists manifest files in this test's table directory.
  List<File> listManifestFiles() {
    return listManifestFiles(tableDir);
  }

  // Manifests are the avro files under metadata/ whose names do not start with "snap".
  List<File> listManifestFiles(File tableDir) {
    return Lists.newArrayList(new File(tableDir, "metadata").listFiles((dir, name) ->
        !name.startsWith("snap") && Files.getFileExtension(name).equalsIgnoreCase("avro")));
  }

  private TestTables.TestTable create(Schema schema, PartitionSpec spec) {
    return TestTables.create(tableDir, "test", schema, spec);
  }

  TestTables.TestTable load() {
    return TestTables.load(tableDir, "test");
  }

  Integer version() {
    return TestTables.metadataVersion("test");
  }

  TableMetadata readMetadata() {
    return TestTables.readMetadata("test");
  }

  /**
   * Asserts that {@code snap} reuses all of {@code old}'s manifests plus exactly
   * one new manifest containing {@code newFiles} in order, each entry tagged
   * with the new snapshot's id.
   */
  void validateSnapshot(Snapshot old, Snapshot snap, DataFile... newFiles) {
    List<ManifestFile> oldManifests = old != null ? old.manifests() : ImmutableList.of();

    // copy the manifests to a modifiable list and remove the existing manifests
    List<ManifestFile> newManifests = Lists.newArrayList(snap.manifests());
    for (ManifestFile oldManifest : oldManifests) {
      Assert.assertTrue("New snapshot should contain old manifests",
          newManifests.remove(oldManifest));
    }
    Assert.assertEquals("Should create 1 new manifest and reuse old manifests",
        1, newManifests.size());
    ManifestFile manifest = newManifests.get(0);

    long id = snap.snapshotId();
    Iterator<String> newPaths = paths(newFiles).iterator();

    for (ManifestEntry entry : ManifestReader.read(localInput(manifest.path())).entries()) {
      DataFile file = entry.file();
      Assert.assertEquals("Path should match expected", newPaths.next(), file.path().toString());
      Assert.assertEquals("File's snapshot ID should match", id, entry.snapshotId());
    }

    Assert.assertFalse("Should find all files in the manifest", newPaths.hasNext());
  }

  // Extracts the path strings of the given data files, preserving order.
  List<String> paths(DataFile... dataFiles) {
    List<String> paths = Lists.newArrayListWithExpectedSize(dataFiles.length);
    for (DataFile file : dataFiles) {
      paths.add(file.path().toString());
    }
    return paths;
  }

  // Asserts the manifest contains exactly the expected (snapshot id, file) pairs, in order.
  static void validateManifest(ManifestFile manifest,
                               Iterator<Long> ids,
                               Iterator<DataFile> expectedFiles) {
    validateManifest(manifest.path(), ids, expectedFiles);
  }

  static void validateManifest(String manifest,
                               Iterator<Long> ids,
                               Iterator<DataFile> expectedFiles) {
    for (ManifestEntry entry : ManifestReader.read(localInput(manifest)).entries()) {
      DataFile file = entry.file();
      DataFile expected = expectedFiles.next();
      Assert.assertEquals("Path should match expected",
          expected.path().toString(), file.path().toString());
      Assert.assertEquals("Snapshot ID should match expected ID",
          (long) ids.next(), entry.snapshotId());
    }

    Assert.assertFalse("Should find all files in the manifest", expectedFiles.hasNext());
  }

  // Like validateManifest, but also checks each entry's status (ADDED/EXISTING/DELETED).
  static void validateManifestEntries(ManifestFile manifest,
                                      Iterator<Long> ids,
                                      Iterator<DataFile> expectedFiles,
                                      Iterator<ManifestEntry.Status> expectedStatuses) {
    validateManifestEntries(manifest.path(), ids, expectedFiles, expectedStatuses);
  }

  static void validateManifestEntries(String manifest,
                                      Iterator<Long> ids,
                                      Iterator<DataFile> expectedFiles,
                                      Iterator<ManifestEntry.Status> expectedStatuses) {
    for (ManifestEntry entry : ManifestReader.read(localInput(manifest)).entries()) {
      DataFile file = entry.file();
      DataFile expected = expectedFiles.next();
      final ManifestEntry.Status expectedStatus = expectedStatuses.next();
      Assert.assertEquals("Path should match expected",
          expected.path().toString(), file.path().toString());
      Assert.assertEquals("Snapshot ID should match expected ID",
          (long) ids.next(), entry.snapshotId());
      Assert.assertEquals("Entry status should match expected ID",
          expectedStatus, entry.status());
    }

    Assert.assertFalse("Should find all files in the manifest", expectedFiles.hasNext());
  }

  // Helpers that build the expected-value iterators consumed by the validators above.
  static Iterator<ManifestEntry.Status> statuses(ManifestEntry.Status... statuses) {
    return Iterators.forArray(statuses);
  }

  static Iterator<Long> ids(Long... ids) {
    return Iterators.forArray(ids);
  }

  static Iterator<DataFile> files(DataFile... files) {
    return Iterators.forArray(files);
  }

  // Reads the actual data files back out of a manifest on disk.
  static Iterator<DataFile> files(ManifestFile manifest) {
    return ManifestReader.read(localInput(manifest.path())).iterator();
  }
}
| 6,268 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestMergeAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.Set;
import static com.google.common.collect.Iterators.concat;
/**
 * Tests for the merging append operation: manifest merging behavior, merge
 * thresholds, partition spec changes, and commit failure/retry cleanup.
 */
public class TestMergeAppend extends TableTestBase {
  // Appending to an empty table should produce a single new manifest.
  @Test
  public void testEmptyTableAppend() {
    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    TableMetadata base = readMetadata();
    Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());

    Snapshot pending = table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .apply();

    Assert.assertEquals("Should create 1 manifest for initial write",
        1, pending.manifests().size());

    long pendingId = pending.snapshotId();

    validateManifest(pending.manifests().get(0), ids(pendingId, pendingId), files(FILE_A, FILE_B));
  }

  // With min-count-to-merge=1, a second append should merge into one new manifest.
  @Test
  public void testMergeWithExistingManifest() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    Snapshot pending = table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 1 merged manifest for second write",
        1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertNotEquals("Should not contain manifest from initial write",
        initialManifest, newManifest);

    long pendingId = pending.snapshotId();

    validateManifest(newManifest,
        ids(pendingId, pendingId, baseId, baseId),
        concat(files(FILE_C, FILE_D), files(initialManifest)));
  }

  // Entries deleted in a previous snapshot must be dropped from the merged manifest.
  @Test
  public void testMergeWithExistingManifestAfterDelete() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    table.newDelete()
        .deleteFile(FILE_A)
        .commit();

    TableMetadata delete = readMetadata();
    long deleteId = delete.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 filtered manifest for delete",
        1, delete.currentSnapshot().manifests().size());
    ManifestFile deleteManifest = delete.currentSnapshot().manifests().get(0);

    validateManifestEntries(deleteManifest,
        ids(deleteId, baseId),
        files(FILE_A, FILE_B),
        statuses(Status.DELETED, Status.EXISTING));

    Snapshot pending = table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 1 merged manifest for second write",
        1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertNotEquals("Should not contain manifest from initial write",
        initialManifest, newManifest);

    long pendingId = pending.snapshotId();

    // the deleted entry from the previous manifest should be removed
    validateManifestEntries(newManifest,
        ids(pendingId, pendingId, baseId),
        files(FILE_C, FILE_D, FILE_B),
        statuses(Status.ADDED, Status.ADDED, Status.EXISTING));
  }

  // Manifests stay unmerged until the configured minimum count (4) is reached.
  @Test
  public void testMinMergeCount() {
    // only merge when there are at least 4 manifests
    table.updateProperties().set("commit.manifest.min-count-to-merge", "4").commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newFastAppend()
        .appendFile(FILE_A)
        .commit();
    long idFileA = readMetadata().currentSnapshot().snapshotId();

    table.newFastAppend()
        .appendFile(FILE_B)
        .commit();
    long idFileB = readMetadata().currentSnapshot().snapshotId();

    Assert.assertEquals("Should have 2 manifests from setup writes",
        2, readMetadata().currentSnapshot().manifests().size());

    table.newAppend()
        .appendFile(FILE_C)
        .commit();
    long idFileC = readMetadata().currentSnapshot().snapshotId();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should have 3 unmerged manifests",
        3, base.currentSnapshot().manifests().size());
    Set<ManifestFile> unmerged = Sets.newHashSet(base.currentSnapshot().manifests());

    Snapshot pending = table.newAppend()
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 1 merged manifest after the 4th write",
        1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertFalse("Should not contain previous manifests", unmerged.contains(newManifest));

    long pendingId = pending.snapshotId();

    validateManifest(newManifest,
        ids(pendingId, idFileC, idFileB, idFileA),
        files(FILE_D, FILE_C, FILE_B, FILE_A));
  }

  // A small manifest-target-size prevents merging, leaving manifests separate.
  @Test
  public void testMergeSizeTargetWithExistingManifest() {
    // use a small limit on manifest size to prevent merging
    table.updateProperties()
        .set(TableProperties.MANIFEST_TARGET_SIZE_BYTES, "10")
        .commit();

    Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    Snapshot pending = table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .apply();

    Assert.assertEquals("Should contain 2 unmerged manifests after second write",
        2, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);
    Assert.assertNotEquals("Should not contain manifest from initial write",
        initialManifest, newManifest);

    long pendingId = pending.snapshotId();
    validateManifest(newManifest, ids(pendingId, pendingId), files(FILE_C, FILE_D));

    validateManifest(pending.manifests().get(1), ids(baseId, baseId), files(initialManifest));
  }

  // Writes under a new partition spec must not be merged with old-spec manifests.
  @Test
  public void testChangedPartitionSpec() {
    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should create 1 manifest for initial write",
        1, base.currentSnapshot().manifests().size());
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    // build the new spec using the table's schema, which uses fresh IDs
    PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
        .bucket("data", 16)
        .bucket("id", 4)
        .build();

    // commit the new partition spec to the table manually
    table.ops().commit(base, base.updatePartitionSpec(newSpec));

    DataFile newFileC = DataFiles.builder(newSpec)
        .copy(FILE_C)
        .withPartitionPath("data_bucket=2/id_bucket=3")
        .build();

    Snapshot pending = table.newAppend()
        .appendFile(newFileC)
        .apply();

    Assert.assertEquals("Should use 2 manifest files",
        2, pending.manifests().size());

    // new manifest comes first
    validateManifest(pending.manifests().get(0), ids(pending.snapshotId()), files(newFileC));

    Assert.assertEquals("Second manifest should be the initial manifest with the old spec",
        initialManifest, pending.manifests().get(1));
  }

  // Old-spec manifests may merge with each other, but not with new-spec manifests.
  @Test
  public void testChangedPartitionSpecMergeExisting() {
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    long id1 = readMetadata().currentSnapshot().snapshotId();

    // create a second compatible manifest
    table.newFastAppend()
        .appendFile(FILE_B)
        .commit();
    long id2 = readMetadata().currentSnapshot().snapshotId();

    TableMetadata base = readMetadata();
    Assert.assertEquals("Should contain 2 manifests",
        2, base.currentSnapshot().manifests().size());
    ManifestFile manifest = base.currentSnapshot().manifests().get(0);

    // build the new spec using the table's schema, which uses fresh IDs
    PartitionSpec newSpec = PartitionSpec.builderFor(base.schema())
        .bucket("data", 16)
        .bucket("id", 4)
        .build();

    // commit the new partition spec to the table manually
    table.ops().commit(base, base.updatePartitionSpec(newSpec));

    DataFile newFileC = DataFiles.builder(newSpec)
        .copy(FILE_C)
        .withPartitionPath("data_bucket=2/id_bucket=3")
        .build();

    Snapshot pending = table.newAppend()
        .appendFile(newFileC)
        .apply();

    Assert.assertEquals("Should use 2 manifest files",
        2, pending.manifests().size());
    Assert.assertFalse("First manifest should not be in the new snapshot",
        pending.manifests().contains(manifest));

    validateManifest(pending.manifests().get(0), ids(pending.snapshotId()), files(newFileC));
    validateManifest(pending.manifests().get(1), ids(id2, id1), files(FILE_B, FILE_A));
  }

  // After exhausting retries, the failed commit must clean up its new manifest file.
  @Test
  public void testFailure() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    table.ops().failCommits(5);

    AppendFiles append = table.newAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();

    Assert.assertEquals("Should merge to 1 manifest", 1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);

    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
    validateManifest(newManifest,
        ids(pending.snapshotId(), baseId),
        concat(files(FILE_B), files(initialManifest)));

    AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
        CommitFailedException.class, "Injected failure", append::commit);

    Assert.assertFalse("Should clean up new manifest", new File(newManifest.path()).exists());
  }

  // A commit that succeeds within the retry budget must reuse its new manifest.
  @Test
  public void testRecovery() {
    // merge all manifests for this test
    table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();

    table.newAppend()
        .appendFile(FILE_A)
        .commit();

    TableMetadata base = readMetadata();
    long baseId = base.currentSnapshot().snapshotId();
    ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);

    table.ops().failCommits(3);

    AppendFiles append = table.newAppend().appendFile(FILE_B);
    Snapshot pending = append.apply();

    Assert.assertEquals("Should merge to 1 manifest", 1, pending.manifests().size());
    ManifestFile newManifest = pending.manifests().get(0);

    Assert.assertTrue("Should create new manifest", new File(newManifest.path()).exists());
    validateManifest(newManifest,
        ids(pending.snapshotId(), baseId),
        concat(files(FILE_B), files(initialManifest)));

    append.commit();

    TableMetadata metadata = readMetadata();

    Assert.assertTrue("Should reuse the new manifest", new File(newManifest.path()).exists());
    Assert.assertEquals("Should commit the same new manifest during retry",
        Lists.newArrayList(newManifest), metadata.currentSnapshot().manifests());
  }
}
| 6,269 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestReplaceFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.ValidationException;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.collections.Sets;
import java.io.File;
import java.util.Collections;
import static com.netflix.iceberg.ManifestEntry.Status.ADDED;
import static com.netflix.iceberg.ManifestEntry.Status.DELETED;
import static com.netflix.iceberg.ManifestEntry.Status.EXISTING;
public class TestReplaceFiles extends TableTestBase {
@Test
public void testEmptyTable() {
  // A rewrite that deletes files from an empty table must fail validation,
  // because the files to delete are not present in the table.
  Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());

  TableMetadata base = readMetadata();
  Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());

  AssertHelpers.assertThrows("Expected an exception",
      ValidationException.class,
      "Missing required files to delete: /path/to/data-a.parquet",
      () -> table.newRewrite()
          .rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B))
          .commit());
}
@Test
public void testAddOnly() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
AssertHelpers.assertThrows("Expected an exception",
IllegalArgumentException.class,
"Files to add can not be null or empty",
() -> table.newRewrite()
.rewriteFiles(Sets.newSet(FILE_A), Collections.emptySet())
.apply());
}
@Test
public void testDeleteOnly() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
AssertHelpers.assertThrows("Expected an exception",
IllegalArgumentException.class,
"Files to delete cannot be null or empty",
() -> table.newRewrite()
.rewriteFiles(Collections.emptySet(), Sets.newSet(FILE_A))
.apply());
}
@Test
public void testDeleteWithDuplicateEntriesInManifest() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseSnapshotId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
Snapshot pending = table.newRewrite()
.rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_C))
.apply();
Assert.assertEquals("Should contain 2 manifest",
2, pending.manifests().size());
Assert.assertFalse("Should not contain manifest from initial write",
pending.manifests().contains(initialManifest));
long pendingId = pending.snapshotId();
validateManifestEntries(pending.manifests().get(0),
ids(pendingId),
files(FILE_C),
statuses(ADDED));
validateManifestEntries(pending.manifests().get(1),
ids(pendingId,pendingId, baseSnapshotId),
files(FILE_A, FILE_A, FILE_B),
statuses(DELETED, DELETED, EXISTING));
// We should only get the 3 manifests that this test is expected to add.
Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
}
@Test
public void testAddAndDelete() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseSnapshotId = base.currentSnapshot().snapshotId();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
ManifestFile initialManifest = base.currentSnapshot().manifests().get(0);
Snapshot pending = table.newRewrite()
.rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_C))
.apply();
Assert.assertEquals("Should contain 2 manifest",
2, pending.manifests().size());
Assert.assertFalse("Should not contain manifest from initial write",
pending.manifests().contains(initialManifest));
long pendingId = pending.snapshotId();
validateManifestEntries(pending.manifests().get(0),
ids(pendingId),
files(FILE_C),
statuses(ADDED));
validateManifestEntries(pending.manifests().get(1),
ids(pendingId, baseSnapshotId),
files(FILE_A, FILE_B),
statuses(DELETED, EXISTING));
// We should only get the 3 manifests that this test is expected to add.
Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
}
@Test
public void testFailure() {
table.newAppend()
.appendFile(FILE_A)
.commit();
table.ops().failCommits(5);
RewriteFiles rewrite = table.newRewrite()
.rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B));
Snapshot pending = rewrite.apply();
Assert.assertEquals("Should produce 2 manifests", 2, pending.manifests().size());
ManifestFile manifest1 = pending.manifests().get(0);
ManifestFile manifest2 = pending.manifests().get(1);
validateManifestEntries(manifest1,
ids(pending.snapshotId()), files(FILE_B), statuses(ADDED));
validateManifestEntries(manifest2,
ids(pending.snapshotId()), files(FILE_A), statuses(DELETED));
AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
CommitFailedException.class, "Injected failure", rewrite::commit);
Assert.assertFalse("Should clean up new manifest", new File(manifest1.path()).exists());
Assert.assertFalse("Should clean up new manifest", new File(manifest2.path()).exists());
// As commit failed all the manifests added with rewrite should be cleaned up
Assert.assertEquals("Only 1 manifest should exist", 1, listManifestFiles().size());
}
@Test
public void testRecovery() {
table.newAppend()
.appendFile(FILE_A)
.commit();
table.ops().failCommits(3);
RewriteFiles rewrite = table.newRewrite().rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B));
Snapshot pending = rewrite.apply();
Assert.assertEquals("Should produce 2 manifests", 2, pending.manifests().size());
ManifestFile manifest1 = pending.manifests().get(0);
ManifestFile manifest2 = pending.manifests().get(1);
validateManifestEntries(manifest1,
ids(pending.snapshotId()), files(FILE_B), statuses(ADDED));
validateManifestEntries(manifest2,
ids(pending.snapshotId()), files(FILE_A), statuses(DELETED));
rewrite.commit();
Assert.assertTrue("Should reuse the manifest for appends", new File(manifest1.path()).exists());
Assert.assertTrue("Should reuse the manifest with deletes", new File(manifest2.path()).exists());
TableMetadata metadata = readMetadata();
Assert.assertTrue("Should commit the manifest for append",
metadata.currentSnapshot().manifests().contains(manifest2));
// 2 manifests added by rewrite and 1 original manifest should be found.
Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
}
@Test
public void testDeleteNonExistentFile() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
AssertHelpers.assertThrows("Expected an exception",
ValidationException.class,
"Missing required files to delete: /path/to/data-c.parquet",
() -> table.newRewrite()
.rewriteFiles(Sets.newSet(FILE_C), Sets.newSet(FILE_D))
.commit());
Assert.assertEquals("Only 1 manifests should exist", 1, listManifestFiles().size());
}
@Test
public void testAlreadyDeletedFile() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
Assert.assertEquals("Should create 1 manifest for initial write",
1, base.currentSnapshot().manifests().size());
RewriteFiles rewrite = table.newRewrite();
Snapshot pending = rewrite
.rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_B))
.apply();
Assert.assertEquals("Should contain 2 manifest",
2, pending.manifests().size());
long pendingId = pending.snapshotId();
validateManifestEntries(pending.manifests().get(0),
ids(pendingId),
files(FILE_B),
statuses(ADDED));
validateManifestEntries(pending.manifests().get(1),
ids(pendingId, base.currentSnapshot().snapshotId()),
files(FILE_A),
statuses(DELETED));
rewrite.commit();
AssertHelpers.assertThrows("Expected an exception",
ValidationException.class,
"Missing required files to delete: /path/to/data-a.parquet",
() -> table.newRewrite()
.rewriteFiles(Sets.newSet(FILE_A), Sets.newSet(FILE_D))
.commit());
Assert.assertEquals("Only 3 manifests should exist", 3, listManifestFiles().size());
}
}
| 6,270 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestTableMetadataJson.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.fasterxml.jackson.core.JsonGenerator;
import com.fasterxml.jackson.databind.JsonNode;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.netflix.iceberg.TableMetadata.SnapshotLogEntry;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.JsonUtil;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.io.StringWriter;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import java.util.Random;
import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.TableMetadataParser.CURRENT_SNAPSHOT_ID;
import static com.netflix.iceberg.TableMetadataParser.FORMAT_VERSION;
import static com.netflix.iceberg.TableMetadataParser.LAST_COLUMN_ID;
import static com.netflix.iceberg.TableMetadataParser.LAST_UPDATED_MILLIS;
import static com.netflix.iceberg.TableMetadataParser.LOCATION;
import static com.netflix.iceberg.TableMetadataParser.PARTITION_SPEC;
import static com.netflix.iceberg.TableMetadataParser.PROPERTIES;
import static com.netflix.iceberg.TableMetadataParser.SCHEMA;
import static com.netflix.iceberg.TableMetadataParser.SNAPSHOTS;
public class TestTableMetadataJson {
@Rule
public TemporaryFolder temp = new TemporaryFolder();
public TableOperations ops = new LocalTableOperations(temp);
@Test
public void testJsonConversion() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "y", Types.LongType.get()),
Types.NestedField.required(3, "z", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(5).build();
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
null, previousSnapshotId, null, previousSnapshotId, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
null, currentSnapshotId, previousSnapshotId, currentSnapshotId, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));
List<SnapshotLogEntry> snapshotLog = ImmutableList.<SnapshotLogEntry>builder()
.add(new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()))
.add(new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()))
.build();
TableMetadata expected = new TableMetadata(ops, null, "s3://bucket/test/location",
System.currentTimeMillis(), 3, schema, 5, ImmutableList.of(spec),
ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), snapshotLog);
String asJson = TableMetadataParser.toJson(expected);
TableMetadata metadata = TableMetadataParser.fromJson(ops, null,
JsonUtil.mapper().readValue(asJson, JsonNode.class));
Assert.assertEquals("Table location should match",
expected.location(), metadata.location());
Assert.assertEquals("Last column ID should match",
expected.lastColumnId(), metadata.lastColumnId());
Assert.assertEquals("Schema should match",
expected.schema().asStruct(), metadata.schema().asStruct());
Assert.assertEquals("Partition spec should match",
expected.spec().toString(), metadata.spec().toString());
Assert.assertEquals("Default spec ID should match",
expected.defaultSpecId(), metadata.defaultSpecId());
Assert.assertEquals("PartitionSpec map should match",
expected.specs(), metadata.specs());
Assert.assertEquals("Properties should match",
expected.properties(), metadata.properties());
Assert.assertEquals("Snapshot logs should match",
expected.snapshotLog(), metadata.snapshotLog());
Assert.assertEquals("Current snapshot ID should match",
currentSnapshotId, metadata.currentSnapshot().snapshotId());
Assert.assertEquals("Parent snapshot ID should match",
(Long) previousSnapshotId, metadata.currentSnapshot().parentId());
Assert.assertEquals("Current snapshot files should match",
currentSnapshot.manifests(), metadata.currentSnapshot().manifests());
Assert.assertEquals("Previous snapshot ID should match",
previousSnapshotId, metadata.snapshot(previousSnapshotId).snapshotId());
Assert.assertEquals("Previous snapshot files should match",
previousSnapshot.manifests(),
metadata.snapshot(previousSnapshotId).manifests());
}
@Test
public void testFromJsonSortsSnapshotLog() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "y", Types.LongType.get()),
Types.NestedField.required(3, "z", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).withSpecId(5).build();
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops, previousSnapshotId, null, previousSnapshotId, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops, currentSnapshotId, previousSnapshotId, currentSnapshotId, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));
List<SnapshotLogEntry> reversedSnapshotLog = Lists.newArrayList();
TableMetadata expected = new TableMetadata(ops, null, "s3://bucket/test/location",
System.currentTimeMillis(), 3, schema, 5, ImmutableList.of(spec),
ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), reversedSnapshotLog);
// add the entries after creating TableMetadata to avoid the sorted check
reversedSnapshotLog.add(
new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()));
reversedSnapshotLog.add(
new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()));
String asJson = TableMetadataParser.toJson(expected);
TableMetadata metadata = TableMetadataParser.fromJson(ops, null,
JsonUtil.mapper().readValue(asJson, JsonNode.class));
List<SnapshotLogEntry> expectedSnapshotLog = ImmutableList.<SnapshotLogEntry>builder()
.add(new SnapshotLogEntry(previousSnapshot.timestampMillis(), previousSnapshot.snapshotId()))
.add(new SnapshotLogEntry(currentSnapshot.timestampMillis(), currentSnapshot.snapshotId()))
.build();
Assert.assertEquals("Snapshot logs should match",
expectedSnapshotLog, metadata.snapshotLog());
}
@Test
public void testBackwardCompatMissingPartitionSpecList() throws Exception {
Schema schema = new Schema(
Types.NestedField.required(1, "x", Types.LongType.get()),
Types.NestedField.required(2, "y", Types.LongType.get()),
Types.NestedField.required(3, "z", Types.LongType.get())
);
PartitionSpec spec = PartitionSpec.builderFor(schema).identity("x").withSpecId(6).build();
long previousSnapshotId = System.currentTimeMillis() - new Random(1234).nextInt(3600);
Snapshot previousSnapshot = new BaseSnapshot(
ops, previousSnapshotId, null, previousSnapshotId, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.1.avro"), spec.specId())));
long currentSnapshotId = System.currentTimeMillis();
Snapshot currentSnapshot = new BaseSnapshot(
ops, currentSnapshotId, previousSnapshotId, currentSnapshotId, ImmutableList.of(
new GenericManifestFile(localInput("file:/tmp/manfiest.2.avro"), spec.specId())));
TableMetadata expected = new TableMetadata(ops, null, "s3://bucket/test/location",
System.currentTimeMillis(), 3, schema, 6, ImmutableList.of(spec),
ImmutableMap.of("property", "value"), currentSnapshotId,
Arrays.asList(previousSnapshot, currentSnapshot), ImmutableList.of());
String asJson = toJsonWithoutSpecList(expected);
TableMetadata metadata = TableMetadataParser.fromJson(ops, null,
JsonUtil.mapper().readValue(asJson, JsonNode.class));
Assert.assertEquals("Table location should match",
expected.location(), metadata.location());
Assert.assertEquals("Last column ID should match",
expected.lastColumnId(), metadata.lastColumnId());
Assert.assertEquals("Schema should match",
expected.schema().asStruct(), metadata.schema().asStruct());
Assert.assertEquals("Partition spec should be the default",
expected.spec().toString(), metadata.spec().toString());
Assert.assertEquals("Default spec ID should default to TableMetadata.INITIAL_SPEC_ID",
TableMetadata.INITIAL_SPEC_ID, metadata.defaultSpecId());
Assert.assertEquals("PartitionSpec should contain the spec",
1, metadata.specs().size());
Assert.assertTrue("PartitionSpec should contain the spec",
metadata.specs().get(0).compatibleWith(spec));
Assert.assertEquals("PartitionSpec should have ID TableMetadata.INITIAL_SPEC_ID",
TableMetadata.INITIAL_SPEC_ID, metadata.specs().get(0).specId());
Assert.assertEquals("Properties should match",
expected.properties(), metadata.properties());
Assert.assertEquals("Snapshot logs should match",
expected.snapshotLog(), metadata.snapshotLog());
Assert.assertEquals("Current snapshot ID should match",
currentSnapshotId, metadata.currentSnapshot().snapshotId());
Assert.assertEquals("Parent snapshot ID should match",
(Long) previousSnapshotId, metadata.currentSnapshot().parentId());
Assert.assertEquals("Current snapshot files should match",
currentSnapshot.manifests(), metadata.currentSnapshot().manifests());
Assert.assertEquals("Previous snapshot ID should match",
previousSnapshotId, metadata.snapshot(previousSnapshotId).snapshotId());
Assert.assertEquals("Previous snapshot files should match",
previousSnapshot.manifests(),
metadata.snapshot(previousSnapshotId).manifests());
}
public static String toJsonWithoutSpecList(TableMetadata metadata) {
StringWriter writer = new StringWriter();
try {
JsonGenerator generator = JsonUtil.factory().createGenerator(writer);
generator.writeStartObject(); // start table metadata object
generator.writeNumberField(FORMAT_VERSION, TableMetadata.TABLE_FORMAT_VERSION);
generator.writeStringField(LOCATION, metadata.location());
generator.writeNumberField(LAST_UPDATED_MILLIS, metadata.lastUpdatedMillis());
generator.writeNumberField(LAST_COLUMN_ID, metadata.lastColumnId());
generator.writeFieldName(SCHEMA);
SchemaParser.toJson(metadata.schema(), generator);
// mimic an old writer by writing only partition-spec and not the default ID or spec list
generator.writeFieldName(PARTITION_SPEC);
PartitionSpecParser.toJsonFields(metadata.spec(), generator);
generator.writeObjectFieldStart(PROPERTIES);
for (Map.Entry<String, String> keyValue : metadata.properties().entrySet()) {
generator.writeStringField(keyValue.getKey(), keyValue.getValue());
}
generator.writeEndObject();
generator.writeNumberField(CURRENT_SNAPSHOT_ID,
metadata.currentSnapshot() != null ? metadata.currentSnapshot().snapshotId() : -1);
generator.writeArrayFieldStart(SNAPSHOTS);
for (Snapshot snapshot : metadata.snapshots()) {
SnapshotParser.toJson(snapshot, generator);
}
generator.writeEndArray();
// skip the snapshot log
generator.writeEndObject(); // end table metadata object
generator.flush();
} catch (IOException e) {
throw new RuntimeIOException(e, "Failed to write json for: %s", metadata);
}
return writer.toString();
}
}
| 6,271 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TableMetadataParserTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.io.OutputFile;
import com.netflix.iceberg.types.Types.BooleanType;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.apache.hadoop.conf.Configuration;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Paths;
import static avro.shaded.com.google.common.collect.Lists.newArrayList;
import static com.netflix.iceberg.ConfigProperties.COMPRESS_METADATA;
import static com.netflix.iceberg.PartitionSpec.unpartitioned;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.types.Types.NestedField.optional;
public class TableMetadataParserTest {
private final Schema SCHEMA = new Schema(newArrayList(optional(1, "b", BooleanType.get())));
private final TableMetadata EXPECTED = newTableMetadata(null, SCHEMA, unpartitioned(), "file://tmp/db/table");
@Test
public void testCompressionProperty() throws IOException {
final boolean[] props = {true, false};
final Configuration configuration = new Configuration();
for (boolean prop : props) {
configuration.setBoolean(COMPRESS_METADATA, prop);
final OutputFile outputFile = Files.localOutput(getFileExtension(configuration));
TableMetadataParser.write(EXPECTED, outputFile);
Assert.assertEquals(prop, isCompressed(getFileExtension(configuration)));
final TableMetadata read = TableMetadataParser.read(null, Files.localInput(new File(getFileExtension(configuration))));
verifyMetadata(read);
}
}
@After
public void cleanup() throws IOException {
final boolean[] props = {true, false};
Configuration configuration = new Configuration();
for (boolean prop : props) {
configuration.setBoolean(COMPRESS_METADATA, prop);
java.nio.file.Files.deleteIfExists(Paths.get(getFileExtension(configuration)));
}
}
private void verifyMetadata(TableMetadata read) {
Assert.assertEquals(EXPECTED.schema().asStruct(), read.schema().asStruct());
Assert.assertEquals(EXPECTED.location(), read.location());
Assert.assertEquals(EXPECTED.lastColumnId(), read.lastColumnId());
Assert.assertEquals(EXPECTED.properties(), read.properties());
}
private boolean isCompressed(String path) throws IOException {
try (InputStream ignored = new GzipCompressorInputStream(new FileInputStream(new File(path)))) {
return true;
} catch (IOException e) {
if (e.getMessage().equals("Input is not in the .gz format"))
return false;
else
throw e;
}
}
}
| 6,272 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestReplacePartitions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.ValidationException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
public class TestReplacePartitions extends TableTestBase {
static final DataFile FILE_E = DataFiles.builder(SPEC)
.withPath("/path/to/data-e.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=0") // same partition as FILE_A
.withRecordCount(0)
.build();
static final DataFile FILE_F = DataFiles.builder(SPEC)
.withPath("/path/to/data-f.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=1") // same partition as FILE_B
.withRecordCount(0)
.build();
static final DataFile FILE_G = DataFiles.builder(SPEC)
.withPath("/path/to/data-g.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=10") // no other partition
.withRecordCount(0)
.build();
@Test
public void testReplaceOnePartition() {
table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
table.newReplacePartitions()
.addFile(FILE_E)
.commit();
long replaceId = readMetadata().currentSnapshot().snapshotId();
Assert.assertNotEquals("Should create a new snapshot", baseId, replaceId);
Assert.assertEquals("Table should have 2 manifests",
2, table.currentSnapshot().manifests().size());
// manifest is not merged because it is less than the minimum
validateManifestEntries(table.currentSnapshot().manifests().get(0),
ids(replaceId),
files(FILE_E),
statuses(Status.ADDED));
validateManifestEntries(table.currentSnapshot().manifests().get(1),
ids(replaceId, baseId),
files(FILE_A, FILE_B),
statuses(Status.DELETED, Status.EXISTING));
}
@Test
public void testReplaceAndMergeOnePartition() {
// ensure the overwrite results in a merge
table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();
table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
table.newReplacePartitions()
.addFile(FILE_E)
.commit();
long replaceId = readMetadata().currentSnapshot().snapshotId();
Assert.assertNotEquals("Should create a new snapshot", baseId, replaceId);
Assert.assertEquals("Table should have 1 manifest",
1, table.currentSnapshot().manifests().size());
validateManifestEntries(table.currentSnapshot().manifests().get(0),
ids(replaceId, replaceId, baseId),
files(FILE_E, FILE_A, FILE_B),
statuses(Status.ADDED, Status.DELETED, Status.EXISTING));
}
@Test
public void testReplaceWithUnpartitionedTable() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Table unpartitioned = TestTables.create(
tableDir, "unpartitioned", SCHEMA, PartitionSpec.unpartitioned());
Assert.assertEquals("Table version should be 0",
0, (long) TestTables.metadataVersion("unpartitioned"));
unpartitioned.newAppend()
.appendFile(FILE_A)
.commit();
// make sure the data was successfully added
Assert.assertEquals("Table version should be 1",
1, (long) TestTables.metadataVersion("unpartitioned"));
validateSnapshot(null, TestTables.readMetadata("unpartitioned").currentSnapshot(), FILE_A);
unpartitioned.newReplacePartitions()
.addFile(FILE_B)
.commit();
Assert.assertEquals("Table version should be 2",
2, (long) TestTables.metadataVersion("unpartitioned"));
TableMetadata replaceMetadata = TestTables.readMetadata("unpartitioned");
long replaceId = replaceMetadata.currentSnapshot().snapshotId();
Assert.assertEquals("Table should have 2 manifests",
2, replaceMetadata.currentSnapshot().manifests().size());
validateManifestEntries(replaceMetadata.currentSnapshot().manifests().get(0),
ids(replaceId), files(FILE_B), statuses(Status.ADDED));
validateManifestEntries(replaceMetadata.currentSnapshot().manifests().get(1),
ids(replaceId), files(FILE_A), statuses(Status.DELETED));
}
@Test
public void testReplaceAndMergeWithUnpartitionedTable() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Table unpartitioned = TestTables.create(
tableDir, "unpartitioned", SCHEMA, PartitionSpec.unpartitioned());
// ensure the overwrite results in a merge
unpartitioned.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();
Assert.assertEquals("Table version should be 1",
1, (long) TestTables.metadataVersion("unpartitioned"));
unpartitioned.newAppend()
.appendFile(FILE_A)
.commit();
// make sure the data was successfully added
Assert.assertEquals("Table version should be 2",
2, (long) TestTables.metadataVersion("unpartitioned"));
validateSnapshot(null, TestTables.readMetadata("unpartitioned").currentSnapshot(), FILE_A);
unpartitioned.newReplacePartitions()
.addFile(FILE_B)
.commit();
Assert.assertEquals("Table version should be 3",
3, (long) TestTables.metadataVersion("unpartitioned"));
TableMetadata replaceMetadata = TestTables.readMetadata("unpartitioned");
long replaceId = replaceMetadata.currentSnapshot().snapshotId();
Assert.assertEquals("Table should have 1 manifest",
1, replaceMetadata.currentSnapshot().manifests().size());
validateManifestEntries(replaceMetadata.currentSnapshot().manifests().get(0),
ids(replaceId, replaceId), files(FILE_B, FILE_A), statuses(Status.ADDED, Status.DELETED));
}
@Test
public void testValidationFailure() {
table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
ReplacePartitions replace = table.newReplacePartitions()
.addFile(FILE_F)
.addFile(FILE_G)
.validateAppendOnly();
AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
ValidationException.class, "Cannot commit file that conflicts with existing partition",
replace::commit);
Assert.assertEquals("Should not create a new snapshot",
baseId, readMetadata().currentSnapshot().snapshotId());
}
@Test
public void testValidationSuccess() {
table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
long baseId = base.currentSnapshot().snapshotId();
table.newReplacePartitions()
.addFile(FILE_G)
.validateAppendOnly()
.commit();
long replaceId = readMetadata().currentSnapshot().snapshotId();
Assert.assertNotEquals("Should create a new snapshot", baseId, replaceId);
Assert.assertEquals("Table should have 2 manifests",
2, table.currentSnapshot().manifests().size());
// manifest is not merged because it is less than the minimum
validateManifestEntries(table.currentSnapshot().manifests().get(0),
ids(replaceId),
files(FILE_G),
statuses(Status.ADDED));
validateManifestEntries(table.currentSnapshot().manifests().get(1),
ids(baseId, baseId),
files(FILE_A, FILE_B),
statuses(Status.ADDED, Status.ADDED));
}
}
| 6,273 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestScanSummary.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import com.netflix.iceberg.util.Pair;
import org.junit.Assert;
import org.junit.Test;
import static com.netflix.iceberg.ScanSummary.timestampRange;
import static com.netflix.iceberg.ScanSummary.toMillis;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.greaterThan;
import static com.netflix.iceberg.expressions.Expressions.greaterThanOrEqual;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.expressions.Expressions.lessThanOrEqual;
/**
 * Tests for ScanSummary's static helpers: deriving an inclusive [min, max]
 * timestamp range from a list of filter expressions on "ts_ms", and
 * normalizing timestamps given in seconds, millis, or micros to milliseconds.
 */
public class TestScanSummary {
  @Test
  public void testTimestampRanges() {
    long lower = 1542750188523L;
    long upper = 1542750695131L;
    // a single <= predicate fixes only the upper bound; the lower end stays open
    Assert.assertEquals("Should use inclusive bound",
        Pair.of(Long.MIN_VALUE, upper),
        timestampRange(ImmutableList.of(lessThanOrEqual("ts_ms", upper))));
    // with multiple upper bounds, the smallest one wins
    Assert.assertEquals("Should use lower value for upper bound",
        Pair.of(Long.MIN_VALUE, upper),
        timestampRange(ImmutableList.of(
            lessThanOrEqual("ts_ms", upper + 918234),
            lessThanOrEqual("ts_ms", upper))));
    // a strict < bound is converted to an inclusive bound by subtracting 1
    Assert.assertEquals("Should make upper bound inclusive",
        Pair.of(Long.MIN_VALUE, upper - 1),
        timestampRange(ImmutableList.of(lessThan("ts_ms", upper))));
    // a single >= predicate fixes only the lower bound; the upper end stays open
    Assert.assertEquals("Should use inclusive bound",
        Pair.of(lower, Long.MAX_VALUE),
        timestampRange(ImmutableList.of(greaterThanOrEqual("ts_ms", lower))));
    // with multiple lower bounds, the largest one wins
    Assert.assertEquals("Should use upper value for lower bound",
        Pair.of(lower, Long.MAX_VALUE),
        timestampRange(ImmutableList.of(
            greaterThanOrEqual("ts_ms", lower - 918234),
            greaterThanOrEqual("ts_ms", lower))));
    // a strict > bound is converted to an inclusive bound by adding 1
    Assert.assertEquals("Should make lower bound inclusive",
        Pair.of(lower + 1, Long.MAX_VALUE),
        timestampRange(ImmutableList.of(greaterThan("ts_ms", lower))));
    // equality pins both ends of the range to the same value
    Assert.assertEquals("Should set both bounds for equals",
        Pair.of(lower, lower),
        timestampRange(ImmutableList.of(equal("ts_ms", lower))));
    // combining a lower and an upper predicate closes the range on both ends
    Assert.assertEquals("Should set both bounds",
        Pair.of(lower, upper - 1),
        timestampRange(ImmutableList.of(
            greaterThanOrEqual("ts_ms", lower),
            lessThan("ts_ms", upper))));
    // >= lower and < lower is an empty range
    AssertHelpers.assertThrows("Should reject empty ranges",
        IllegalArgumentException.class, "No timestamps can match filters",
        () -> timestampRange(ImmutableList.of(
            greaterThanOrEqual("ts_ms", lower),
            lessThan("ts_ms", lower))));
  }

  @Test
  public void testToMillis() {
    long millis = 1542750947417L;
    // second-precision input is scaled up to millis (sub-second digits are gone)
    Assert.assertEquals(1542750947000L, toMillis(millis / 1000));
    // milli-precision input is returned unchanged
    Assert.assertEquals(1542750947417L, toMillis(millis));
    // micro-precision input is truncated down to millis
    Assert.assertEquals(1542750947417L, toMillis(millis * 1000 + 918));
  }
}
| 6,274 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestSnapshotJson.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableList;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.util.List;
import static com.netflix.iceberg.Files.localInput;
/**
 * Verifies that snapshots survive a JSON round-trip through SnapshotParser,
 * both when manifests are listed inline and when they are stored in a
 * separate manifest-list file.
 */
public class TestSnapshotJson {
  @Rule
  public TemporaryFolder temp = new TemporaryFolder();

  public TableOperations ops = new LocalTableOperations(temp);

  @Test
  public void testJsonConversion() {
    // a snapshot built from direct manifest paths should round-trip through JSON
    Snapshot original = new BaseSnapshot(ops, System.currentTimeMillis(),
        "file:/tmp/manifest1.avro", "file:/tmp/manifest2.avro");

    String asJson = SnapshotParser.toJson(original);
    Snapshot parsed = SnapshotParser.fromJson(ops, asJson);

    Assert.assertEquals("Snapshot ID should match",
        original.snapshotId(), parsed.snapshotId());
    Assert.assertEquals("Files should match",
        original.manifests(), parsed.manifests());
  }

  @Test
  public void testJsonConversionWithManifestList() throws IOException {
    long parentSnapshotId = 1;
    long snapshotId = 2;
    List<ManifestFile> manifestFiles = ImmutableList.of(
        new GenericManifestFile(localInput("file:/tmp/manifest1.avro"), 0),
        new GenericManifestFile(localInput("file:/tmp/manifest2.avro"), 0));

    // write the manifests into a manifest-list file that the snapshot points at
    File listFile = temp.newFile("manifests");
    Assert.assertTrue(listFile.delete());
    listFile.deleteOnExit();
    try (ManifestListWriter listWriter = new ManifestListWriter(
        Files.localOutput(listFile), snapshotId, parentSnapshotId)) {
      listWriter.addAll(manifestFiles);
    }

    // one snapshot backed by the list file, one holding the manifests in memory
    Snapshot withList = new BaseSnapshot(
        ops, snapshotId, parentSnapshotId, System.currentTimeMillis(), localInput(listFile));
    Snapshot withFiles = new BaseSnapshot(
        ops, snapshotId, parentSnapshotId, withList.timestampMillis(), manifestFiles);
    Assert.assertEquals("Files should match in memory list",
        withFiles.manifests(), withList.manifests());

    String asJson = SnapshotParser.toJson(withList);
    Snapshot parsed = SnapshotParser.fromJson(ops, asJson);

    Assert.assertEquals("Snapshot ID should match",
        withList.snapshotId(), parsed.snapshotId());
    Assert.assertEquals("Timestamp should match",
        withList.timestampMillis(), parsed.timestampMillis());
    Assert.assertEquals("Parent ID should match",
        withList.parentId(), parsed.parentId());
    Assert.assertEquals("Manifest list should match",
        withList.manifestListLocation(), parsed.manifestListLocation());
    Assert.assertEquals("Files should match",
        withList.manifests(), parsed.manifests());
  }
}
| 6,275 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestTransaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Sets;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.Set;
/**
 * Tests transaction behavior: staged operations stay invisible until
 * commitTransaction(), uncommitted pending operations are rejected, and
 * conflicting commits are retried, reusing or merging manifests as configured.
 */
public class TestTransaction extends TableTestBase {
  @Test
  public void testEmptyTransaction() {
    // committing a transaction with no operations should be a no-op: no new version
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    t.commitTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
  }

  @Test
  public void testSingleOperationTransaction() {
    // an append committed inside a transaction is only visible after commitTransaction()
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    // the append is staged in the transaction, not applied to the table yet
    Assert.assertSame("Base metadata should not change when an append is committed",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after append", 0, (int) version());
    t.commitTransaction();
    // after the transaction commits, the append becomes the current snapshot
    validateSnapshot(base.currentSnapshot(), readMetadata().currentSnapshot(), FILE_A, FILE_B);
    Assert.assertEquals("Table should be on version 1 after commit", 1, (int) version());
  }

  @Test
  public void testMultipleOperationTransaction() {
    // an append followed by a delete in the same transaction lands as one table commit
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    Snapshot appendSnapshot = t.table().currentSnapshot();
    // second operation in the same transaction builds on the staged append
    t.newDelete()
        .deleteFile(FILE_A)
        .commit();
    Snapshot deleteSnapshot = t.table().currentSnapshot();
    Assert.assertSame("Base metadata should not change when an append is committed",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after append", 0, (int) version());
    t.commitTransaction();
    // a single table version is produced, whose current snapshot is the delete
    Assert.assertEquals("Table should be on version 1 after commit", 1, (int) version());
    Assert.assertEquals("Table should have one manifest after commit",
        1, readMetadata().currentSnapshot().manifests().size());
    Assert.assertEquals("Table snapshot should be the delete snapshot",
        deleteSnapshot, readMetadata().currentSnapshot());
    validateManifestEntries(readMetadata().currentSnapshot().manifests().get(0),
        ids(deleteSnapshot.snapshotId(), appendSnapshot.snapshotId()),
        files(FILE_A, FILE_B), statuses(Status.DELETED, Status.EXISTING));
    // both intermediate snapshots are retained in table metadata
    Assert.assertEquals("Table should have a snapshot for each operation",
        2, readMetadata().snapshots().size());
    validateManifestEntries(readMetadata().snapshots().get(0).manifests().get(0),
        ids(appendSnapshot.snapshotId(), appendSnapshot.snapshotId()),
        files(FILE_A, FILE_B), statuses(Status.ADDED, Status.ADDED));
  }

  @Test
  public void testMultipleOperationTransactionFromTable() {
    // same as testMultipleOperationTransaction, but the delete is created through
    // the transaction's table view (t.table().newDelete()) instead of t.newDelete()
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    Snapshot appendSnapshot = t.table().currentSnapshot();
    t.table().newDelete()
        .deleteFile(FILE_A)
        .commit();
    Snapshot deleteSnapshot = t.table().currentSnapshot();
    Assert.assertSame("Base metadata should not change when an append is committed",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after append", 0, (int) version());
    t.commitTransaction();
    Assert.assertEquals("Table should be on version 1 after commit", 1, (int) version());
    Assert.assertEquals("Table should have one manifest after commit",
        1, readMetadata().currentSnapshot().manifests().size());
    Assert.assertEquals("Table snapshot should be the delete snapshot",
        deleteSnapshot, readMetadata().currentSnapshot());
    validateManifestEntries(readMetadata().currentSnapshot().manifests().get(0),
        ids(deleteSnapshot.snapshotId(), appendSnapshot.snapshotId()),
        files(FILE_A, FILE_B), statuses(Status.DELETED, Status.EXISTING));
    Assert.assertEquals("Table should have a snapshot for each operation",
        2, readMetadata().snapshots().size());
    validateManifestEntries(readMetadata().snapshots().get(0).manifests().get(0),
        ids(appendSnapshot.snapshotId(), appendSnapshot.snapshotId()),
        files(FILE_A, FILE_B), statuses(Status.ADDED, Status.ADDED));
  }

  @Test
  public void testDetectsUncommittedChange() {
    // starting a new operation while a previous one is pending must fail
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    t.newAppend().appendFile(FILE_A).appendFile(FILE_B); // not committed
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class,
        "Cannot create new DeleteFiles: last operation has not committed",
        t::newDelete);
  }

  @Test
  public void testDetectsUncommittedChangeOnCommit() {
    // committing the transaction while an operation is pending must fail
    Assert.assertEquals("Table should be on version 0", 0, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    t.newAppend().appendFile(FILE_A).appendFile(FILE_B); // not committed
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 0 after txn create", 0, (int) version());
    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class,
        "Cannot commit transaction: last operation has not committed",
        t::commitTransaction);
  }

  @Test
  public void testTransactionConflict() {
    // with retries disabled, an injected commit failure surfaces to the caller
    // set retries to 0 to catch the failure
    table.updateProperties()
        .set(TableProperties.COMMIT_NUM_RETRIES, "0")
        .commit();
    Assert.assertEquals("Table should be on version 1", 1, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
    // cause the transaction commit to fail
    table.ops().failCommits(1);
    AssertHelpers.assertThrows("Transaction commit should fail",
        CommitFailedException.class, "Injected failure", t::commitTransaction);
  }

  @Test
  public void testTransactionRetry() {
    // with one retry allowed, a single injected failure is absorbed and the
    // manifests written by the first attempt are reused on the retry
    // use only one retry
    table.updateProperties()
        .set(TableProperties.COMMIT_NUM_RETRIES, "1")
        .commit();
    Assert.assertEquals("Table should be on version 1", 1, (int) version())
;
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Set<ManifestFile> appendManifests = Sets.newHashSet(t.table().currentSnapshot().manifests());
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
    // cause the transaction commit to fail
    table.ops().failCommits(1);
    t.commitTransaction();
    Assert.assertEquals("Table should be on version 2 after commit", 2, (int) version());
    Assert.assertEquals("Should reuse manifests from initial append commit",
        appendManifests, Sets.newHashSet(table.currentSnapshot().manifests()));
  }

  @Test
  public void testTransactionRetryMergeAppend() {
    // a conflicting real append forces a retry; the retried commit keeps both
    // the transaction's manifests and the conflicting append's manifests
    // use only one retry
    table.updateProperties()
        .set(TableProperties.COMMIT_NUM_RETRIES, "1")
        .commit();
    Assert.assertEquals("Table should be on version 1", 1, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Set<ManifestFile> appendManifests = Sets.newHashSet(t.table().currentSnapshot().manifests());
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
    // cause the transaction commit to fail
    table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    Assert.assertEquals("Table should be on version 2 after real append", 2, (int) version());
    Set<ManifestFile> conflictAppendManifests = Sets.newHashSet(table.currentSnapshot().manifests());
    t.commitTransaction();
    Assert.assertEquals("Table should be on version 3 after commit", 3, (int) version());
    Set<ManifestFile> expectedManifests = Sets.newHashSet();
    expectedManifests.addAll(appendManifests);
    expectedManifests.addAll(conflictAppendManifests);
    Assert.assertEquals("Should reuse manifests from initial append commit and conflicting append",
        expectedManifests, Sets.newHashSet(table.currentSnapshot().manifests()));
  }

  @Test
  public void testMultipleUpdateTransactionRetryMergeCleanup() {
    // with aggressive manifest merging, the retried commit merges the
    // transaction's and the conflicting append's manifests into a new one and
    // deletes the transaction's no-longer-referenced manifest file
    // use only one retry and aggressively merge manifests
    table.updateProperties()
        .set(TableProperties.COMMIT_NUM_RETRIES, "1")
        .set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "0")
        .commit();
    Assert.assertEquals("Table should be on version 1", 1, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
    t.updateProperties()
        .set("test-property", "test-value")
        .commit();
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Assert.assertEquals("Append should create one manifest",
        1, t.table().currentSnapshot().manifests().size());
    ManifestFile appendManifest = t.table().currentSnapshot().manifests().get(0);
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
    // cause the transaction commit to fail
    table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    Assert.assertEquals("Table should be on version 2 after real append", 2, (int) version());
    Set<ManifestFile> conflictAppendManifests = Sets.newHashSet(table.currentSnapshot().manifests());
    t.commitTransaction();
    Assert.assertEquals("Table should be on version 3 after commit", 3, (int) version());
    Set<ManifestFile> previousManifests = Sets.newHashSet();
    previousManifests.add(appendManifest);
    previousManifests.addAll(conflictAppendManifests);
    Assert.assertEquals("Should merge both commit manifests into a single manifest",
        1, table.currentSnapshot().manifests().size());
    Assert.assertFalse("Should merge both commit manifests into a new manifest",
        previousManifests.contains(table.currentSnapshot().manifests().get(0)));
    Assert.assertFalse("Append manifest should be deleted", new File(appendManifest.path()).exists());
  }

  @Test
  public void testTransactionRetryMergeCleanup() {
    // same as testMultipleUpdateTransactionRetryMergeCleanup but without the
    // extra property update in the transaction
    // use only one retry and aggressively merge manifests
    table.updateProperties()
        .set(TableProperties.COMMIT_NUM_RETRIES, "1")
        .set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "0")
        .commit();
    Assert.assertEquals("Table should be on version 1", 1, (int) version());
    TableMetadata base = readMetadata();
    Transaction t = table.newTransaction();
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after txn create", 1, (int) version());
    t.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Assert.assertEquals("Append should create one manifest",
        1, t.table().currentSnapshot().manifests().size());
    ManifestFile appendManifest = t.table().currentSnapshot().manifests().get(0);
    Assert.assertSame("Base metadata should not change when commit is created",
        base, readMetadata());
    Assert.assertEquals("Table should be on version 1 after append", 1, (int) version());
    // cause the transaction commit to fail
    table.newAppend()
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    Assert.assertEquals("Table should be on version 2 after real append", 2, (int) version());
    Set<ManifestFile> conflictAppendManifests = Sets.newHashSet(table.currentSnapshot().manifests());
    t.commitTransaction();
    Assert.assertEquals("Table should be on version 3 after commit", 3, (int) version());
    Set<ManifestFile> previousManifests = Sets.newHashSet();
    previousManifests.add(appendManifest);
    previousManifests.addAll(conflictAppendManifests);
    Assert.assertEquals("Should merge both commit manifests into a single manifest",
        1, table.currentSnapshot().manifests().size());
    Assert.assertFalse("Should merge both commit manifests into a new manifest",
        previousManifests.contains(table.currentSnapshot().manifests().get(0)));
    Assert.assertFalse("Append manifest should be deleted", new File(appendManifest.path()).exists());
  }
}
| 6,276 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import com.netflix.iceberg.io.InputFile;
import com.netflix.iceberg.io.OutputFile;
import java.io.File;
import java.util.Map;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
/**
 * Test-only table implementation that keeps table metadata in a static
 * in-memory map (keyed by table name) while writing manifest/metadata files to
 * the local filesystem. TestTableOperations supports injecting commit failures
 * to exercise retry logic.
 */
public class TestTables {
  /** Creates a new test table, failing if metadata already exists for the name. */
  static TestTable create(File temp, String name, Schema schema, PartitionSpec spec) {
    TestTableOperations ops = new TestTableOperations(name, temp);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table %s already exists at location: %s", name, temp);
    }
    ops.commit(null, TableMetadata.newTableMetadata(ops, schema, spec, temp.toString()));
    return new TestTable(ops, name);
  }

  /** Starts a create-table transaction; the table is not visible until it commits. */
  static Transaction beginCreate(File temp, String name, Schema schema, PartitionSpec spec) {
    TableOperations ops = new TestTableOperations(name, temp);
    if (ops.current() != null) {
      throw new AlreadyExistsException("Table %s already exists at location: %s", name, temp);
    }
    TableMetadata metadata = TableMetadata.newTableMetadata(ops, schema, spec, temp.toString());
    return BaseTransaction.createTableTransaction(ops, metadata);
  }

  public static Transaction beginReplace(File temp, String name, Schema schema, PartitionSpec spec) {
    return beginReplace(temp, name, schema, spec, ImmutableMap.of());
  }

  /** Starts a replace-table transaction, or a create-table transaction if the table is absent. */
  public static Transaction beginReplace(File temp, String name, Schema schema, PartitionSpec spec,
                                         Map<String, String> properties) {
    TestTableOperations ops = new TestTableOperations(name, temp);
    TableMetadata current = ops.current();
    TableMetadata metadata;
    if (current != null) {
      metadata = current.buildReplacement(schema, spec, properties);
      return BaseTransaction.replaceTableTransaction(ops, metadata);
    } else {
      metadata = newTableMetadata(ops, schema, spec, temp.toString(), properties);
      return BaseTransaction.createTableTransaction(ops, metadata);
    }
  }

  /** Loads an existing test table by name; current() will be null if it was never created. */
  static TestTable load(File temp, String name) {
    TestTableOperations ops = new TestTableOperations(name, temp);
    return new TestTable(ops, name);
  }

  /** Table wrapper that exposes its TestTableOperations for failure injection. */
  static class TestTable extends BaseTable {
    private final TestTableOperations ops;

    private TestTable(TestTableOperations ops, String name) {
      super(ops, name);
      this.ops = ops;
    }

    TestTableOperations ops() {
      return ops;
    }
  }

  // shared state for all test tables, keyed by table name; both maps are
  // guarded by synchronizing on METADATA
  private static final Map<String, TableMetadata> METADATA = Maps.newHashMap();
  private static final Map<String, Integer> VERSIONS = Maps.newHashMap();

  /** Drops all in-memory table state; call between tests to isolate them. */
  static void clearTables() {
    synchronized (METADATA) {
      METADATA.clear();
      VERSIONS.clear();
    }
  }

  /** Returns the current metadata for the named table, or null if none exists. */
  static TableMetadata readMetadata(String tableName) {
    synchronized (METADATA) {
      return METADATA.get(tableName);
    }
  }

  /** Returns the commit count for the named table (0-based), or null if never committed. */
  static Integer metadataVersion(String tableName) {
    synchronized (METADATA) {
      return VERSIONS.get(tableName);
    }
  }

  public static class TestTableOperations implements TableOperations {
    private final String tableName;
    private final File metadata;
    private TableMetadata current = null;
    private long lastSnapshotId = 0;
    private int failCommits = 0;

    public TestTableOperations(String tableName, File location) {
      this.tableName = tableName;
      this.metadata = new File(location, "metadata");
      // mkdirs() returns false both on failure and when the directory already
      // exists, so only fail when the directory is actually missing
      if (!metadata.mkdirs() && !metadata.isDirectory()) {
        throw new RuntimeIOException("Failed to create metadata directory: " + metadata);
      }
      refresh();
      // resume snapshot ID allocation after the highest existing snapshot
      if (current != null) {
        for (Snapshot snap : current.snapshots()) {
          this.lastSnapshotId = Math.max(lastSnapshotId, snap.snapshotId());
        }
      } else {
        this.lastSnapshotId = 0;
      }
    }

    /** Makes the next numFailures commits throw CommitFailedException. */
    void failCommits(int numFailures) {
      this.failCommits = numFailures;
    }

    @Override
    public TableMetadata current() {
      return current;
    }

    @Override
    public TableMetadata refresh() {
      synchronized (METADATA) {
        this.current = METADATA.get(tableName);
      }
      return current;
    }

    /**
     * Commits updated metadata if base still matches the table's current
     * metadata; otherwise throws CommitFailedException so callers can retry.
     * (The parameter is named updatedMetadata to avoid shadowing the metadata
     * directory field used by metadataFileLocation.)
     */
    @Override
    public void commit(TableMetadata base, TableMetadata updatedMetadata) {
      if (base != current) {
        throw new CommitFailedException("Cannot commit changes based on stale metadata");
      }
      synchronized (METADATA) {
        refresh();
        if (base == current) {
          if (failCommits > 0) {
            this.failCommits -= 1;
            throw new CommitFailedException("Injected failure");
          }
          Integer version = VERSIONS.get(tableName);
          VERSIONS.put(tableName, version == null ? 0 : version + 1);
          METADATA.put(tableName, updatedMetadata);
          this.current = updatedMetadata;
        } else {
          throw new CommitFailedException(
              "Commit failed: table was updated at %d", current.lastUpdatedMillis());
        }
      }
    }

    @Override
    public FileIO io() {
      return new LocalFileIO();
    }

    @Override
    public String metadataFileLocation(String fileName) {
      return new File(metadata, fileName).getAbsolutePath();
    }

    @Override
    public long newSnapshotId() {
      long nextSnapshotId = lastSnapshotId + 1;
      this.lastSnapshotId = nextSnapshotId;
      return nextSnapshotId;
    }
  }

  /** FileIO backed by the local filesystem. */
  static class LocalFileIO implements FileIO {
    @Override
    public InputFile newInputFile(String path) {
      return Files.localInput(path);
    }

    @Override
    public OutputFile newOutputFile(String path) {
      return Files.localOutput(path);
    }

    @Override
    public void deleteFile(String path) {
      if (!new File(path).delete()) {
        throw new RuntimeIOException("Failed to delete file: " + path);
      }
    }
  }
}
| 6,277 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestDeleteFiles.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.ManifestEntry.Status;
import org.junit.Assert;
import org.junit.Test;
/**
 * Verifies that successive delete operations rewrite the single manifest,
 * marking removed files DELETED and carrying remaining files forward as
 * EXISTING entries.
 */
public class TestDeleteFiles extends TableTestBase {
  @Test
  public void testMultipleDeletes() {
    // start with three files added in a single append
    table.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .commit();
    Assert.assertEquals("Metadata should be at version 1", 1L, (long) version());
    Snapshot appendSnapshot = readMetadata().currentSnapshot();
    validateSnapshot(null, appendSnapshot, FILE_A, FILE_B, FILE_C);

    // first delete removes FILE_A; B and C are rewritten as existing entries
    table.newDelete()
        .deleteFile(FILE_A)
        .commit();
    Assert.assertEquals("Metadata should be at version 2", 2L, (long) version());
    Snapshot firstDelete = readMetadata().currentSnapshot();
    Assert.assertEquals("Should have 1 manifest", 1, firstDelete.manifests().size());
    validateManifestEntries(firstDelete.manifests().get(0),
        ids(firstDelete.snapshotId(), appendSnapshot.snapshotId(), appendSnapshot.snapshotId()),
        files(FILE_A, FILE_B, FILE_C),
        statuses(Status.DELETED, Status.EXISTING, Status.EXISTING));

    // second delete removes FILE_B; the previously deleted FILE_A entry is dropped
    table.newDelete()
        .deleteFile(FILE_B)
        .commit();
    Assert.assertEquals("Metadata should be at version 3", 3L, (long) version());
    Snapshot secondDelete = readMetadata().currentSnapshot();
    Assert.assertEquals("Should have 1 manifest", 1, secondDelete.manifests().size());
    validateManifestEntries(secondDelete.manifests().get(0),
        ids(secondDelete.snapshotId(), appendSnapshot.snapshotId()),
        files(FILE_B, FILE_C),
        statuses(Status.DELETED, Status.EXISTING));
  }
}
| 6,278 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestFastAppend.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.exceptions.CommitFailedException;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.List;
import java.util.Set;
public class TestFastAppend extends TableTestBase {
@Test
public void testEmptyTableAppend() {
Assert.assertEquals("Table should start empty", 0, listManifestFiles().size());
TableMetadata base = readMetadata();
Assert.assertNull("Should not have a current snapshot", base.currentSnapshot());
Snapshot pending = table.newFastAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.apply();
validateSnapshot(base.currentSnapshot(), pending, FILE_A, FILE_B);
}
@Test
public void testNonEmptyTableAppend() {
table.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
List<ManifestFile> v2manifests = base.currentSnapshot().manifests();
Assert.assertEquals("Should have one existing manifest", 1, v2manifests.size());
// prepare a new append
Snapshot pending = table.newFastAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
Assert.assertNotEquals("Snapshots should have unique IDs",
base.currentSnapshot().snapshotId(), pending.snapshotId());
validateSnapshot(base.currentSnapshot(), pending, FILE_C, FILE_D);
}
@Test
public void testNoMerge() {
table.newAppend()
.appendFile(FILE_A)
.commit();
table.newFastAppend()
.appendFile(FILE_B)
.commit();
TableMetadata base = readMetadata();
Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
List<ManifestFile> v3manifests = base.currentSnapshot().manifests();
Assert.assertEquals("Should have 2 existing manifests", 2, v3manifests.size());
// prepare a new append
Snapshot pending = table.newFastAppend()
.appendFile(FILE_C)
.appendFile(FILE_D)
.apply();
Set<Long> ids = Sets.newHashSet();
for (Snapshot snapshot : base.snapshots()) {
ids.add(snapshot.snapshotId());
}
ids.add(pending.snapshotId());
Assert.assertEquals("Snapshots should have 3 unique IDs", 3, ids.size());
validateSnapshot(base.currentSnapshot(), pending, FILE_C, FILE_D);
}
@Test
public void testRefreshBeforeApply() {
// load a new copy of the table that will not be refreshed by the commit
Table stale = load();
table.newAppend()
.appendFile(FILE_A)
.commit();
TableMetadata base = readMetadata();
Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
List<ManifestFile> v2manifests = base.currentSnapshot().manifests();
Assert.assertEquals("Should have 1 existing manifest", 1, v2manifests.size());
// commit from the stale table
AppendFiles append = stale.newFastAppend()
.appendFile(FILE_D);
Snapshot pending = append.apply();
// table should have been refreshed before applying the changes
validateSnapshot(base.currentSnapshot(), pending, FILE_D);
}
@Test
public void testRefreshBeforeCommit() {
  // apply() before any conflicting commit: the pending snapshot has no parent yet
  AppendFiles append = table.newFastAppend()
      .appendFile(FILE_D);
  Snapshot pending = append.apply();
  validateSnapshot(null, pending, FILE_D);

  // introduce a conflicting commit between apply() and commit()
  table.newAppend()
      .appendFile(FILE_A)
      .commit();

  TableMetadata base = readMetadata();
  Assert.assertNotNull("Should have a current snapshot", base.currentSnapshot());
  List<ManifestFile> v2manifests = base.currentSnapshot().manifests();
  Assert.assertEquals("Should have 1 existing manifest", 1, v2manifests.size());

  append.commit();

  TableMetadata committed = readMetadata();

  // apply was called before the conflicting commit, but the commit was still consistent
  validateSnapshot(base.currentSnapshot(), committed.currentSnapshot(), FILE_D);

  // the committed snapshot should add exactly one manifest on top of base,
  // and it must be the manifest that apply() already wrote (no rewrite on retry)
  List<ManifestFile> committedManifests = Lists.newArrayList(committed.currentSnapshot().manifests());
  committedManifests.removeAll(base.currentSnapshot().manifests());
  Assert.assertEquals("Should commit exactly one new manifest", 1, committedManifests.size());
  // message fixed: previously read "Should reused manifest created by apply"
  Assert.assertEquals("Should reuse manifest created by apply",
      pending.manifests().get(0), committedManifests.get(0));
}
@Test
public void testFailure() {
  // inject 5 failures so every retry of the commit fails
  TestTables.TestTableOperations ops = table.ops();
  ops.failCommits(5);

  AppendFiles append = table.newFastAppend().appendFile(FILE_B);
  Snapshot pending = append.apply();

  // apply() writes the manifest eagerly; hold on to its location
  File manifestFile = new File(pending.manifests().get(0).path());
  Assert.assertTrue("Should create new manifest", manifestFile.exists());

  AssertHelpers.assertThrows("Should retry 4 times and throw last failure",
      CommitFailedException.class, "Injected failure", append::commit);

  // a failed commit must delete the manifest it created
  Assert.assertFalse("Should clean up new manifest", manifestFile.exists());
}
@Test
public void testRecoveryWithManifestList() {
  table.updateProperties().set(TableProperties.MANIFEST_LISTS_ENABLED, "true").commit();

  // inject 3 failures, the last try will succeed
  TestTables.TestTableOperations ops = table.ops();
  ops.failCommits(3);

  AppendFiles append = table.newFastAppend().appendFile(FILE_B);
  Snapshot pending = append.apply();
  ManifestFile newManifest = pending.manifests().get(0);
  File manifestFile = new File(newManifest.path());
  Assert.assertTrue("Should create new manifest", manifestFile.exists());

  append.commit();

  TableMetadata metadata = readMetadata();
  validateSnapshot(null, metadata.currentSnapshot(), FILE_B);

  // the retried commit must reuse the manifest written by apply(), not rewrite it
  Assert.assertTrue("Should commit same new manifest", manifestFile.exists());
  Assert.assertTrue("Should commit the same new manifest",
      metadata.currentSnapshot().manifests().contains(newManifest));
}
@Test
public void testRecoveryWithoutManifestList() {
  table.updateProperties().set(TableProperties.MANIFEST_LISTS_ENABLED, "false").commit();

  // inject 3 failures, the last try will succeed
  TestTables.TestTableOperations ops = table.ops();
  ops.failCommits(3);

  AppendFiles append = table.newFastAppend().appendFile(FILE_B);
  Snapshot pending = append.apply();
  ManifestFile newManifest = pending.manifests().get(0);
  File manifestFile = new File(newManifest.path());
  Assert.assertTrue("Should create new manifest", manifestFile.exists());

  append.commit();

  TableMetadata metadata = readMetadata();
  validateSnapshot(null, metadata.currentSnapshot(), FILE_B);

  // the retried commit must reuse the manifest written by apply(), not rewrite it
  Assert.assertTrue("Should commit same new manifest", manifestFile.exists());
  Assert.assertTrue("Should commit the same new manifest",
      metadata.currentSnapshot().manifests().contains(newManifest));
}
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.TypeUtil;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static com.netflix.iceberg.PartitionSpec.unpartitioned;
// Tests for create-table transactions: no metadata is visible to readers until
// commitTransaction(), pending changes accumulate inside the transaction, and a
// concurrent create of the same table is detected as a commit conflict.
public class TestCreateTransaction extends TableTestBase {
@Test
public void testCreateTransaction() throws IOException {
File tableDir = temp.newFolder();
// delete the folder so the transaction starts from a nonexistent location
// (presumably beginCreate creates it; TODO confirm against TestTables)
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "test_create", SCHEMA, unpartitioned());
// beginCreate alone must not publish anything
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("test_create"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_create"));
t.commitTransaction();
// after commit, version-0 metadata exists with the reassigned schema and no snapshots
TableMetadata meta = TestTables.readMetadata("test_create");
Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
Assert.assertEquals("Should have metadata version 0",
0, (int) TestTables.metadataVersion("test_create"));
Assert.assertEquals("Should have 0 manifest files",
0, listManifestFiles(tableDir).size());
Assert.assertEquals("Table schema should match with reassigned IDs",
assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
Assert.assertEquals("Table should not have any snapshots", 0, meta.snapshots().size());
}
@Test
public void testCreateAndAppendWithTransaction() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "test_append", SCHEMA, unpartitioned());
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("test_append"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_append"));
// append through the transaction itself; still nothing published
t.newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertNull("Appending in a transaction should not commit metadata",
TestTables.readMetadata("test_append"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_append"));
t.commitTransaction();
// create + append land together as metadata version 0 with one snapshot
TableMetadata meta = TestTables.readMetadata("test_append");
Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
Assert.assertEquals("Should have metadata version 0",
0, (int) TestTables.metadataVersion("test_append"));
Assert.assertEquals("Should have 1 manifest file",
1, listManifestFiles(tableDir).size());
Assert.assertEquals("Table schema should match with reassigned IDs",
assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
Assert.assertEquals("Table should have one snapshot", 1, meta.snapshots().size());
validateSnapshot(null, meta.currentSnapshot(), FILE_A, FILE_B);
}
@Test
public void testCreateAndAppendWithTable() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "test_append", SCHEMA, unpartitioned());
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("test_append"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_append"));
// same as the previous test, but appending via the transaction's Table view
Assert.assertTrue("Should return a transaction table",
t.table() instanceof BaseTransaction.TransactionTable);
t.table().newAppend()
.appendFile(FILE_A)
.appendFile(FILE_B)
.commit();
Assert.assertNull("Appending in a transaction should not commit metadata",
TestTables.readMetadata("test_append"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_append"));
t.commitTransaction();
TableMetadata meta = TestTables.readMetadata("test_append");
Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
Assert.assertEquals("Should have metadata version 0",
0, (int) TestTables.metadataVersion("test_append"));
Assert.assertEquals("Should have 1 manifest file",
1, listManifestFiles(tableDir).size());
Assert.assertEquals("Table schema should match with reassigned IDs",
assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
Assert.assertEquals("Table should have one snapshot", 1, meta.snapshots().size());
validateSnapshot(null, meta.currentSnapshot(), FILE_A, FILE_B);
}
@Test
public void testCreateAndUpdatePropertiesWithTransaction() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "test_properties", SCHEMA, unpartitioned());
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("test_properties"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_properties"));
// property updates inside the transaction are also deferred
t.updateProperties()
.set("test-property", "test-value")
.commit();
Assert.assertNull("Adding properties in a transaction should not commit metadata",
TestTables.readMetadata("test_properties"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_properties"));
t.commitTransaction();
TableMetadata meta = TestTables.readMetadata("test_properties");
Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
Assert.assertEquals("Should have metadata version 0",
0, (int) TestTables.metadataVersion("test_properties"));
Assert.assertEquals("Should have 0 manifest files",
0, listManifestFiles(tableDir).size());
Assert.assertEquals("Table schema should match with reassigned IDs",
assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
Assert.assertEquals("Table should not have any snapshots", 0, meta.snapshots().size());
Assert.assertEquals("Should have one table property", 1, meta.properties().size());
Assert.assertEquals("Should have correct table property value",
"test-value", meta.properties().get("test-property"));
}
@Test
public void testCreateAndUpdatePropertiesWithTable() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "test_properties", SCHEMA, unpartitioned());
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("test_properties"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_properties"));
// same as the previous test, but updating properties via the Table view
Assert.assertTrue("Should return a transaction table",
t.table() instanceof BaseTransaction.TransactionTable);
t.table().updateProperties()
.set("test-property", "test-value")
.commit();
Assert.assertNull("Adding properties in a transaction should not commit metadata",
TestTables.readMetadata("test_properties"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_properties"));
t.commitTransaction();
TableMetadata meta = TestTables.readMetadata("test_properties");
Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
Assert.assertEquals("Should have metadata version 0",
0, (int) TestTables.metadataVersion("test_properties"));
Assert.assertEquals("Should have 0 manifest files",
0, listManifestFiles(tableDir).size());
Assert.assertEquals("Table schema should match with reassigned IDs",
assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
Assert.assertEquals("Table should not have any snapshots", 0, meta.snapshots().size());
Assert.assertEquals("Should have one table property", 1, meta.properties().size());
Assert.assertEquals("Should have correct table property value",
"test-value", meta.properties().get("test-property"));
}
@Test
public void testCreateDetectsUncommittedChange() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "uncommitted_change", SCHEMA, unpartitioned());
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("uncommitted_change"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("uncommitted_change"));
t.updateProperties().set("test-property", "test-value"); // not committed
// starting a new operation while one is pending must fail
AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
IllegalStateException.class,
"Cannot create new DeleteFiles: last operation has not committed",
t::newDelete);
}
@Test
public void testCreateDetectsUncommittedChangeOnCommit() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "uncommitted_change", SCHEMA, unpartitioned());
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("uncommitted_change"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("uncommitted_change"));
t.updateProperties().set("test-property", "test-value"); // not committed
// committing the transaction with a pending operation must fail too
AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
IllegalStateException.class, "Cannot commit transaction: last operation has not committed",
t::commitTransaction);
}
@Test
public void testCreateTransactionConflict() throws IOException {
File tableDir = temp.newFolder();
Assert.assertTrue(tableDir.delete());
Transaction t = TestTables.beginCreate(tableDir, "test_conflict", SCHEMA, SPEC);
Assert.assertNull("Starting a create transaction should not commit metadata",
TestTables.readMetadata("test_conflict"));
Assert.assertNull("Should have no metadata version",
TestTables.metadataVersion("test_conflict"));
// concurrently create the same table name with a different (unpartitioned) spec
Table conflict = TestTables.create(tableDir, "test_conflict", SCHEMA, unpartitioned());
Assert.assertEquals("Table schema should match with reassigned IDs",
assignFreshIds(SCHEMA).asStruct(), conflict.schema().asStruct());
Assert.assertEquals("Table spec should match conflict table, not transaction table",
unpartitioned(), conflict.spec());
Assert.assertFalse("Table should not have any snapshots",
conflict.snapshots().iterator().hasNext());
// the transaction must now fail because the table already exists
AssertHelpers.assertThrows("Transaction commit should fail",
CommitFailedException.class, "Commit failed: table was updated", t::commitTransaction);
}
// Reassigns all field IDs in the schema starting from 1, matching what table
// creation does, so expected and actual schemas can be compared structurally.
private static Schema assignFreshIds(Schema schema) {
AtomicInteger lastColumnId = new AtomicInteger(0);
return TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
}
}
/*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.Pair;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import java.util.Set;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
public class TestSchemaUpdate {
// Fixture schema covering nested structs, a map with struct keys and values,
// lists of structs and primitives, and a string-to-string map. Field IDs 1-23
// are assigned explicitly; several tests assert against these exact IDs.
private static final Schema SCHEMA = new Schema(
required(1, "id", Types.IntegerType.get()),
optional(2, "data", Types.StringType.get()),
optional(3, "preferences", Types.StructType.of(
required(8, "feature1", Types.BooleanType.get()),
optional(9, "feature2", Types.BooleanType.get())
)),
required(4, "locations", Types.MapType.ofRequired(10, 11,
Types.StructType.of(
required(20, "address", Types.StringType.get()),
required(21, "city", Types.StringType.get()),
required(22, "state", Types.StringType.get()),
required(23, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(12, "lat", Types.FloatType.get()),
required(13, "long", Types.FloatType.get())
))),
optional(5, "points", Types.ListType.ofOptional(14,
Types.StructType.of(
required(15, "x", Types.LongType.get()),
required(16, "y", Types.LongType.get())
))),
required(6, "doubles", Types.ListType.ofRequired(17,
Types.DoubleType.get()
)),
optional(7, "properties", Types.MapType.ofOptional(18, 19,
Types.StringType.get(),
Types.StringType.get()
))
);
// Highest field ID used in SCHEMA; passed to SchemaUpdate so new columns get
// IDs starting at 24.
private static final int SCHEMA_LAST_COLUMN_ID = 23;
@Test
public void testNoChanges() {
  // applying an update with no pending changes must return an equivalent schema
  Schema result = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).apply();
  Assert.assertEquals("Should not include any changes", SCHEMA.asStruct(), result.asStruct());
}
@Test
public void testDeleteFields() {
  // use schema projection to test column deletes
  Set<Integer> allIds = ImmutableSet.copyOf(TypeUtil.getProjectedIds(SCHEMA));
  List<String> columns = Lists.newArrayList("id", "data", "preferences", "preferences.feature1",
      "preferences.feature2", "locations", "locations.lat", "locations.long", "points",
      "points.x", "points.y", "doubles", "properties");
  for (String name : columns) {
    Set<Integer> selected = Sets.newHashSet(allIds);
    // remove the id and any nested fields from the projection
    Types.NestedField nested = SCHEMA.findField(name);
    selected.remove(nested.fieldId());
    selected.removeAll(TypeUtil.getProjectedIds(nested.type()));

    // was a stale hard-coded 19; use the shared constant (SCHEMA's highest ID is 23)
    // for consistency with every other test in this class -- deletes never
    // allocate new IDs, so the result is unchanged
    Schema del = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).deleteColumn(name).apply();

    Assert.assertEquals("Should match projection with '" + name + "' removed",
        TypeUtil.select(SCHEMA, selected).asStruct(), del.asStruct());
  }
}
@Test
public void testUpdateTypes() {
// expected result: same structure and field IDs as SCHEMA, with id widened
// int -> long and locations.lat/long widened float -> double
Types.StructType expected = Types.StructType.of(
required(1, "id", Types.LongType.get()),
optional(2, "data", Types.StringType.get()),
optional(3, "preferences", Types.StructType.of(
required(8, "feature1", Types.BooleanType.get()),
optional(9, "feature2", Types.BooleanType.get())
)),
required(4, "locations", Types.MapType.ofRequired(10, 11,
Types.StructType.of(
required(20, "address", Types.StringType.get()),
required(21, "city", Types.StringType.get()),
required(22, "state", Types.StringType.get()),
required(23, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(12, "lat", Types.DoubleType.get()),
required(13, "long", Types.DoubleType.get())
))),
optional(5, "points", Types.ListType.ofOptional(14,
Types.StructType.of(
required(15, "x", Types.LongType.get()),
required(16, "y", Types.LongType.get())
))),
required(6, "doubles", Types.ListType.ofRequired(17,
Types.DoubleType.get()
)),
optional(7, "properties", Types.MapType.ofOptional(18, 19,
Types.StringType.get(),
Types.StringType.get()
))
);
// updateColumn addresses nested fields with dotted paths
Schema updated = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
.updateColumn("id", Types.LongType.get())
.updateColumn("locations.lat", Types.DoubleType.get())
.updateColumn("locations.long", Types.DoubleType.get())
.apply();
Assert.assertEquals("Should convert types", expected, updated.asStruct());
}
@Test
public void testUpdateFailure() {
  // the only primitive widenings SchemaUpdate accepts
  Set<Pair<Type.PrimitiveType, Type.PrimitiveType>> allowedUpdates = Sets.newHashSet(
      Pair.of(Types.IntegerType.get(), Types.LongType.get()),
      Pair.of(Types.FloatType.get(), Types.DoubleType.get()),
      Pair.of(Types.DecimalType.of(9, 2), Types.DecimalType.of(18, 2))
  );
  List<Type.PrimitiveType> primitives = Lists.newArrayList(
      Types.BooleanType.get(), Types.IntegerType.get(), Types.LongType.get(),
      Types.FloatType.get(), Types.DoubleType.get(), Types.DateType.get(), Types.TimeType.get(),
      Types.TimestampType.withZone(), Types.TimestampType.withoutZone(),
      Types.StringType.get(), Types.UUIDType.get(), Types.BinaryType.get(),
      Types.FixedType.ofLength(3), Types.FixedType.ofLength(4),
      Types.DecimalType.of(9, 2), Types.DecimalType.of(9, 3),
      Types.DecimalType.of(18, 2)
  );
  // exhaustively try every ordered pair: allowed pairs (and identity) must
  // succeed, everything else must be rejected with a descriptive message
  for (Type.PrimitiveType fromType : primitives) {
    for (Type.PrimitiveType toType : primitives) {
      Schema fromSchema = new Schema(required(1, "col", fromType));
      boolean allowed = fromType.equals(toType) ||
          allowedUpdates.contains(Pair.of(fromType, toType));
      if (allowed) {
        Schema expected = new Schema(required(1, "col", toType));
        Schema result = new SchemaUpdate(fromSchema, 1).updateColumn("col", toType).apply();
        Assert.assertEquals("Should allow update", expected.asStruct(), result.asStruct());
      } else {
        String typeChange = fromType.toString() + " -> " + toType.toString();
        AssertHelpers.assertThrows("Should reject update: " + typeChange,
            IllegalArgumentException.class, "change column type: col: " + typeChange,
            () -> new SchemaUpdate(fromSchema, 1).updateColumn("col", toType));
      }
    }
  }
}
@Test
public void testRename() {
// expected result keeps all field IDs and types; only names change
Types.StructType expected = Types.StructType.of(
required(1, "id", Types.IntegerType.get()),
optional(2, "json", Types.StringType.get()),
optional(3, "options", Types.StructType.of(
required(8, "feature1", Types.BooleanType.get()),
optional(9, "newfeature", Types.BooleanType.get())
)),
required(4, "locations", Types.MapType.ofRequired(10, 11,
Types.StructType.of(
required(20, "address", Types.StringType.get()),
required(21, "city", Types.StringType.get()),
required(22, "state", Types.StringType.get()),
required(23, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(12, "latitude", Types.FloatType.get()),
required(13, "long", Types.FloatType.get())
))),
optional(5, "points", Types.ListType.ofOptional(14,
Types.StructType.of(
required(15, "X", Types.LongType.get()),
required(16, "y.y", Types.LongType.get())
))),
required(6, "doubles", Types.ListType.ofRequired(17,
Types.DoubleType.get()
)),
optional(7, "properties", Types.MapType.ofOptional(18, 19,
Types.StringType.get(),
Types.StringType.get()
))
);
// renames are addressed by the ORIGINAL names, even for fields inside a
// struct that is itself being renamed in the same update
Schema renamed = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
.renameColumn("data", "json")
.renameColumn("preferences", "options")
.renameColumn("preferences.feature2", "newfeature") // inside a renamed column
.renameColumn("locations.lat", "latitude")
.renameColumn("points.x", "X")
.renameColumn("points.y", "y.y") // has a '.' in the field name
.apply();
Assert.assertEquals("Should rename all fields", expected, renamed.asStruct());
}
@Test
public void testAddFields() {
// new fields must be optional and get fresh IDs 24-27, continuing after
// SCHEMA_LAST_COLUMN_ID; existing fields are untouched
Schema expected = new Schema(
required(1, "id", Types.IntegerType.get()),
optional(2, "data", Types.StringType.get()),
optional(3, "preferences", Types.StructType.of(
required(8, "feature1", Types.BooleanType.get()),
optional(9, "feature2", Types.BooleanType.get())
)),
required(4, "locations", Types.MapType.ofRequired(10, 11,
Types.StructType.of(
required(20, "address", Types.StringType.get()),
required(21, "city", Types.StringType.get()),
required(22, "state", Types.StringType.get()),
required(23, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(12, "lat", Types.FloatType.get()),
required(13, "long", Types.FloatType.get()),
optional(25, "alt", Types.FloatType.get())
))),
optional(5, "points", Types.ListType.ofOptional(14,
Types.StructType.of(
required(15, "x", Types.LongType.get()),
required(16, "y", Types.LongType.get()),
optional(26, "z", Types.LongType.get()),
optional(27, "t.t", Types.LongType.get())
))),
required(6, "doubles", Types.ListType.ofRequired(17,
Types.DoubleType.get()
)),
optional(7, "properties", Types.MapType.ofOptional(18, 19,
Types.StringType.get(),
Types.StringType.get()
)),
optional(24, "toplevel", Types.DecimalType.of(9, 2))
);
// the two-argument addColumn targets a parent struct, including structs
// nested inside maps and lists
Schema added = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
.addColumn("toplevel", Types.DecimalType.of(9, 2))
.addColumn("locations", "alt", Types.FloatType.get()) // map of structs
.addColumn("points", "z", Types.LongType.get()) // list of structs
.addColumn("points", "t.t", Types.LongType.get()) // name with '.'
.apply();
Assert.assertEquals("Should match with added fields", expected.asStruct(), added.asStruct());
}
@Test
public void testAddNestedStruct() {
Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
// the struct literal reuses IDs 1 and 2 on purpose: adding it must not keep
// those IDs, because they collide with existing fields
Types.StructType struct = Types.StructType.of(
required(1, "lat", Types.IntegerType.get()), // conflicts with id
optional(2, "long", Types.IntegerType.get())
);
// added column becomes optional and all nested IDs are reassigned to 3 and 4
Schema expected = new Schema(
required(1, "id", Types.IntegerType.get()),
optional(2, "location", Types.StructType.of(
required(3, "lat", Types.IntegerType.get()),
optional(4, "long", Types.IntegerType.get())
))
);
Schema result = new SchemaUpdate(schema, 1)
.addColumn("location", struct)
.apply();
Assert.assertEquals("Should add struct and reassign column IDs",
expected.asStruct(), result.asStruct());
}
@Test
public void testAddNestedMapOfStructs() {
Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
// the map literal uses arbitrary/conflicting IDs (1, 2, 8, 9, 20-23); all of
// them must be reassigned when the column is added
Types.MapType map = Types.MapType.ofOptional(1, 2,
Types.StructType.of(
required(20, "address", Types.StringType.get()),
required(21, "city", Types.StringType.get()),
required(22, "state", Types.StringType.get()),
required(23, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(9, "lat", Types.IntegerType.get()),
optional(8, "long", Types.IntegerType.get())
)
);
// expected IDs are assigned in order: map key/value IDs first, then key
// struct fields, then value struct fields
Schema expected = new Schema(
required(1, "id", Types.IntegerType.get()),
optional(2, "locations", Types.MapType.ofOptional(3, 4,
Types.StructType.of(
required(5, "address", Types.StringType.get()),
required(6, "city", Types.StringType.get()),
required(7, "state", Types.StringType.get()),
required(8, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(9, "lat", Types.IntegerType.get()),
optional(10, "long", Types.IntegerType.get())
)
))
);
Schema result = new SchemaUpdate(schema, 1)
.addColumn("locations", map)
.apply();
Assert.assertEquals("Should add map and reassign column IDs",
expected.asStruct(), result.asStruct());
}
@Test
public void testAddNestedListOfStructs() {
Schema schema = new Schema(required(1, "id", Types.IntegerType.get()));
// list literal uses conflicting IDs (1, 8, 9); all must be reassigned
Types.ListType list = Types.ListType.ofOptional(1,
Types.StructType.of(
required(9, "lat", Types.IntegerType.get()),
optional(8, "long", Types.IntegerType.get())
)
);
// expected IDs: column 2, list element 3, then the element struct's fields
Schema expected = new Schema(
required(1, "id", Types.IntegerType.get()),
optional(2, "locations", Types.ListType.ofOptional(3,
Types.StructType.of(
required(4, "lat", Types.IntegerType.get()),
optional(5, "long", Types.IntegerType.get())
)
))
);
Schema result = new SchemaUpdate(schema, 1)
.addColumn("locations", list)
.apply();
Assert.assertEquals("Should add map and reassign column IDs",
expected.asStruct(), result.asStruct());
}
@Test
public void testMixedChanges() {
// combines adds, renames, type updates, and deletes in one SchemaUpdate;
// note locations.long and properties are absent and latitude is now double
Schema expected = new Schema(
required(1, "id", Types.LongType.get()),
optional(2, "json", Types.StringType.get()),
optional(3, "options", Types.StructType.of(
required(8, "feature1", Types.BooleanType.get()),
optional(9, "newfeature", Types.BooleanType.get())
)),
required(4, "locations", Types.MapType.ofRequired(10, 11,
Types.StructType.of(
required(20, "address", Types.StringType.get()),
required(21, "city", Types.StringType.get()),
required(22, "state", Types.StringType.get()),
required(23, "zip", Types.IntegerType.get())
),
Types.StructType.of(
required(12, "latitude", Types.DoubleType.get()),
optional(25, "alt", Types.FloatType.get())
))),
optional(5, "points", Types.ListType.ofOptional(14,
Types.StructType.of(
required(15, "X", Types.LongType.get()),
required(16, "y.y", Types.LongType.get()),
optional(26, "z", Types.LongType.get()),
optional(27, "t.t", Types.LongType.get())
))),
required(6, "doubles", Types.ListType.ofRequired(17,
Types.DoubleType.get()
)),
optional(24, "toplevel", Types.DecimalType.of(9, 2))
);
// every operation addresses fields by their ORIGINAL names, regardless of
// renames queued earlier in the same update
Schema updated = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
.addColumn("toplevel", Types.DecimalType.of(9, 2))
.addColumn("locations", "alt", Types.FloatType.get()) // map of structs
.addColumn("points", "z", Types.LongType.get()) // list of structs
.addColumn("points", "t.t", Types.LongType.get()) // name with '.'
.renameColumn("data", "json")
.renameColumn("preferences", "options")
.renameColumn("preferences.feature2", "newfeature") // inside a renamed column
.renameColumn("locations.lat", "latitude")
.renameColumn("points.x", "X")
.renameColumn("points.y", "y.y") // has a '.' in the field name
.updateColumn("id", Types.LongType.get())
.updateColumn("locations.lat", Types.DoubleType.get()) // use the original name
.deleteColumn("locations.long")
.deleteColumn("properties")
.apply();
Assert.assertEquals("Should match with added fields", expected.asStruct(), updated.asStruct());
}
@Test
public void testAmbiguousAdd() {
  // preferences.booleans could be top-level or a field of preferences
  AssertHelpers.assertThrows("Should reject ambiguous column name",
      IllegalArgumentException.class, "ambiguous name: preferences.booleans", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .addColumn("preferences.booleans", Types.BooleanType.get());
      });
}
@Test
public void testAddAlreadyExists() {
  // a nested field may not shadow an existing field of the same struct
  AssertHelpers.assertThrows("Should reject column name that already exists",
      IllegalArgumentException.class, "already exists: preferences.feature1", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .addColumn("preferences", "feature1", Types.BooleanType.get());
      });
  // a top-level column may not reuse an existing column name
  AssertHelpers.assertThrows("Should reject column name that already exists",
      IllegalArgumentException.class, "already exists: preferences", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .addColumn("preferences", Types.BooleanType.get());
      });
}
@Test
public void testDeleteMissingColumn() {
  // deleting an unknown column must fail immediately
  AssertHelpers.assertThrows("Should reject delete missing column",
      IllegalArgumentException.class, "missing column: col", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).deleteColumn("col");
      });
}
@Test
public void testAddDeleteConflict() {
  // a column added in the same update is not yet in the schema, so it cannot
  // be deleted
  AssertHelpers.assertThrows("Should reject add then delete",
      IllegalArgumentException.class, "missing column: col", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .addColumn("col", Types.IntegerType.get()).deleteColumn("col");
      });
  // a struct with a pending nested add cannot itself be deleted
  AssertHelpers.assertThrows("Should reject add then delete",
      IllegalArgumentException.class, "column that has additions: preferences", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .addColumn("preferences", "feature3", Types.IntegerType.get())
            .deleteColumn("preferences");
      });
}
@Test
public void testRenameMissingColumn() {
  // renaming an unknown column must fail immediately
  AssertHelpers.assertThrows("Should reject rename missing column",
      IllegalArgumentException.class, "missing column: col", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).renameColumn("col", "fail");
      });
}
@Test
public void testRenameDeleteConflict() {
  // a renamed column cannot be deleted by its original name...
  AssertHelpers.assertThrows("Should reject rename then delete",
      IllegalArgumentException.class, "column that has updates: id", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .renameColumn("id", "col").deleteColumn("id");
      });
  // ...nor by its new name, which is not in the schema yet
  AssertHelpers.assertThrows("Should reject rename then delete",
      IllegalArgumentException.class, "missing column: col", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .renameColumn("id", "col").deleteColumn("col");
      });
}
@Test
public void testDeleteRenameConflict() {
  // a column marked for deletion cannot also be renamed
  AssertHelpers.assertThrows("Should reject delete then rename",
      IllegalArgumentException.class, "column that will be deleted: id", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .deleteColumn("id").renameColumn("id", "identifier");
      });
}
@Test
public void testUpdateMissingColumn() {
  // updating the type of an unknown column must fail immediately
  AssertHelpers.assertThrows("Should reject rename missing column",
      IllegalArgumentException.class, "missing column: col", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID).updateColumn("col", Types.DateType.get());
      });
}
@Test
public void testUpdateDeleteConflict() {
  // A column with a pending type update cannot also be deleted.
  AssertHelpers.assertThrows("Should reject update then delete",
      IllegalArgumentException.class, "column that has updates: id", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .updateColumn("id", Types.LongType.get())
            .deleteColumn("id");
      });
}
@Test
public void testDeleteUpdateConflict() {
  // Once a column is marked for deletion its type can no longer be updated.
  AssertHelpers.assertThrows("Should reject delete then update",
      IllegalArgumentException.class, "column that will be deleted: id", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .deleteColumn("id")
            .updateColumn("id", Types.LongType.get());
      });
}
@Test
public void testDeleteMapKey() {
  // The key field of a map column cannot be deleted.
  AssertHelpers.assertThrows("Should reject delete map key",
      IllegalArgumentException.class, "Cannot delete map keys", () -> {
        UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        update.deleteColumn("locations.key").apply();
      });
}
@Test
public void testAddFieldToMapKey() {
  // New fields cannot be added under a map's key struct.
  AssertHelpers.assertThrows("Should reject add sub-field to map key",
      IllegalArgumentException.class, "Cannot add fields to map keys", () -> {
        UpdateSchema update = new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID);
        update.addColumn("locations.key", "address_line_2", Types.StringType.get()).apply();
      });
}
@Test
public void testAlterMapKey() {
  // Updating the type of a field nested inside a map key is rejected, with the
  // error "Cannot alter map keys". Fixed assertion description: it previously
  // said "add sub-field to map key", copied from testAddFieldToMapKey, but this
  // test performs a column update, not an add.
  AssertHelpers.assertThrows("Should reject update sub-field in map key",
      IllegalArgumentException.class, "Cannot alter map keys", () -> {
        new SchemaUpdate(SCHEMA, SCHEMA_LAST_COLUMN_ID)
            .updateColumn("locations.zip", Types.LongType.get()).apply();
      }
  );
}
@Test
public void testUpdateMapKey() {
  // Even a direct type update of a primitive map key must be rejected.
  Schema schema = new Schema(required(1, "m", Types.MapType.ofOptional(2, 3,
      Types.IntegerType.get(), Types.DoubleType.get())));
  AssertHelpers.assertThrows("Should reject update map key",
      IllegalArgumentException.class, "Cannot update map keys", () -> {
        UpdateSchema update = new SchemaUpdate(schema, 3);
        update.updateColumn("m.key", Types.LongType.get()).apply();
      });
}
}
| 6,281 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/AssertHelpers.java | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import org.junit.Assert;
import java.util.concurrent.Callable;
/**
 * Static assertion helpers for exception-based tests.
 *
 * <p>Using these instead of {@code @Test(expected=...)} allows each test to check
 * both the exception type and a substring of its message, and to keep asserting
 * after the throwing call.
 */
public class AssertHelpers {

  private AssertHelpers() {
    // utility class: no instances
  }

  /**
   * Asserts that calling {@code callable} throws an exception of exactly the
   * expected class whose message contains {@code containedInMessage}.
   *
   * @param message a String message to describe this assertion
   * @param expected the exact Exception class the callable should throw
   * @param containedInMessage a String that should be contained by the thrown
   *                           exception's message
   * @param callable a Callable that is expected to throw the exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Callable<?> callable) { // was raw Callable; wildcard avoids raw-type use
    try {
      callable.call();
      Assert.fail("No exception was thrown (" + message + "), expected: " +
          expected.getName());
    } catch (Exception actual) {
      handleException(message, expected, containedInMessage, actual);
    }
  }

  /**
   * Asserts that running {@code runnable} throws an exception of exactly the
   * expected class whose message contains {@code containedInMessage}.
   *
   * @param message a String message to describe this assertion
   * @param expected the exact Exception class the runnable should throw
   * @param containedInMessage a String that should be contained by the thrown
   *                           exception's message
   * @param runnable a Runnable that is expected to throw the runtime exception
   */
  public static void assertThrows(String message,
                                  Class<? extends Exception> expected,
                                  String containedInMessage,
                                  Runnable runnable) {
    try {
      runnable.run();
      Assert.fail("No exception was thrown (" + message + "), expected: " +
          expected.getName());
    } catch (Exception actual) {
      handleException(message, expected, containedInMessage, actual);
    }
  }

  /**
   * Validates a caught exception's class and message; if validation fails, the
   * original exception is attached as a suppressed throwable so its stack trace
   * is not lost.
   */
  private static void handleException(String message,
                                      Class<? extends Exception> expected,
                                      String containedInMessage,
                                      Exception actual) {
    try {
      Assert.assertEquals(message, expected, actual.getClass());
      Assert.assertTrue(
          "Expected exception message (" + containedInMessage + ") missing: " +
              actual.getMessage(),
          actual.getMessage().contains(containedInMessage)
      );
    } catch (AssertionError e) {
      e.addSuppressed(actual);
      throw e;
    }
  }
}
| 6,282 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/LocalTableOperations.java | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.Maps;
import com.netflix.iceberg.exceptions.RuntimeIOException;
import java.util.Map;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
/**
 * Minimal {@link TableOperations} backed by a JUnit {@link TemporaryFolder}.
 *
 * <p>Only {@link #metadataFileLocation(String)} and {@link #io()} are functional;
 * every other operation throws because the tests that use this class never call
 * them.
 */
class LocalTableOperations implements TableOperations {
  private final TemporaryFolder temp;
  private final FileIO io;
  // Caches file name -> absolute path so repeated lookups return the same file.
  private final Map<String, String> createdMetadataFilePaths = Maps.newHashMap();

  LocalTableOperations(TemporaryFolder tempFolder) {
    this.temp = tempFolder;
    this.io = new TestTables.LocalFileIO();
  }

  @Override
  public TableMetadata current() {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  @Override
  public TableMetadata refresh() {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  @Override
  public void commit(TableMetadata base, TableMetadata metadata) {
    throw new UnsupportedOperationException("Not implemented for tests");
  }

  @Override
  public FileIO io() {
    return io;
  }

  @Override
  public String metadataFileLocation(String fileName) {
    // Create the temp file on first request, then keep handing back its path.
    String known = createdMetadataFilePaths.get(fileName);
    if (known != null) {
      return known;
    }
    try {
      String created = temp.newFile(fileName).getAbsolutePath();
      createdMetadataFilePaths.put(fileName, created);
      return created;
    } catch (IOException e) {
      throw new RuntimeIOException(e);
    }
  }

  @Override
  public long newSnapshotId() {
    throw new UnsupportedOperationException("Not implemented for tests");
  }
}
| 6,283 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestReplaceTransaction.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import static com.netflix.iceberg.PartitionSpec.unpartitioned;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for replace-table transactions created via {@code TestTables.beginReplace}.
 *
 * <p>These tests rely on fixtures from {@code TableTestBase}: the {@code table}
 * and {@code tableDir} fields, data-file constants FILE_A..FILE_D, the metadata
 * {@code version()} counter, and the {@code validateSnapshot} helper.
 */
public class TestReplaceTransaction extends TableTestBase {
  @Test
  public void testReplaceTransaction() {
    // Replacement schema uses fresh field ids (4, 5); ids are reassigned on
    // commit (see testReplaceWithIncompatibleSchemaUpdate, where ids restart at 1).
    Schema newSchema = new Schema(
        required(4, "id", Types.IntegerType.get()),
        required(5, "data", Types.StringType.get()));
    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
    Transaction replace = TestTables.beginReplace(tableDir, "test", newSchema, unpartitioned());
    replace.commitTransaction();
    table.refresh();
    Assert.assertEquals("Version should be 2", 2L, (long) version());
    // A replace with no appended data leaves the table without a snapshot.
    Assert.assertNull("Table should not have a current snapshot", table.currentSnapshot());
    // After id reassignment the new schema is structurally equal to the old one.
    Assert.assertEquals("Schema should match previous schema",
        schema.asStruct(), table.schema().asStruct());
    Assert.assertEquals("Partition spec should have no fields",
        0, table.spec().fields().size());
  }

  @Test
  public void testReplaceWithIncompatibleSchemaUpdate() {
    // The replacement schema drops/renames columns, so it is NOT
    // read-compatible with the previous schema; replace allows this.
    Schema newSchema = new Schema(
        required(4, "obj_id", Types.IntegerType.get()));
    Snapshot start = table.currentSnapshot();
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
    Transaction replace = TestTables.beginReplace(tableDir, "test", newSchema, unpartitioned());
    replace.commitTransaction();
    table.refresh();
    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNull("Table should not have a current snapshot", table.currentSnapshot());
    // Field ids are reassigned from 1, so the committed schema uses id 1.
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        new Schema(required(1, "obj_id", Types.IntegerType.get())).asStruct(),
        table.schema().asStruct());
  }

  @Test
  public void testReplaceWithNewPartitionSpec() {
    PartitionSpec newSpec = PartitionSpec.unpartitioned();
    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
    // Same schema, but the spec is replaced with an unpartitioned one.
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), newSpec);
    replace.commitTransaction();
    table.refresh();
    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNull("Table should not have a current snapshot", table.currentSnapshot());
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        schema.asStruct(), table.schema().asStruct());
    Assert.assertEquals("Table should have new unpartitioned spec",
        0, table.spec().fields().size());
  }

  @Test
  public void testReplaceWithNewData() {
    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    // Data appended inside the replace transaction becomes the table's only content.
    replace.newAppend()
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    replace.commitTransaction();
    table.refresh();
    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNotNull("Table should have a current snapshot", table.currentSnapshot());
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        schema.asStruct(), table.schema().asStruct());
    // Parent is null: the replace discarded the previous snapshot history.
    validateSnapshot(null, table.currentSnapshot(), FILE_B, FILE_C, FILE_D);
  }

  @Test
  public void testReplaceDetectsUncommittedChangeOnCommit() {
    Assert.assertEquals("Version should be 0", 0L, (long) version());
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    replace.newAppend() // not committed
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D);
    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class, "Cannot commit transaction: last operation has not committed",
        replace::commitTransaction);
    Assert.assertEquals("Version should be 0", 0L, (long) version());
  }

  @Test
  public void testReplaceDetectsUncommittedChangeOnTableCommit() {
    Assert.assertEquals("Version should be 0", 0L, (long) version());
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    // Same as above, but the pending operation is created via the transaction's
    // table view rather than the transaction itself.
    replace.table().newAppend() // not committed
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D);
    AssertHelpers.assertThrows("Should reject commit when last operation has not committed",
        IllegalStateException.class, "Cannot commit transaction: last operation has not committed",
        replace::commitTransaction);
    Assert.assertEquals("Version should be 0", 0L, (long) version());
  }

  @Test
  public void testReplaceTransactionRetry() {
    Snapshot start = table.currentSnapshot();
    Schema schema = table.schema();
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    replace.newAppend()
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    // One injected commit failure: the transaction is expected to retry and succeed.
    table.ops().failCommits(1);
    replace.commitTransaction();
    table.refresh();
    Assert.assertEquals("Version should be 2", 2L, (long) version());
    Assert.assertNotNull("Table should have a current snapshot", table.currentSnapshot());
    Assert.assertEquals("Schema should use new schema, not compatible with previous",
        schema.asStruct(), table.schema().asStruct());
    validateSnapshot(null, table.currentSnapshot(), FILE_B, FILE_C, FILE_D);
  }

  @Test
  public void testReplaceTransactionConflict() {
    Snapshot start = table.currentSnapshot();
    table.newAppend()
        .appendFile(FILE_A)
        .commit();
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
    Transaction replace = TestTables.beginReplace(tableDir, "test", table.schema(), table.spec());
    replace.newAppend()
        .appendFile(FILE_B)
        .appendFile(FILE_C)
        .appendFile(FILE_D)
        .commit();
    // keep failing to trigger eventual transaction failure
    // (reaches into the transaction's internal ops to inject failures)
    ((TestTables.TestTableOperations) ((BaseTransaction) replace).ops).failCommits(100);
    AssertHelpers.assertThrows("Should reject commit when retries are exhausted",
        CommitFailedException.class, "Injected failure",
        replace::commitTransaction);
    // The failed transaction must leave the table state untouched.
    Assert.assertEquals("Version should be 1", 1L, (long) version());
    table.refresh();
    validateSnapshot(start, table.currentSnapshot(), FILE_A);
  }

  @Test
  public void testReplaceToCreateAndAppend() throws IOException {
    // NOTE: this local deliberately shadows the tableDir field; the test targets
    // a separate, non-existent table location.
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());
    // this table doesn't exist.
    Transaction replace = TestTables.beginReplace(tableDir, "test_append", SCHEMA, unpartitioned());
    Assert.assertNull("Starting a create transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));
    Assert.assertTrue("Should return a transaction table",
        replace.table() instanceof BaseTransaction.TransactionTable);
    replace.newAppend()
        .appendFile(FILE_A)
        .appendFile(FILE_B)
        .commit();
    Assert.assertNull("Appending in a transaction should not commit metadata",
        TestTables.readMetadata("test_append"));
    Assert.assertNull("Should have no metadata version",
        TestTables.metadataVersion("test_append"));
    replace.commitTransaction();
    TableMetadata meta = TestTables.readMetadata("test_append");
    Assert.assertNotNull("Table metadata should be created after transaction commits", meta);
    Assert.assertEquals("Should have metadata version 0",
        0, (int) TestTables.metadataVersion("test_append"));
    Assert.assertEquals("Should have 1 manifest file",
        1, listManifestFiles(tableDir).size());
    Assert.assertEquals("Table schema should match with reassigned IDs",
        assignFreshIds(SCHEMA).asStruct(), meta.schema().asStruct());
    Assert.assertEquals("Table spec should match", unpartitioned(), meta.spec());
    Assert.assertEquals("Table should have one snapshot", 1, meta.snapshots().size());
    validateSnapshot(null, meta.currentSnapshot(), FILE_A, FILE_B);
  }

  /** Returns a copy of the schema with field ids reassigned sequentially from 1. */
  private static Schema assignFreshIds(Schema schema) {
    AtomicInteger lastColumnId = new AtomicInteger(0);
    return TypeUtil.assignFreshIds(schema, lastColumnId::incrementAndGet);
  }
}
| 6,284 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/TestOverwrite.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.exceptions.ValidationException;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import static com.netflix.iceberg.expressions.Expressions.and;
import static com.netflix.iceberg.expressions.Expressions.equal;
import static com.netflix.iceberg.expressions.Expressions.lessThan;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests for {@code OverwriteFiles}: deleting files by row filter, appending
 * replacement files, and (optionally) validating that appended files match the
 * overwrite filter using their column metrics.
 */
public class TestOverwrite extends TableTestBase {
  private static final Schema DATE_SCHEMA = new Schema(
      required(1, "id", Types.LongType.get()),
      optional(2, "data", Types.StringType.get()),
      required(3, "date", Types.StringType.get()));

  private static final PartitionSpec PARTITION_BY_DATE = PartitionSpec
      .builderFor(DATE_SCHEMA)
      .identity("date")
      .build();

  private static final String TABLE_NAME = "overwrite_table";

  // Partition date=2018-06-08; id bounds [0, 4].
  private static final DataFile FILE_0_TO_4 = DataFiles.builder(PARTITION_BY_DATE)
      .withPath("/path/to/data-1.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("date=2018-06-08")
      .withMetrics(new Metrics(5L,
          null, // no column sizes
          ImmutableMap.of(1, 5L, 2, 3L), // value count
          ImmutableMap.of(1, 0L, 2, 2L), // null count
          ImmutableMap.of(1, longToBuffer(0L)), // lower bounds
          ImmutableMap.of(1, longToBuffer(4L)) // upper bounds
      ))
      .build();

  // Partition date=2018-06-09; id bounds [5, 9].
  private static final DataFile FILE_5_TO_9 = DataFiles.builder(PARTITION_BY_DATE)
      .withPath("/path/to/data-2.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("date=2018-06-09")
      .withMetrics(new Metrics(5L,
          null, // no column sizes
          ImmutableMap.of(1, 5L, 2, 3L), // value count
          ImmutableMap.of(1, 0L, 2, 2L), // null count
          ImmutableMap.of(1, longToBuffer(5L)), // lower bounds
          ImmutableMap.of(1, longToBuffer(9L)) // upper bounds
      ))
      .build();

  // Partition date=2018-06-09; id bounds [10, 14].
  // BUG FIX: this file previously duplicated FILE_5_TO_9's path
  // ("/path/to/data-2.parquet") and id bounds [5, 9], contradicting its name and
  // making the metrics-based validation tests below meaningless.
  private static final DataFile FILE_10_TO_14 = DataFiles.builder(PARTITION_BY_DATE)
      .withPath("/path/to/data-3.parquet")
      .withFileSizeInBytes(0)
      .withPartitionPath("date=2018-06-09")
      .withMetrics(new Metrics(5L,
          null, // no column sizes
          ImmutableMap.of(1, 5L, 2, 3L), // value count
          ImmutableMap.of(1, 0L, 2, 2L), // null count
          ImmutableMap.of(1, longToBuffer(10L)), // lower bounds
          ImmutableMap.of(1, longToBuffer(14L)) // upper bounds
      ))
      .build();

  /** Encodes a long as the little-endian bound buffer used by {@link Metrics}. */
  private static ByteBuffer longToBuffer(long value) {
    return ByteBuffer.allocate(8).order(ByteOrder.LITTLE_ENDIAN).putLong(0, value);
  }

  private Table table = null;

  /** Creates a fresh date-partitioned table containing FILE_0_TO_4 and FILE_5_TO_9. */
  @Before
  public void createTestTable() throws IOException {
    File tableDir = temp.newFolder();
    Assert.assertTrue(tableDir.delete());
    this.table = TestTables.create(tableDir, TABLE_NAME, DATE_SCHEMA, PARTITION_BY_DATE);
    table.newAppend()
        .appendFile(FILE_0_TO_4)
        .appendFile(FILE_5_TO_9)
        .commit();
  }

  @Test
  public void testOverwriteWithoutAppend() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    // Deletes all files fully matched by the filter; nothing is appended.
    table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .commit();
    long overwriteId = table.currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
    Assert.assertEquals("Table should have one manifest",
        1, table.currentSnapshot().manifests().size());
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(overwriteId, baseId),
        files(FILE_0_TO_4, FILE_5_TO_9),
        statuses(Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testOverwriteFailsDelete() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    // id < 9 only partially matches FILE_5_TO_9 (ids 5..9), so the delete is invalid.
    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(and(equal("date", "2018-06-09"), lessThan("id", 9)));
    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot delete file where some, but not all, rows match filter",
        overwrite::commit);
    Assert.assertEquals("Should not create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }

  @Test
  public void testOverwriteWithAppendOutsideOfDelete() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    // Without validateAddedFiles, appended files need not match the filter.
    table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .addFile(FILE_10_TO_14) // in 2018-06-09, NOT in 2018-06-08
        .commit();
    long overwriteId = table.currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
    Assert.assertEquals("Table should have 2 manifests",
        2, table.currentSnapshot().manifests().size());
    // manifest is not merged because it is less than the minimum
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(overwriteId),
        files(FILE_10_TO_14),
        statuses(Status.ADDED));
    validateManifestEntries(table.currentSnapshot().manifests().get(1),
        ids(overwriteId, baseId),
        files(FILE_0_TO_4, FILE_5_TO_9),
        statuses(Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testOverwriteWithMergedAppendOutsideOfDelete() {
    // ensure the overwrite results in a merge
    table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .addFile(FILE_10_TO_14) // in 2018-06-09, NOT in 2018-06-08
        .commit();
    long overwriteId = table.currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
    Assert.assertEquals("Table should have one merged manifest",
        1, table.currentSnapshot().manifests().size());
    validateManifestEntries(table.currentSnapshot().manifests().get(0),
        ids(overwriteId, overwriteId, baseId),
        files(FILE_10_TO_14, FILE_0_TO_4, FILE_5_TO_9),
        statuses(Status.ADDED, Status.DELETED, Status.EXISTING));
  }

  @Test
  public void testValidatedOverwriteWithAppendOutsideOfDelete() {
    // ensure the overwrite results in a merge
    table.updateProperties().set(TableProperties.MANIFEST_MIN_MERGE_COUNT, "1").commit();
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    // The appended file's partition (2018-06-09) does not match the filter.
    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(equal("date", "2018-06-08"))
        .addFile(FILE_10_TO_14) // in 2018-06-09, NOT in 2018-06-08
        .validateAddedFiles();
    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot append file with rows that do not match filter",
        overwrite::commit);
    Assert.assertEquals("Should not create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }

  @Test
  public void testValidatedOverwriteWithAppendOutsideOfDeleteMetrics() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    // Partition matches, but id bounds [10, 14] fall outside id < 10.
    OverwriteFiles overwrite = table.newOverwrite()
        .overwriteByRowFilter(and(equal("date", "2018-06-09"), lessThan("id", 10)))
        .addFile(FILE_10_TO_14) // in 2018-06-09 matches, but IDs are outside range
        .validateAddedFiles();
    AssertHelpers.assertThrows("Should reject commit with file not matching delete expression",
        ValidationException.class, "Cannot append file with rows that do not match filter",
        overwrite::commit);
    Assert.assertEquals("Should not create a new snapshot",
        baseId, table.currentSnapshot().snapshotId());
  }

  @Test
  public void testValidatedOverwriteWithAppendSuccess() {
    TableMetadata base = TestTables.readMetadata(TABLE_NAME);
    long baseId = base.currentSnapshot().snapshotId();
    // BUG FIX: this test previously asserted that the commit throws, copied from
    // the rejection tests above. With id bounds [10, 14] inside id < 20, the
    // validation passes and the commit must succeed.
    table.newOverwrite()
        .overwriteByRowFilter(and(equal("date", "2018-06-09"), lessThan("id", 20)))
        .addFile(FILE_10_TO_14) // in 2018-06-09 matches and IDs are inside range
        .validateAddedFiles()
        .commit();
    long overwriteId = table.currentSnapshot().snapshotId();
    Assert.assertNotEquals("Should create a new snapshot", baseId, overwriteId);
  }
}
| 6,285 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/util/TestBinPacking.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.util;
import com.google.common.collect.Lists;
import com.netflix.iceberg.util.BinPacking.ListPacker;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
public class TestBinPacking {
@Test
public void testBasicBinPacking() {
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 3));
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 5));
Assert.assertEquals("Should pack the first 3 values",
l(l(1, 2, 3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 6));
Assert.assertEquals("Should pack the first 3 values",
l(l(1, 2, 3), l(4), l(5)), pack(l(1, 2, 3, 4, 5), 8));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), pack(l(1, 2, 3, 4, 5), 9));
Assert.assertEquals("Should pack the first 4 values",
l(l(1, 2, 3, 4), l(5)), pack(l(1, 2, 3, 4, 5), 10));
Assert.assertEquals("Should pack the first 4 values",
l(l(1, 2, 3, 4), l(5)), pack(l(1, 2, 3, 4, 5), 14));
Assert.assertEquals("Should pack the first 5 values",
l(l(1, 2, 3, 4, 5)), pack(l(1, 2, 3, 4, 5), 15));
}
@Test
public void testReverseBinPackingSingleLookback() {
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 3, 1));
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 4, 1));
Assert.assertEquals("Should pack the second and third values",
l(l(1), l(2, 3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 5, 1));
Assert.assertEquals("Should pack the first 3 values",
l(l(1, 2, 3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 6, 1));
Assert.assertEquals("Should pack the first two pairs of values",
l(l(1, 2), l(3, 4), l(5)), packEnd(l(1, 2, 3, 4, 5), 7, 1));
Assert.assertEquals("Should pack the first two pairs of values",
l(l(1, 2), l(3, 4), l(5)), packEnd(l(1, 2, 3, 4, 5), 8, 1));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), packEnd(l(1, 2, 3, 4, 5), 9, 1));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), packEnd(l(1, 2, 3, 4, 5), 11, 1));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2), l(3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 12, 1));
Assert.assertEquals("Should pack the last 4 values",
l(l(1), l(2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 14, 1));
Assert.assertEquals("Should pack the first 5 values",
l(l(1, 2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 15, 1));
}
@Test
public void testReverseBinPackingUnlimitedLookback() {
Assert.assertEquals("Should pack the first 2 values",
l(l(1, 2), l(3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 3));
Assert.assertEquals("Should pack 1 with 3",
l(l(2), l(1, 3), l(4), l(5)), packEnd(l(1, 2, 3, 4, 5), 4));
Assert.assertEquals("Should pack 2,3 and 1,4",
l(l(2, 3), l(1, 4), l(5)), packEnd(l(1, 2, 3, 4, 5), 5));
Assert.assertEquals("Should pack 2,4 and 1,5",
l(l(3), l(2, 4), l(1, 5)), packEnd(l(1, 2, 3, 4, 5), 6));
Assert.assertEquals("Should pack 3,4 and 2,5",
l(l(1), l(3, 4), l(2, 5)), packEnd(l(1, 2, 3, 4, 5), 7));
Assert.assertEquals("Should pack 1,2,3 and 3,5",
l(l(1, 2, 4), l(3, 5)), packEnd(l(1, 2, 3, 4, 5), 8));
Assert.assertEquals("Should pack the first 3 values, last 2 values",
l(l(1, 2, 3), l(4, 5)), packEnd(l(1, 2, 3, 4, 5), 9));
Assert.assertEquals("Should pack 2,3 and 1,4,5",
l(l(2, 3), l(1, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 10));
Assert.assertEquals("Should pack 1,3 and 2,4,5",
l(l(1, 3), l(2, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 11));
Assert.assertEquals("Should pack 1,2 and 3,4,5",
l(l(1, 2), l(3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 12));
Assert.assertEquals("Should pack 1,2 and 3,4,5",
l(l(2), l(1, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 13));
Assert.assertEquals("Should pack the last 4 values",
l(l(1), l(2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 14));
Assert.assertEquals("Should pack the first 5 values",
l(l(1, 2, 3, 4, 5)), packEnd(l(1, 2, 3, 4, 5), 15));
}
@Test
public void testBinPackingLookBack() {
// lookback state:
// 1. [5]
// 2. [5, 1]
// 3. [5, 1], [5]
// 4. [5, 1, 1], [5]
// 5. [5, 1, 1], [5], [5]
// 6. [5, 1, 1, 1], [5], [5]
Assert.assertEquals("Unlimited look-back: should merge ones into first bin",
l(l(5, 1, 1, 1), l(5), l(5)), pack(l(5, 1, 5, 1, 5, 1), 8));
// lookback state:
// 1. [5]
// 2. [5, 1]
// 3. [5, 1], [5]
// 4. [5, 1, 1], [5]
// 5. [5], [5] ([5, 1, 1] drops out of look-back)
// 6. [5, 1], [5]
Assert.assertEquals("2 bin look-back: should merge two ones into first bin",
l(l(5, 1, 1), l(5, 1), l(5)), pack(l(5, 1, 5, 1, 5, 1), 8, 2));
// lookback state:
// 1. [5]
// 2. [5, 1]
// 3. [5] ([5, 1] drops out of look-back)
// 4. [5, 1]
// 5. [5] ([5, 1] #2 drops out of look-back)
// 6. [5, 1]
Assert.assertEquals("1 bin look-back: should merge ones with fives",
l(l(5, 1), l(5, 1), l(5, 1)), pack(l(5, 1, 5, 1, 5, 1), 8, 1));
}
private List<List<Integer>> pack(List<Integer> items, long targetWeight) {
return pack(items, targetWeight, Integer.MAX_VALUE);
}
private List<List<Integer>> pack(List<Integer> items, long targetWeight, int lookback) {
ListPacker<Integer> packer = new ListPacker<>(targetWeight, lookback);
return packer.pack(items, Integer::longValue);
}
private List<List<Integer>> packEnd(List<Integer> items, long targetWeight) {
return packEnd(items, targetWeight, Integer.MAX_VALUE);
}
/** Packs {@code items} from the end into bins of {@code targetWeight} with the given look-back. */
private List<List<Integer>> packEnd(List<Integer> items, long targetWeight, int lookback) {
  // each item's weight is just its integer value
  return new ListPacker<Integer>(targetWeight, lookback).packEnd(items, Integer::longValue);
}
/** Shorthand for building a mutable list from varargs in test fixtures. */
private <T> List<T> l(T... items) {
  List<T> values = Lists.newArrayList();
  for (T item : items) {
    values.add(item);
  }
  return values;
}
}
| 6,286 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/AvroTestHelpers.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.util.CharSequenceWrapper;
import org.apache.avro.JsonProperties;
import org.apache.avro.Schema;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import java.time.LocalDate;
import java.time.temporal.ChronoUnit;
import java.util.Arrays;
import java.util.Collection;
import java.util.Date;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import static com.netflix.iceberg.avro.AvroSchemaUtil.toOption;
/**
 * Static helpers for building Avro schemas annotated with Iceberg field IDs and
 * for asserting that Avro data matches an expected Iceberg type structure.
 */
class AvroTestHelpers {
  private AvroTestHelpers() {
    // utility class: static members only, prevent instantiation
  }

  /** Returns a nullable (union-with-null) field carrying the given Iceberg field id. */
  static Schema.Field optionalField(int id, String name, Schema schema) {
    return addId(id, new Schema.Field(name, toOption(schema), null, JsonProperties.NULL_VALUE));
  }

  /** Returns a non-null field carrying the given Iceberg field id. */
  static Schema.Field requiredField(int id, String name, Schema schema) {
    return addId(id, new Schema.Field(name, schema, null, null));
  }

  /** Builds an Avro record schema with the given name and fields. */
  static Schema record(String name, Schema.Field... fields) {
    return Schema.createRecord(name, null, null, false, Arrays.asList(fields));
  }

  /** Tags a field with the Iceberg field-id property and returns it for chaining. */
  static Schema.Field addId(int id, Schema.Field field) {
    field.addProp(AvroSchemaUtil.FIELD_ID_PROP, id);
    return field;
  }

  /** Tags an array schema with the Iceberg element-id property. */
  static Schema addElementId(int id, Schema schema) {
    schema.addProp(AvroSchemaUtil.ELEMENT_ID_PROP, id);
    return schema;
  }

  /** Tags a map schema with the Iceberg key-id property. */
  static Schema addKeyId(int id, Schema schema) {
    schema.addProp(AvroSchemaUtil.KEY_ID_PROP, id);
    return schema;
  }

  /** Tags a map schema with the Iceberg value-id property. */
  static Schema addValueId(int id, Schema schema) {
    schema.addProp(AvroSchemaUtil.VALUE_ID_PROP, id);
    return schema;
  }

  /** Asserts, field by field, that {@code actual} matches {@code expected} under {@code struct}. */
  static void assertEquals(Types.StructType struct, Record expected, Record actual) {
    List<Types.NestedField> fields = struct.fields();
    for (int i = 0; i < fields.size(); i += 1) {
      Type fieldType = fields.get(i).type();
      Object expectedValue = expected.get(i);
      Object actualValue = actual.get(i);
      assertEquals(fieldType, expectedValue, actualValue);
    }
  }

  /** Asserts, element by element, that {@code actual} matches {@code expected} under {@code list}. */
  static void assertEquals(Types.ListType list, List<?> expected, List<?> actual) {
    Type elementType = list.elementType();
    Assert.assertEquals("List size should match", expected.size(), actual.size());
    for (int i = 0; i < expected.size(); i += 1) {
      Object expectedValue = expected.get(i);
      Object actualValue = actual.get(i);
      assertEquals(elementType, expectedValue, actualValue);
    }
  }

  /** Asserts, entry by entry, that {@code actual} matches {@code expected} under {@code map}. */
  static void assertEquals(Types.MapType map, Map<?, ?> expected, Map<?, ?> actual) {
    Type valueType = map.valueType();
    Assert.assertEquals("Map size should match", expected.size(), actual.size());
    for (Object expectedKey : expected.keySet()) {
      Object expectedValue = expected.get(expectedKey);
      Object actualValue = actual.get(expectedKey);
      assertEquals(valueType, expectedValue, actualValue);
    }
  }

  /** Dispatches to the struct/list/map helpers or compares primitives directly. */
  private static void assertEquals(Type type, Object expected, Object actual) {
    if (expected == null && actual == null) {
      // both absent counts as equal; mismatched nulls fail in the type cases below
      return;
    }
    switch (type.typeId()) {
      case BOOLEAN:
      case INTEGER:
      case LONG:
      case FLOAT:
      case DOUBLE:
      case STRING:
      case DATE:
      case TIME:
      case TIMESTAMP:
      case UUID:
      case FIXED:
      case BINARY:
      case DECIMAL:
        Assert.assertEquals("Primitive value should be equal to expected", expected, actual);
        break;
      case STRUCT:
        Assert.assertTrue("Expected should be a Record", expected instanceof Record);
        Assert.assertTrue("Actual should be a Record", actual instanceof Record);
        assertEquals(type.asStructType(), (Record) expected, (Record) actual);
        break;
      case LIST:
        Assert.assertTrue("Expected should be a List", expected instanceof List);
        Assert.assertTrue("Actual should be a List", actual instanceof List);
        // use wildcard casts instead of raw types, matching the MAP branch
        assertEquals(type.asListType(), (List<?>) expected, (List<?>) actual);
        break;
      case MAP:
        Assert.assertTrue("Expected should be a Map", expected instanceof Map);
        Assert.assertTrue("Actual should be a Map", actual instanceof Map);
        assertEquals(type.asMapType(), (Map<?, ?>) expected, (Map<?, ?>) actual);
        break;
      default:
        throw new IllegalArgumentException("Not a supported type: " + type);
    }
  }
}
| 6,287 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/RandomAvroData.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.util.Utf8;
import java.math.BigDecimal;
import java.math.BigInteger;
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.UUID;
import java.util.function.Supplier;
/**
 * Generates random Avro {@link Record}s conforming to an Iceberg {@link Schema},
 * for use as test data in read/write round-trip tests. Results are deterministic
 * for a given schema and seed.
 */
public class RandomAvroData {
  /**
   * Returns {@code numRecords} random records for the given schema.
   *
   * @param schema the Iceberg schema the generated records conform to
   * @param numRecords number of records to generate
   * @param seed seed for the random source, making runs reproducible
   */
  public static List<Record> generate(Schema schema, int numRecords, long seed) {
    RandomDataGenerator generator = new RandomDataGenerator(schema, seed);
    List<Record> records = Lists.newArrayListWithExpectedSize(numRecords);
    for (int i = 0; i < numRecords; i += 1) {
      // each visit of the schema produces one fully populated record
      records.add((Record) TypeUtil.visit(schema, generator));
    }
    return records;
  }
  /**
   * Schema visitor that produces a random value for every schema node it visits.
   * The traversal order drives the random sequence, so the same schema and seed
   * always yield the same data.
   */
  private static class RandomDataGenerator extends TypeUtil.CustomOrderSchemaVisitor<Object> {
    // Avro schema for each Iceberg type node, used when constructing containers
    private final Map<Type, org.apache.avro.Schema> typeToSchema;
    private final Random random;
    private RandomDataGenerator(Schema schema, long seed) {
      this.typeToSchema = AvroSchemaUtil.convertTypes(schema.asStruct(), "test");
      this.random = new Random(seed);
    }
    @Override
    public Record schema(Schema schema, Supplier<Object> structResult) {
      // the top-level schema result is the root struct's record
      return (Record) structResult.get();
    }
    @Override
    public Record struct(Types.StructType struct, Iterable<Object> fieldResults) {
      Record rec = new Record(typeToSchema.get(struct));
      List<Object> values = Lists.newArrayList(fieldResults);
      for (int i = 0; i < values.size(); i += 1) {
        rec.put(i, values.get(i));
      }
      return rec;
    }
    @Override
    public Object field(Types.NestedField field, Supplier<Object> fieldResult) {
      // return null 5% of the time when the value is optional
      if (field.isOptional() && random.nextInt(20) == 1) {
        return null;
      }
      return fieldResult.get();
    }
    @Override
    public Object list(Types.ListType list, Supplier<Object> elementResult) {
      // lists have 0-19 elements
      int numElements = random.nextInt(20);
      List<Object> result = Lists.newArrayListWithExpectedSize(numElements);
      for (int i = 0; i < numElements; i += 1) {
        // return null 5% of the time when the value is optional
        if (list.isElementOptional() && random.nextInt(20) == 1) {
          result.add(null);
        } else {
          result.add(elementResult.get());
        }
      }
      return result;
    }
    @Override
    public Object map(Types.MapType map, Supplier<Object> keyResult, Supplier<Object> valueResult) {
      // maps have 0-19 entries; LinkedHashMap keeps generation order stable
      int numEntries = random.nextInt(20);
      Map<Object, Object> result = Maps.newLinkedHashMap();
      Supplier<Object> keyFunc;
      if (map.keyType() == Types.StringType.get()) {
        // string keys are materialized as String rather than Utf8
        keyFunc = () -> keyResult.get().toString();
      } else {
        keyFunc = keyResult;
      }
      Set<Object> keySet = Sets.newHashSet();
      for (int i = 0; i < numEntries; i += 1) {
        Object key = keyFunc.get();
        // ensure no collisions
        while (keySet.contains(key)) {
          key = keyFunc.get();
        }
        keySet.add(key);
        // return null 5% of the time when the value is optional
        if (map.isValueOptional() && random.nextInt(20) == 1) {
          result.put(key, null);
        } else {
          result.put(key, valueResult.get());
        }
      }
      return result;
    }
    @Override
    public Object primitive(Type.PrimitiveType primitive) {
      Object result = generatePrimitive(primitive, random);
      // For the primitives that Avro needs a different type than Spark, fix
      // them here.
      switch (primitive.typeId()) {
        case FIXED:
          return new GenericData.Fixed(typeToSchema.get(primitive),
              (byte[]) result);
        case BINARY:
          return ByteBuffer.wrap((byte[]) result);
        case UUID:
          return UUID.nameUUIDFromBytes((byte[]) result);
        default:
          return result;
      }
    }
  }
  /**
   * Returns a random value for a primitive type. Roughly 1-in-20 calls pick a
   * boundary value (min/max/zero/NaN/infinity) instead of a uniform draw.
   */
  private static Object generatePrimitive(Type.PrimitiveType primitive,
                                          Random random) {
    int choice = random.nextInt(20);
    switch (primitive.typeId()) {
      case BOOLEAN:
        return choice < 10;
      case INTEGER:
        switch (choice) {
          case 1:
            return Integer.MIN_VALUE;
          case 2:
            return Integer.MAX_VALUE;
          case 3:
            return 0;
          default:
            return random.nextInt();
        }
      case LONG:
        switch (choice) {
          case 1:
            return Long.MIN_VALUE;
          case 2:
            return Long.MAX_VALUE;
          case 3:
            return 0L;
          default:
            return random.nextLong();
        }
      case FLOAT:
        switch (choice) {
          case 1:
            return Float.MIN_VALUE;
          case 2:
            return -Float.MIN_VALUE;
          case 3:
            return Float.MAX_VALUE;
          case 4:
            return -Float.MAX_VALUE;
          case 5:
            return Float.NEGATIVE_INFINITY;
          case 6:
            return Float.POSITIVE_INFINITY;
          case 7:
            return 0.0F;
          case 8:
            return Float.NaN;
          default:
            return random.nextFloat();
        }
      case DOUBLE:
        switch (choice) {
          case 1:
            return Double.MIN_VALUE;
          case 2:
            return -Double.MIN_VALUE;
          case 3:
            return Double.MAX_VALUE;
          case 4:
            return -Double.MAX_VALUE;
          case 5:
            return Double.NEGATIVE_INFINITY;
          case 6:
            return Double.POSITIVE_INFINITY;
          case 7:
            return 0.0D;
          case 8:
            return Double.NaN;
          default:
            return random.nextDouble();
        }
      case DATE:
        // this will include negative values (dates before 1970-01-01)
        return random.nextInt() % ABOUT_380_YEARS_IN_DAYS;
      case TIME:
        // NOTE(review): masking a long with Integer.MAX_VALUE caps the value at
        // ~2^31 micros (~36 minutes), so the modulo by ONE_DAY_IN_MICROS
        // (~8.64e10) never applies — confirm whether the full day range was intended
        return (random.nextLong() & Integer.MAX_VALUE) % ONE_DAY_IN_MICROS;
      case TIMESTAMP:
        // may be negative (timestamps before 1970-01-01)
        return random.nextLong() % FIFTY_YEARS_IN_MICROS;
      case STRING:
        return randomString(random);
      case UUID:
        byte[] uuidBytes = new byte[16];
        random.nextBytes(uuidBytes);
        // this will hash the uuidBytes
        return uuidBytes;
      case FIXED:
        byte[] fixed = new byte[((Types.FixedType) primitive).length()];
        random.nextBytes(fixed);
        return fixed;
      case BINARY:
        byte[] binary = new byte[random.nextInt(50)];
        random.nextBytes(binary);
        return binary;
      case DECIMAL:
        Types.DecimalType type = (Types.DecimalType) primitive;
        BigInteger unscaled = randomUnscaled(type.precision(), random);
        return new BigDecimal(unscaled, type.scale());
      default:
        throw new IllegalArgumentException(
            "Cannot generate random value for unknown type: " + primitive);
    }
  }
  // 50 years in micros, averaging leap years (3 normal years + 1 leap year per 4)
  private static final long FIFTY_YEARS_IN_MICROS =
      (50L * (365 * 3 + 366) * 24 * 60 * 60 * 1_000_000) / 4;
  private static final int ABOUT_380_YEARS_IN_DAYS = 380 * 365;
  private static final long ONE_DAY_IN_MICROS = 24 * 60 * 60 * 1_000_000L;
  // alphabet for random strings; ASCII-only so byte length equals char length
  private static final String CHARS =
      "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-.!?";
  // builds a random Utf8 string of 0-49 ASCII characters from CHARS
  private static Utf8 randomString(Random random) {
    int length = random.nextInt(50);
    byte[] buffer = new byte[length];
    for (int i = 0; i < length; i += 1) {
      buffer[i] = (byte) CHARS.charAt(random.nextInt(CHARS.length()));
    }
    return new Utf8(buffer);
  }
  private static final String DIGITS = "0123456789";
  // builds a random unscaled decimal value with fewer than `precision` digits
  private static BigInteger randomUnscaled(int precision, Random random) {
    int length = random.nextInt(precision);
    if (length == 0) {
      return BigInteger.ZERO;
    }
    StringBuilder sb = new StringBuilder();
    for (int i = 0; i < length; i += 1) {
      sb.append(DIGITS.charAt(random.nextInt(DIGITS.length())));
    }
    return new BigInteger(sb.toString());
  }
}
| 6,288 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestGenericAvro.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.io.FileAppender;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import java.io.File;
import java.io.IOException;
import java.util.List;
public class TestGenericAvro extends AvroDataTest {
protected void writeAndValidate(Schema schema) throws IOException {
List<Record> expected = RandomAvroData.generate(schema, 100, 0L);
File testFile = temp.newFile();
Assert.assertTrue("Delete should succeed", testFile.delete());
try (FileAppender<Record> writer = Avro.write(Files.localOutput(testFile))
.schema(schema)
.named("test")
.build()) {
for (Record rec : expected) {
writer.add(rec);
}
}
List<Record> rows;
try (AvroIterable<Record> reader = Avro.read(Files.localInput(testFile))
.project(schema)
.build()) {
rows = Lists.newArrayList(reader);
}
for (int i = 0; i < expected.size(); i += 1) {
AvroTestHelpers.assertEquals(schema.asStruct(), expected.get(i), rows.get(i));
}
}
}
| 6,289 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestSchemaConversions.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Lists;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.Types;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.junit.Assert;
import org.junit.Test;
import java.util.List;
import static com.netflix.iceberg.avro.AvroTestHelpers.addElementId;
import static com.netflix.iceberg.avro.AvroTestHelpers.addKeyId;
import static com.netflix.iceberg.avro.AvroTestHelpers.addValueId;
import static com.netflix.iceberg.avro.AvroTestHelpers.optionalField;
import static com.netflix.iceberg.avro.AvroTestHelpers.record;
import static com.netflix.iceberg.avro.AvroTestHelpers.requiredField;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Tests bidirectional conversion between Iceberg types and Avro schemas,
 * including field-id, element-id, key-id, and value-id annotations.
 */
public class TestSchemaConversions {
  /** Verifies each Iceberg primitive converts to and from its Avro equivalent. */
  @Test
  public void testPrimitiveTypes() {
    // paired index-for-index with avroPrimitives below
    List<Type> primitives = Lists.newArrayList(
        Types.BooleanType.get(),
        Types.IntegerType.get(),
        Types.LongType.get(),
        Types.FloatType.get(),
        Types.DoubleType.get(),
        Types.DateType.get(),
        Types.TimeType.get(),
        Types.TimestampType.withZone(),
        Types.TimestampType.withoutZone(),
        Types.StringType.get(),
        Types.UUIDType.get(),
        Types.FixedType.ofLength(12),
        Types.BinaryType.get(),
        Types.DecimalType.of(9, 4)
    );
    List<Schema> avroPrimitives = Lists.newArrayList(
        Schema.create(Schema.Type.BOOLEAN),
        Schema.create(Schema.Type.INT),
        Schema.create(Schema.Type.LONG),
        Schema.create(Schema.Type.FLOAT),
        Schema.create(Schema.Type.DOUBLE),
        LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT)),
        LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG)),
        addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), true),
        addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), false),
        Schema.create(Schema.Type.STRING),
        LogicalTypes.uuid().addToSchema(Schema.createFixed("uuid_fixed", null, null, 16)),
        Schema.createFixed("fixed_12", null, null, 12),
        Schema.create(Schema.Type.BYTES),
        LogicalTypes.decimal(9, 4).addToSchema(Schema.createFixed("decimal_9_4", null, null, 4))
    );
    for (int i = 0; i < primitives.size(); i += 1) {
      Type type = primitives.get(i);
      Schema avro = avroPrimitives.get(i);
      Assert.assertEquals("Avro schema to primitive: " + avro,
          type, AvroSchemaUtil.convert(avro));
      Assert.assertEquals("Primitive to avro schema: " + type,
          avro, AvroSchemaUtil.convert(type));
    }
  }

  /** Marks a timestamp schema as UTC-adjusted (with zone) or not (without zone). */
  private Schema addAdjustToUtc(Schema schema, boolean adjustToUTC) {
    schema.addProp(AvroSchemaUtil.ADJUST_TO_UTC_PROP, adjustToUTC);
    return schema;
  }

  /** Verifies a struct of all primitive types converts in both directions. */
  @Test
  public void testStructAndPrimitiveTypes() {
    Types.StructType struct = Types.StructType.of(
        optional(20, "bool", Types.BooleanType.get()),
        optional(21, "int", Types.IntegerType.get()),
        optional(22, "long", Types.LongType.get()),
        optional(23, "float", Types.FloatType.get()),
        optional(24, "double", Types.DoubleType.get()),
        optional(25, "date", Types.DateType.get()),
        optional(27, "time", Types.TimeType.get()),
        optional(28, "timestamptz", Types.TimestampType.withZone()),
        optional(29, "timestamp", Types.TimestampType.withoutZone()),
        optional(30, "string", Types.StringType.get()),
        optional(31, "uuid", Types.UUIDType.get()),
        optional(32, "fixed", Types.FixedType.ofLength(16)),
        optional(33, "binary", Types.BinaryType.get()),
        optional(34, "decimal", Types.DecimalType.of(14, 2))
    );
    Schema schema = record("primitives",
        optionalField(20, "bool", Schema.create(Schema.Type.BOOLEAN)),
        optionalField(21, "int", Schema.create(Schema.Type.INT)),
        optionalField(22, "long", Schema.create(Schema.Type.LONG)),
        optionalField(23, "float", Schema.create(Schema.Type.FLOAT)),
        optionalField(24, "double", Schema.create(Schema.Type.DOUBLE)),
        optionalField(25, "date", LogicalTypes.date().addToSchema(Schema.create(Schema.Type.INT))),
        optionalField(27, "time", LogicalTypes.timeMicros().addToSchema(Schema.create(Schema.Type.LONG))),
        optionalField(28, "timestamptz", addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), true)),
        optionalField(29, "timestamp", addAdjustToUtc(LogicalTypes.timestampMicros().addToSchema(Schema.create(Schema.Type.LONG)), false)),
        optionalField(30, "string", Schema.create(Schema.Type.STRING)),
        optionalField(31, "uuid", LogicalTypes.uuid().addToSchema(Schema.createFixed("uuid_fixed", null, null, 16))),
        optionalField(32, "fixed", Schema.createFixed("fixed_16", null, null, 16)),
        optionalField(33, "binary", Schema.create(Schema.Type.BYTES)),
        optionalField(34, "decimal", LogicalTypes.decimal(14, 2).addToSchema(Schema.createFixed("decimal_14_2", null, null, 6)))
    );
    Assert.assertEquals("Test conversion from Avro schema",
        struct, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Test conversion to Avro schema",
        schema, AvroSchemaUtil.convert(struct, "primitives"));
  }

  /** Verifies element ids are carried on array schemas. */
  @Test
  public void testList() {
    Type list = Types.ListType.ofRequired(34, Types.UUIDType.get());
    Schema schema = addElementId(34, SchemaBuilder.array().items(
        LogicalTypes.uuid().addToSchema(Schema.createFixed("uuid_fixed", null, null, 16))));
    Assert.assertEquals("Avro schema to list",
        list, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("List to Avro schema",
        schema, AvroSchemaUtil.convert(list));
  }

  /** Verifies lists of structs convert, with the record named after the element id. */
  @Test
  public void testListOfStructs() {
    Type list = Types.ListType.ofRequired(34, Types.StructType.of(
        required(35, "lat", Types.FloatType.get()),
        required(36, "long", Types.FloatType.get())
    ));
    Schema schema = addElementId(34, SchemaBuilder.array().items(
        record("r34",
            requiredField(35, "lat", Schema.create(Schema.Type.FLOAT)),
            requiredField(36, "long", Schema.create(Schema.Type.FLOAT)))
    ));
    Assert.assertEquals("Avro schema to list",
        list, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("List to Avro schema",
        schema, AvroSchemaUtil.convert(list));
  }

  /** Non-string keys use the key/value record representation from createMap. */
  @Test
  public void testMapOfLongToBytes() {
    Type map = Types.MapType.ofRequired(33, 34, Types.LongType.get(), Types.BinaryType.get());
    Schema schema = AvroSchemaUtil.createMap(
        33, Schema.create(Schema.Type.LONG),
        34, Schema.create(Schema.Type.BYTES));
    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  /** String keys use Avro's native map schema with key/value id properties. */
  @Test
  public void testMapOfStringToBytes() {
    Type map = Types.MapType.ofRequired(33, 34, Types.StringType.get(), Types.BinaryType.get());
    Schema schema = addKeyId(33, addValueId(34, SchemaBuilder.map().values(
        Schema.create(Schema.Type.BYTES))));
    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  /** Complex (list) keys also require the key/value record representation. */
  @Test
  public void testMapOfListToStructs() {
    Type map = Types.MapType.ofRequired(33, 34,
        Types.ListType.ofRequired(35, Types.IntegerType.get()),
        Types.StructType.of(
            required(36, "a", Types.IntegerType.get()),
            optional(37, "b", Types.IntegerType.get())
        ));
    Schema schema = AvroSchemaUtil.createMap(
        33, addElementId(35, Schema.createArray(Schema.create(Schema.Type.INT))),
        34, record("r34",
            requiredField(36, "a", Schema.create(Schema.Type.INT)),
            optionalField(37, "b", Schema.create(Schema.Type.INT))));
    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  /** String-keyed maps with struct values use Avro's native map schema. */
  @Test
  public void testMapOfStringToStructs() {
    Type map = Types.MapType.ofRequired(33, 34, Types.StringType.get(), Types.StructType.of(
        required(35, "a", Types.IntegerType.get()),
        optional(36, "b", Types.IntegerType.get())
    ));
    Schema schema = addKeyId(33, addValueId(34, SchemaBuilder.map().values(
        record("r34",
            requiredField(35, "a", Schema.create(Schema.Type.INT)),
            optionalField(36, "b", Schema.create(Schema.Type.INT))))));
    Assert.assertEquals("Avro schema to map",
        map, AvroSchemaUtil.convert(schema));
    Assert.assertEquals("Map to Avro schema",
        schema, AvroSchemaUtil.convert(map));
  }

  /** Converting a deeply nested schema should succeed and produce renderable JSON. */
  @Test
  public void testComplexSchema() {
    com.netflix.iceberg.Schema schema = new com.netflix.iceberg.Schema(
        required(1, "id", Types.IntegerType.get()),
        optional(2, "data", Types.StringType.get()),
        optional(
            3,
            "preferences",
            Types.StructType
                .of(required(8, "feature1", Types.BooleanType.get()), optional(9, "feature2", Types.BooleanType.get()))),
        required(
            4,
            "locations",
            Types.MapType.ofRequired(
                10,
                11,
                Types.StructType.of(
                    required(20, "address", Types.StringType.get()),
                    required(21, "city", Types.StringType.get()),
                    required(22, "state", Types.StringType.get()),
                    required(23, "zip", Types.IntegerType.get())
                ),
                Types.StructType.of(required(12, "lat", Types.FloatType.get()), required(13, "long", Types.FloatType.get()))
            )
        ),
        optional(
            5,
            "points",
            Types.ListType.ofOptional(
                14,
                Types.StructType.of(required(15, "x", Types.LongType.get()), required(16, "y", Types.LongType.get())))),
        required(6, "doubles", Types.ListType.ofRequired(17, Types.DoubleType.get())),
        optional(7, "properties", Types.MapType.ofOptional(18, 19, Types.StringType.get(), Types.StringType.get())));
    // previously the rendered schema was discarded; assert on the result so the
    // test fails visibly if conversion stops producing output
    String avroSchemaJson = AvroSchemaUtil.convert(schema, "newTableName").toString(true);
    Assert.assertNotNull("Converted schema should render as JSON", avroSchemaJson);
  }
}
| 6,290 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestReadProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Comparators;
import com.netflix.iceberg.types.Types;
import org.apache.avro.generic.GenericData.Record;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import java.util.List;
import java.util.Map;
public abstract class TestReadProjection {
/**
 * Writes {@code record} with {@code writeSchema} and reads it back projected
 * through {@code readSchema}; each file-format subclass supplies the round-trip.
 *
 * @param desc short label used to name the temporary test file
 * @param writeSchema schema the record is written with
 * @param readSchema projection schema used when reading the record back
 * @param record the record to round-trip
 * @return the record as read back under the projection
 * @throws IOException if the write or read fails
 */
protected abstract Record writeAndRead(String desc,
                                       Schema writeSchema,
                                       Schema readSchema,
                                       Record record) throws IOException;
// fresh temporary directory per test for the round-trip files
@Rule
public TemporaryFolder temp = new TemporaryFolder();
@Test
public void testFullProjection() throws Exception {
  // Reading with the exact write schema should round-trip both fields.
  Schema tableSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record written = new Record(AvroSchemaUtil.convert(tableSchema, "table"));
  written.put("id", 34L);
  written.put("data", "test");
  Record projected = writeAndRead("full_projection", tableSchema, tableSchema, written);
  Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
  boolean dataMatches = Comparators.charSequences()
      .compare("test", (CharSequence) projected.get("data")) == 0;
  Assert.assertTrue("Should contain the correct data value", dataMatches);
}
@Test
public void testReorderedFullProjection() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record written = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  written.put("id", 34L);
  written.put("data", "test");
  // Read schema lists the same fields in the opposite order; values must
  // follow the read schema's positions.
  Schema readSchema = new Schema(
      Types.NestedField.optional(1, "data", Types.StringType.get()),
      Types.NestedField.required(0, "id", Types.LongType.get())
  );
  Record projected = writeAndRead("full_projection", writeSchema, readSchema, written);
  Assert.assertEquals("Should contain the correct 0 value", "test", projected.get(0).toString());
  Assert.assertEquals("Should contain the correct 1 value", 34L, projected.get(1));
}
@Test
public void testReorderedProjection() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record written = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  written.put("id", 34L);
  written.put("data", "test");
  // Read schema surrounds "data" with fields the writer never produced; those
  // must come back null while "data" lands at its read-schema position.
  Schema readSchema = new Schema(
      Types.NestedField.optional(2, "missing_1", Types.StringType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get()),
      Types.NestedField.optional(3, "missing_2", Types.LongType.get())
  );
  Record projected = writeAndRead("full_projection", writeSchema, readSchema, written);
  Assert.assertNull("Should contain the correct 0 value", projected.get(0));
  Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
  Assert.assertNull("Should contain the correct 2 value", projected.get(2));
}
/** Projecting with an empty schema should yield a record with no fields at all. */
@Test
public void testEmptyProjection() throws Exception {
  Schema schema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
  record.put("id", 34L);
  record.put("data", "test");
  // schema.select() with no arguments produces an empty projection
  Record projected = writeAndRead("empty_projection", schema, schema.select(), record);
  Assert.assertNotNull("Should read a non-null record", projected);
  try {
    projected.get(0);
    Assert.fail("Should not retrieve value with ordinal 0");
  } catch (ArrayIndexOutOfBoundsException e) {
    // this is expected because there are no values
    // NOTE(review): pinned to ArrayIndexOutOfBoundsException specifically; if the
    // record implementation changes its bounds check, this catch may need widening
  }
}
@Test
public void testBasicProjection() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record written = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  written.put("id", 34L);
  written.put("data", "test");
  // Project only the id column: data must be absent.
  Schema idOnly = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get())
  );
  Record idProjection = writeAndRead("basic_projection_id", writeSchema, idOnly, written);
  Assert.assertNull("Should not project data", idProjection.get("data"));
  Assert.assertEquals("Should contain the correct id value", 34L, (long) idProjection.get("id"));
  // Project only the data column: id must be absent.
  Schema dataOnly = new Schema(
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record dataProjection = writeAndRead("basic_projection_data", writeSchema, dataOnly, written);
  Assert.assertNull("Should not project id", dataProjection.get("id"));
  boolean dataMatches = Comparators.charSequences()
      .compare("test", (CharSequence) dataProjection.get("data")) == 0;
  Assert.assertTrue("Should contain the correct data value", dataMatches);
}
@Test
public void testRename() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "data", Types.StringType.get())
  );
  Record written = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  written.put("id", 34L);
  written.put("data", "test");
  // The read schema renames field 1 from "data" to "renamed"; matching is by
  // field id, so the written value must surface under the new name.
  Schema readSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(1, "renamed", Types.StringType.get())
  );
  Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, written);
  Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
  boolean renamedMatches = Comparators.charSequences()
      .compare("test", (CharSequence) projected.get("renamed")) == 0;
  Assert.assertTrue("Should contain the correct data/renamed value", renamedMatches);
}
/** Projects individual fields out of a nested struct column. */
@Test
public void testNestedStructProjection() throws Exception {
  Schema writeSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(3, "location", Types.StructType.of(
          Types.NestedField.required(1, "lat", Types.FloatType.get()),
          Types.NestedField.required(2, "long", Types.FloatType.get())
      ))
  );
  Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
  record.put("id", 34L);
  // the location field is an optional union; unwrap it to build the nested record
  Record location = new Record(
      AvroSchemaUtil.fromOption(record.getSchema().getField("location").schema()));
  location.put("lat", 52.995143f);
  location.put("long", -1.539054f);
  record.put("location", location);
  // phase 1: project only id — the whole struct should be dropped
  Schema idOnly = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get())
  );
  Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
  Record projectedLocation = (Record) projected.get("location");
  Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
  Assert.assertNull("Should not project location", projectedLocation);
  // phase 2: project only location.lat — struct present with a single field
  Schema latOnly = new Schema(
      Types.NestedField.optional(3, "location", Types.StructType.of(
          Types.NestedField.required(1, "lat", Types.FloatType.get())
      ))
  );
  projected = writeAndRead("latitude_only", writeSchema, latOnly, record);
  projectedLocation = (Record) projected.get("location");
  Assert.assertNull("Should not project id", projected.get("id"));
  Assert.assertNotNull("Should project location", projected.get("location"));
  Assert.assertNull("Should not project longitude", projectedLocation.get("long"));
  Assert.assertEquals("Should project latitude",
      52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
  // phase 3: project only location.long
  Schema longOnly = new Schema(
      Types.NestedField.optional(3, "location", Types.StructType.of(
          Types.NestedField.required(2, "long", Types.FloatType.get())
      ))
  );
  projected = writeAndRead("longitude_only", writeSchema, longOnly, record);
  projectedLocation = (Record) projected.get("location");
  Assert.assertNull("Should not project id", projected.get("id"));
  Assert.assertNotNull("Should project location", projected.get("location"));
  Assert.assertNull("Should not project latitutde", projectedLocation.get("lat"));
  Assert.assertEquals("Should project longitude",
      -1.539054f, (float) projectedLocation.get("long"), 0.000001f);
  // phase 4: project the full struct by selecting the top-level column
  Schema locationOnly = writeSchema.select("location");
  projected = writeAndRead("location_only", writeSchema, locationOnly, record);
  projectedLocation = (Record) projected.get("location");
  Assert.assertNull("Should not project id", projected.get("id"));
  Assert.assertNotNull("Should project location", projected.get("location"));
  Assert.assertEquals("Should project latitude",
      52.995143f, (float) projectedLocation.get("lat"), 0.000001f);
  Assert.assertEquals("Should project longitude",
      -1.539054f, (float) projectedLocation.get("long"), 0.000001f);
}
// Verifies projection of a map column: an id-only projection drops the map,
// while selecting the map's key field, value field, or the map itself always
// materializes the complete map (observed behavior of all three cases below).
@Test
public void testMapProjection() throws IOException {
// write schema: required long id + optional string-to-string map
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(5, "properties",
Types.MapType.ofOptional(6, 7, Types.StringType.get(), Types.StringType.get()))
);
Map<String, String> properties = ImmutableMap.of("a", "A", "b", "B");
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
record.put("properties", properties);
// case 1: project only the id; the map must not be read back
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project properties map", projected.get("properties"));
// case 2: selecting only the key field still yields the entire map
// (toStringMap normalizes Avro Utf8 keys/values to String for comparison)
Schema keyOnly = writeSchema.select("properties.key");
projected = writeAndRead("key_only", writeSchema, keyOnly, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project entire map",
properties, toStringMap((Map) projected.get("properties")));
// case 3: selecting only the value field behaves the same way
Schema valueOnly = writeSchema.select("properties.value");
projected = writeAndRead("value_only", writeSchema, valueOnly, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project entire map",
properties, toStringMap((Map) projected.get("properties")));
// case 4: selecting the map field itself
Schema mapOnly = writeSchema.select("properties");
projected = writeAndRead("map_only", writeSchema, mapOnly, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project entire map",
properties, toStringMap((Map) projected.get("properties")));
}
/**
 * Normalizes a map read back from Avro for comparison with the expected map:
 * keys are converted to String and CharSequence values are converted to String
 * (Avro may return Utf8 instances rather than String); other values are kept
 * as-is.
 *
 * @param map the map read from a projected record, may be null
 * @return a new map with stringified keys, or null when the input is null
 */
private Map<String, ?> toStringMap(Map<?, ?> map) {
  if (map == null) {
    // tolerate a missing projection so callers' assertNotNull can report a
    // meaningful failure instead of an NPE thrown here
    return null;
  }
  Map<String, Object> stringMap = Maps.newHashMap();
  for (Map.Entry<?, ?> entry : map.entrySet()) {
    if (entry.getValue() instanceof CharSequence) {
      stringMap.put(entry.getKey().toString(), entry.getValue().toString());
    } else {
      stringMap.put(entry.getKey().toString(), entry.getValue());
    }
  }
  return stringMap;
}
// Verifies projection of a map whose values are structs: struct fields inside
// the map value can be projected individually, and a value field can be
// renamed by field id (latitude case at the end).
@Test
public void testMapOfStructsProjection() throws IOException {
// write schema: id + map of string -> {lat, long} structs
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7,
Types.StringType.get(),
Types.StructType.of(
Types.NestedField.required(1, "lat", Types.FloatType.get()),
Types.NestedField.required(2, "long", Types.FloatType.get())
)
))
);
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
// unwrap the optional map schema, then the optional value schema, to build
// a struct record for the map values
Record l1 = new Record(AvroSchemaUtil.fromOption(
AvroSchemaUtil.fromOption(record.getSchema().getField("locations").schema())
.getValueType()));
l1.put("lat", 53.992811f);
l1.put("long", -1.542616f);
Record l2 = new Record(l1.getSchema());
l2.put("lat", 52.995143f);
l2.put("long", -1.539054f);
record.put("locations", ImmutableMap.of("L1", l1, "L2", l2));
// case 1: id-only projection drops the map
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project locations map", projected.get("locations"));
// case 2: selecting the map field projects all entries with full structs
projected = writeAndRead("all_locations", writeSchema, writeSchema.select("locations"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project locations map",
record.get("locations"), toStringMap((Map) projected.get("locations")));
// case 3: selecting only the nested lat field keeps all keys but projects
// structs containing lat only
projected = writeAndRead("lat_only",
writeSchema, writeSchema.select("locations.lat"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Map<String, ?> locations = toStringMap((Map) projected.get("locations"));
Assert.assertNotNull("Should project locations map", locations);
Assert.assertEquals("Should contain L1 and L2",
Sets.newHashSet("L1", "L2"), locations.keySet());
Record projectedL1 = (Record) locations.get("L1");
Assert.assertNotNull("L1 should not be null", projectedL1);
Assert.assertEquals("L1 should contain lat",
53.992811f, (float) projectedL1.get("lat"), 0.000001);
Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
Record projectedL2 = (Record) locations.get("L2");
Assert.assertNotNull("L2 should not be null", projectedL2);
Assert.assertEquals("L2 should contain lat",
52.995143f, (float) projectedL2.get("lat"), 0.000001);
Assert.assertNull("L2 should not contain long", projectedL2.get("long"));
// case 4: same as above but projecting only the long field
projected = writeAndRead("long_only",
writeSchema, writeSchema.select("locations.long"), record);
Assert.assertNull("Should not project id", projected.get("id"));
locations = toStringMap((Map) projected.get("locations"));
Assert.assertNotNull("Should project locations map", locations);
Assert.assertEquals("Should contain L1 and L2",
Sets.newHashSet("L1", "L2"), locations.keySet());
projectedL1 = (Record) locations.get("L1");
Assert.assertNotNull("L1 should not be null", projectedL1);
Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
Assert.assertEquals("L1 should contain long",
-1.542616f, (float) projectedL1.get("long"), 0.000001);
projectedL2 = (Record) locations.get("L2");
Assert.assertNotNull("L2 should not be null", projectedL2);
Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
Assert.assertEquals("L2 should contain long",
-1.539054f, (float) projectedL2.get("long"), 0.000001);
// case 5: field id 1 ("lat") renamed to "latitude" in the read schema;
// values must be matched by id, so "latitude" carries the lat data
Schema latitiudeRenamed = new Schema(
Types.NestedField.optional(5, "locations", Types.MapType.ofOptional(6, 7,
Types.StringType.get(),
Types.StructType.of(
Types.NestedField.required(1, "latitude", Types.FloatType.get())
)
))
);
projected = writeAndRead("latitude_renamed", writeSchema, latitiudeRenamed, record);
Assert.assertNull("Should not project id", projected.get("id"));
locations = toStringMap((Map) projected.get("locations"));
Assert.assertNotNull("Should project locations map", locations);
Assert.assertEquals("Should contain L1 and L2",
Sets.newHashSet("L1", "L2"), locations.keySet());
projectedL1 = (Record) locations.get("L1");
Assert.assertNotNull("L1 should not be null", projectedL1);
Assert.assertEquals("L1 should contain latitude",
53.992811f, (float) projectedL1.get("latitude"), 0.000001);
Assert.assertNull("L1 should not contain lat", projectedL1.get("lat"));
Assert.assertNull("L1 should not contain long", projectedL1.get("long"));
projectedL2 = (Record) locations.get("L2");
Assert.assertNotNull("L2 should not be null", projectedL2);
Assert.assertEquals("L2 should contain latitude",
52.995143f, (float) projectedL2.get("latitude"), 0.000001);
Assert.assertNull("L2 should not contain lat", projectedL2.get("lat"));
Assert.assertNull("L2 should not contain long", projectedL2.get("long"));
}
/**
 * Verifies projection of a list column: an id-only projection drops the list,
 * while selecting either the list's element or the list field itself projects
 * the complete list.
 */
@Test
public void testListProjection() throws IOException {
  // table schema: required long id + optional list of longs
  Schema tableSchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get()),
      Types.NestedField.optional(10, "values",
          Types.ListType.ofOptional(11, Types.LongType.get()))
  );
  List<Long> expected = ImmutableList.of(56L, 57L, 58L);
  Record row = new Record(AvroSchemaUtil.convert(tableSchema, "table"));
  row.put("id", 34L);
  row.put("values", expected);
  // projecting only the id should drop the list entirely
  Schema idOnlySchema = new Schema(
      Types.NestedField.required(0, "id", Types.LongType.get())
  );
  Record result = writeAndRead("id_only", tableSchema, idOnlySchema, row);
  Assert.assertEquals("Should contain the correct id value", 34L, (long) result.get("id"));
  Assert.assertNull("Should not project values list", result.get("values"));
  // selecting the list's element field projects the whole list
  result = writeAndRead("element_only", tableSchema, tableSchema.select("values.element"), row);
  Assert.assertNull("Should not project id", result.get("id"));
  Assert.assertEquals("Should project entire list", expected, result.get("values"));
  // selecting the list field itself also projects the whole list
  result = writeAndRead("list_only", tableSchema, tableSchema.select("values"), row);
  Assert.assertNull("Should not project id", result.get("id"));
  Assert.assertEquals("Should project entire list", expected, result.get("values"));
}
// Verifies projection of a list whose elements are structs: individual struct
// fields can be projected, null values survive projection, and a field can be
// renamed by field id (y -> z at the end).
@Test
@SuppressWarnings("unchecked")
public void testListOfStructsProjection() throws IOException {
// write schema: id + list of {x required, y optional} point structs
Schema writeSchema = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get()),
Types.NestedField.optional(22, "points",
Types.ListType.ofOptional(21, Types.StructType.of(
Types.NestedField.required(19, "x", Types.IntegerType.get()),
Types.NestedField.optional(18, "y", Types.IntegerType.get())
))
)
);
Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
record.put("id", 34L);
// unwrap the optional list schema, then the optional element schema, to
// build struct records for the list elements
Record p1 = new Record(AvroSchemaUtil.fromOption(
AvroSchemaUtil.fromOption(record.getSchema().getField("points").schema())
.getElementType()));
p1.put("x", 1);
p1.put("y", 2);
Record p2 = new Record(p1.getSchema());
p2.put("x", 3);
p2.put("y", null);
record.put("points", ImmutableList.of(p1, p2));
// case 1: id-only projection drops the list
Schema idOnly = new Schema(
Types.NestedField.required(0, "id", Types.LongType.get())
);
Record projected = writeAndRead("id_only", writeSchema, idOnly, record);
Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
Assert.assertNull("Should not project points list", projected.get("points"));
// case 2: selecting the list field projects all elements with full structs
projected = writeAndRead("all_points", writeSchema, writeSchema.select("points"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertEquals("Should project points list",
record.get("points"), projected.get("points"));
// case 3: project only x from each element
projected = writeAndRead("x_only", writeSchema, writeSchema.select("points.x"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project points list", projected.get("points"));
List<Record> points = (List<Record>) projected.get("points");
Assert.assertEquals("Should read 2 points", 2, points.size());
Record projectedP1 = points.get(0);
Assert.assertEquals("Should project x", 1, (int) projectedP1.get("x"));
Assert.assertNull("Should not project y", projectedP1.get("y"));
Record projectedP2 = points.get(1);
Assert.assertEquals("Should project x", 3, (int) projectedP2.get("x"));
Assert.assertNull("Should not project y", projectedP2.get("y"));
// case 4: project only y; p2's y was written as null and must stay null
projected = writeAndRead("y_only", writeSchema, writeSchema.select("points.y"), record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project points list", projected.get("points"));
points = (List<Record>) projected.get("points");
Assert.assertEquals("Should read 2 points", 2, points.size());
projectedP1 = points.get(0);
Assert.assertNull("Should not project x", projectedP1.get("x"));
Assert.assertEquals("Should project y", 2, (int) projectedP1.get("y"));
projectedP2 = points.get(1);
Assert.assertNull("Should not project x", projectedP2.get("x"));
Assert.assertEquals("Should project null y", null, projectedP2.get("y"));
// case 5: field id 18 ("y") renamed to "z" in the read schema; values are
// matched by id, so "z" carries the y data
Schema yRenamed = new Schema(
Types.NestedField.optional(22, "points",
Types.ListType.ofOptional(21, Types.StructType.of(
Types.NestedField.optional(18, "z", Types.IntegerType.get())
))
)
);
projected = writeAndRead("y_renamed", writeSchema, yRenamed, record);
Assert.assertNull("Should not project id", projected.get("id"));
Assert.assertNotNull("Should project points list", projected.get("points"));
points = (List<Record>) projected.get("points");
Assert.assertEquals("Should read 2 points", 2, points.size());
projectedP1 = points.get(0);
Assert.assertNull("Should not project x", projectedP1.get("x"));
Assert.assertNull("Should not project y", projectedP1.get("y"));
Assert.assertEquals("Should project z", 2, (int) projectedP1.get("z"));
projectedP2 = points.get(1);
Assert.assertNull("Should not project x", projectedP2.get("x"));
Assert.assertNull("Should not project y", projectedP2.get("y"));
Assert.assertEquals("Should project null z", null, projectedP2.get("z"));
}
}
| 6,291 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/AvroDataTest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.types.Types;
import com.netflix.iceberg.types.Types.ListType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.MapType;
import com.netflix.iceberg.types.Types.StructType;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
import java.io.IOException;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Base class for Avro round-trip tests: each test builds a schema and calls
 * {@link #writeAndValidate(Schema)}, which subclasses implement to write and
 * read data for that schema and verify the values survive the round trip.
 */
public abstract class AvroDataTest {
// subclasses implement the actual write/read/compare cycle for a schema
protected abstract void writeAndValidate(Schema schema) throws IOException;
// one field for every supported primitive type; reused as a struct in the
// nested-type tests below so each container test also covers all primitives
private static final StructType SUPPORTED_PRIMITIVES = StructType.of(
required(100, "id", LongType.get()),
optional(101, "data", Types.StringType.get()),
required(102, "b", Types.BooleanType.get()),
optional(103, "i", Types.IntegerType.get()),
required(104, "l", LongType.get()),
optional(105, "f", Types.FloatType.get()),
required(106, "d", Types.DoubleType.get()),
optional(107, "date", Types.DateType.get()),
required(108, "ts", Types.TimestampType.withZone()),
required(110, "s", Types.StringType.get()),
required(111, "uuid", Types.UUIDType.get()),
required(112, "fixed", Types.FixedType.ofLength(7)),
optional(113, "bytes", Types.BinaryType.get()),
required(114, "dec_9_0", Types.DecimalType.of(9, 0)),
required(115, "dec_11_2", Types.DecimalType.of(11, 2)),
required(116, "dec_38_10", Types.DecimalType.of(38, 10)) // maximum precision
);
@Rule
public TemporaryFolder temp = new TemporaryFolder();
// flat record containing every supported primitive
@Test
public void testSimpleStruct() throws IOException {
writeAndValidate(new Schema(SUPPORTED_PRIMITIVES.fields()));
}
// list of primitives
@Test
public void testArray() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "data", ListType.ofOptional(2, Types.StringType.get())));
writeAndValidate(schema);
}
// list whose elements are structs of all primitives
@Test
public void testArrayOfStructs() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "data", ListType.ofOptional(2, SUPPORTED_PRIMITIVES)));
writeAndValidate(schema);
}
// string-keyed map
@Test
public void testMap() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "data", MapType.ofOptional(2, 3,
Types.StringType.get(),
Types.StringType.get())));
writeAndValidate(schema);
}
// non-string map key (cannot be represented as a plain Avro map)
@Test
public void testNumericMapKey() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "data", MapType.ofOptional(2, 3,
Types.LongType.get(),
Types.StringType.get())));
writeAndValidate(schema);
}
// struct-typed map key
@Test
public void testComplexMapKey() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "data", MapType.ofOptional(2, 3,
Types.StructType.of(
required(4, "i", Types.IntegerType.get()),
optional(5, "s", Types.StringType.get())),
Types.StringType.get())));
writeAndValidate(schema);
}
// map whose values are structs of all primitives
@Test
public void testMapOfStructs() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "data", MapType.ofOptional(2, 3,
Types.StringType.get(),
SUPPORTED_PRIMITIVES)));
writeAndValidate(schema);
}
// deep combinations: lists of maps, maps of lists, lists of lists, maps of
// maps, and structs mixing all of them
@Test
public void testMixedTypes() throws IOException {
Schema schema = new Schema(
required(0, "id", LongType.get()),
optional(1, "list_of_maps",
ListType.ofOptional(2, MapType.ofOptional(3, 4,
Types.StringType.get(),
SUPPORTED_PRIMITIVES))),
optional(5, "map_of_lists",
MapType.ofOptional(6, 7,
Types.StringType.get(),
ListType.ofOptional(8, SUPPORTED_PRIMITIVES))),
required(9, "list_of_lists",
ListType.ofOptional(10, ListType.ofOptional(11, SUPPORTED_PRIMITIVES))),
required(12, "map_of_maps",
MapType.ofOptional(13, 14,
Types.StringType.get(),
MapType.ofOptional(15, 16,
Types.StringType.get(),
SUPPORTED_PRIMITIVES))),
required(17, "list_of_struct_of_nested_types", ListType.ofOptional(19, StructType.of(
Types.NestedField.required(20, "m1", MapType.ofOptional(21, 22,
Types.StringType.get(),
SUPPORTED_PRIMITIVES)),
Types.NestedField.optional(23, "l1", ListType.ofRequired(24, SUPPORTED_PRIMITIVES)),
Types.NestedField.required(25, "l2", ListType.ofRequired(26, SUPPORTED_PRIMITIVES)),
Types.NestedField.optional(27, "m2", MapType.ofOptional(28, 29,
Types.StringType.get(),
SUPPORTED_PRIMITIVES))
)))
);
writeAndValidate(schema);
}
}
| 6,292 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/avro/TestAvroReadProjection.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.avro;
import com.google.common.collect.Iterables;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Files;
import com.netflix.iceberg.io.FileAppender;
import org.apache.avro.generic.GenericData;
import java.io.File;
import java.io.IOException;
public class TestAvroReadProjection extends TestReadProjection {
protected GenericData.Record writeAndRead(String desc,
Schema writeSchema,
Schema readSchema,
GenericData.Record record)
throws IOException {
File file = temp.newFile(desc + ".avro");
file.delete();
try (FileAppender<GenericData.Record> appender = Avro.write(Files.localOutput(file))
.schema(writeSchema)
.build()) {
appender.add(record);
}
Iterable<GenericData.Record> records = Avro.read(Files.localInput(file))
.project(readSchema)
.build();
return Iterables.getOnlyElement(records);
}
}
| 6,293 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/hadoop/HadoopTableTestBase.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.io.Files;
import com.netflix.iceberg.DataFile;
import com.netflix.iceberg.DataFiles;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Schema;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.TableMetadataParser;
import com.netflix.iceberg.TestTables;
import com.netflix.iceberg.types.Types;
import org.apache.hadoop.conf.Configuration;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;
import java.io.File;
import java.io.IOException;
import java.util.List;
import static com.netflix.iceberg.Files.localInput;
import static com.netflix.iceberg.TableMetadataParser.getFileExtension;
import static com.netflix.iceberg.types.Types.NestedField.optional;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * Base class for Hadoop table tests: provides shared schemas, a partition
 * spec, data file fixtures, and helpers for inspecting the version-numbered
 * metadata files and the version-hint file written by HadoopTableOperations.
 */
public class HadoopTableTestBase {
// Schema passed to create tables
static final Schema SCHEMA = new Schema(
required(3, "id", Types.IntegerType.get()),
required(4, "data", Types.StringType.get())
);
// This is the actual schema for the table, with column IDs reassigned
static final Schema TABLE_SCHEMA = new Schema(
required(1, "id", Types.IntegerType.get()),
required(2, "data", Types.StringType.get())
);
// TABLE_SCHEMA plus an optional column, as produced by the schema-update tests
static final Schema UPDATED_SCHEMA = new Schema(
required(1, "id", Types.IntegerType.get()),
required(2, "data", Types.StringType.get()),
optional(3, "n", Types.IntegerType.get())
);
// Partition spec used to create tables
static final PartitionSpec SPEC = PartitionSpec.builderFor(SCHEMA)
.bucket("data", 16)
.build();
static final HadoopTables TABLES = new HadoopTables(new Configuration());
// data file fixtures, one per bucket partition 0..3
// NOTE(review): FILE_C and FILE_D reuse FILE_A's path "/path/to/data-a.parquet";
// the tests only distinguish files by partition, but confirm the shared path
// is intentional before relying on paths in new tests.
static final DataFile FILE_A = DataFiles.builder(SPEC)
.withPath("/path/to/data-a.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=0") // easy way to set partition data for now
.withRecordCount(2) // needs at least one record or else metrics will filter it out
.build();
static final DataFile FILE_B = DataFiles.builder(SPEC)
.withPath("/path/to/data-b.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=1") // easy way to set partition data for now
.withRecordCount(2) // needs at least one record or else metrics will filter it out
.build();
static final DataFile FILE_C = DataFiles.builder(SPEC)
.withPath("/path/to/data-a.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=2") // easy way to set partition data for now
.withRecordCount(2) // needs at least one record or else metrics will filter it out
.build();
static final DataFile FILE_D = DataFiles.builder(SPEC)
.withPath("/path/to/data-a.parquet")
.withFileSizeInBytes(0)
.withPartitionPath("data_bucket=3") // easy way to set partition data for now
.withRecordCount(2) // needs at least one record or else metrics will filter it out
.build();
@Rule
public TemporaryFolder temp = new TemporaryFolder();
// per-test state initialized by setupTable()
File tableDir = null;
String tableLocation = null;
File metadataDir = null;
File versionHintFile = null;
Table table = null;
@Before
public void setupTable() throws Exception {
this.tableDir = temp.newFolder();
tableDir.delete(); // created by table create
this.tableLocation = tableDir.toURI().toString();
this.metadataDir = new File(tableDir, "metadata");
this.versionHintFile = new File(metadataDir, "version-hint.text");
this.table = TABLES.create(SCHEMA, SPEC, tableLocation);
}
// returns the Avro manifest files in the metadata folder, excluding
// snapshot ("snap") manifest lists
List<File> listManifestFiles() {
return Lists.newArrayList(metadataDir.listFiles((dir, name) ->
!name.startsWith("snap") && Files.getFileExtension(name).equalsIgnoreCase("avro")));
}
// metadata file for table version i, e.g. metadata/v3.metadata.json
File version(int i) {
return new File(metadataDir, "v" + i + getFileExtension(new Configuration()));
}
// parses the metadata file for the given version
TableMetadata readMetadataVersion(int version) {
return TableMetadataParser.read(new TestTables.TestTableOperations("table", tableDir),
localInput(version(version)));
}
// reads the current version number from the version-hint file
int readVersionHint() throws IOException {
return Integer.parseInt(Files.readFirstLine(versionHintFile, Charsets.UTF_8));
}
// overwrites the version-hint file, used to simulate a stale hint
void replaceVersionHint(int version) throws IOException {
// remove the checksum that will no longer match
new File(metadataDir, ".version-hint.text.crc").delete();
Files.write(String.valueOf(version), versionHintFile, Charsets.UTF_8);
}
}
| 6,294 |
0 | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg | Create_ds/iceberg/core/src/test/java/com/netflix/iceberg/hadoop/TestHadoopCommits.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg.hadoop;
import com.google.common.collect.Lists;
import com.netflix.iceberg.AssertHelpers;
import com.netflix.iceberg.FileScanTask;
import com.netflix.iceberg.PartitionSpec;
import com.netflix.iceberg.Table;
import com.netflix.iceberg.TableMetadata;
import com.netflix.iceberg.UpdateSchema;
import com.netflix.iceberg.exceptions.CommitFailedException;
import com.netflix.iceberg.types.Types;
import org.junit.Assert;
import org.junit.Test;
import java.io.File;
import java.util.List;
/**
 * Tests HadoopTableOperations commit behavior: metadata version files,
 * the version-hint file, conflicting commits, stale metadata, and both
 * fast and merge appends.
 */
public class TestHadoopCommits extends HadoopTableTestBase {
// table creation writes v1 metadata, the version hint, and no manifests
@Test
public void testCreateTable() throws Exception {
PartitionSpec expectedSpec = PartitionSpec.builderFor(TABLE_SCHEMA)
.bucket("data", 16)
.build();
Assert.assertEquals("Table schema should match schema with reassigned ids",
TABLE_SCHEMA.asStruct(), table.schema().asStruct());
Assert.assertEquals("Table partition spec should match with reassigned ids",
expectedSpec, table.spec());
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());
Assert.assertTrue("Table location should exist",
tableDir.exists());
Assert.assertTrue("Should create metadata folder",
metadataDir.exists() && metadataDir.isDirectory());
Assert.assertTrue("Should create v1 metadata",
version(1).exists() && version(1).isFile());
Assert.assertFalse("Should not create v2 or newer verions",
version(2).exists());
Assert.assertTrue("Should create version hint file",
versionHintFile.exists());
Assert.assertEquals("Should write the current version to the hint file",
1, readVersionHint());
List<File> manifests = listManifestFiles();
Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
}
// a schema update writes v2 metadata and bumps the version hint
@Test
public void testSchemaUpdate() throws Exception {
Assert.assertTrue("Should create v1 metadata",
version(1).exists() && version(1).isFile());
Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
table.updateSchema()
.addColumn("n", Types.IntegerType.get())
.commit();
Assert.assertTrue("Should create v2 for the update",
version(2).exists() && version(2).isFile());
Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
Assert.assertEquals("Table schema should match schema with reassigned ids",
UPDATED_SCHEMA.asStruct(), table.schema().asStruct());
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
Assert.assertEquals("Should not create any scan tasks", 0, tasks.size());
List<File> manifests = listManifestFiles();
Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
}
// a commit fails when the next version file already exists on disk
@Test
public void testFailedCommit() throws Exception {
// apply the change to metadata without committing
UpdateSchema update = table.updateSchema().addColumn("n", Types.IntegerType.get());
update.apply();
Assert.assertTrue("Should create v1 metadata",
version(1).exists() && version(1).isFile());
Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
// plant a conflicting v2 file; the commit based on v1 must then fail
// NOTE(review): createNewFile()'s boolean result is ignored here; the file
// cannot already exist (asserted above), so creation should succeed
version(2).createNewFile();
AssertHelpers.assertThrows("Should fail to commit change based on v1 when v2 exists",
CommitFailedException.class, "Version 2 already exists", update::commit);
List<File> manifests = listManifestFiles();
Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
}
// a pending update built on stale metadata must fail after a refresh shows
// another commit landed first
@Test
public void testStaleMetadata() throws Exception {
Table tableCopy = TABLES.load(tableLocation);
Assert.assertTrue("Should create v1 metadata",
version(1).exists() && version(1).isFile());
Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
// prepare changes on the copy without committing
UpdateSchema updateCopy = tableCopy.updateSchema()
.addColumn("m", Types.IntegerType.get());
updateCopy.apply();
table.updateSchema()
.addColumn("n", Types.IntegerType.get())
.commit();
Assert.assertTrue("Should create v2 for the update",
version(2).exists() && version(2).isFile());
Assert.assertNotEquals("Unmodified copy should be out of date after update",
table.schema().asStruct(), tableCopy.schema().asStruct());
// update the table
tableCopy.refresh();
Assert.assertEquals("Copy should be back in sync",
table.schema().asStruct(), tableCopy.schema().asStruct());
AssertHelpers.assertThrows("Should fail with stale base metadata",
CommitFailedException.class, "based on stale table metadata", updateCopy::commit);
List<File> manifests = listManifestFiles();
Assert.assertEquals("Should contain 0 Avro manifest files", 0, manifests.size());
}
// loading with a stale version hint still resolves to the latest metadata
@Test
public void testStaleVersionHint() throws Exception {
Table stale = TABLES.load(tableLocation);
Assert.assertTrue("Should create v1 metadata",
version(1).exists() && version(1).isFile());
Assert.assertFalse("Should not create v2 or newer versions",
version(2).exists());
table.updateSchema()
.addColumn("n", Types.IntegerType.get())
.commit();
Assert.assertTrue("Should create v2 for the update",
version(2).exists() && version(2).isFile());
Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
Assert.assertNotEquals("Stable table schema should not match",
UPDATED_SCHEMA.asStruct(), stale.schema().asStruct());
// roll the version hint back to 1
replaceVersionHint(1);
Table reloaded = TABLES.load(tableLocation);
Assert.assertEquals("Updated schema for newly loaded table should match",
UPDATED_SCHEMA.asStruct(), reloaded.schema().asStruct());
stale.refresh();
Assert.assertEquals("Refreshed schema for stale table should match",
UPDATED_SCHEMA.asStruct(), reloaded.schema().asStruct());
}
// each fast append writes a new manifest without merging older ones
@Test
public void testFastAppend() throws Exception {
// first append
table.newFastAppend()
.appendFile(FILE_A)
.commit();
Assert.assertTrue("Should create v2 for the update",
version(2).exists() && version(2).isFile());
Assert.assertEquals("Should write the current version to the hint file",
2, readVersionHint());
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
Assert.assertEquals("Should scan 1 file", 1, tasks.size());
List<File> manifests = listManifestFiles();
Assert.assertEquals("Should contain only one Avro manifest file", 1, manifests.size());
// second append
table.newFastAppend()
.appendFile(FILE_B)
.commit();
Assert.assertTrue("Should create v3 for the update",
version(3).exists() && version(3).isFile());
Assert.assertEquals("Should write the current version to the hint file",
3, readVersionHint());
tasks = Lists.newArrayList(table.newScan().planFiles());
Assert.assertEquals("Should scan 2 files", 2, tasks.size());
Assert.assertEquals("Should contain 2 Avro manifest files",
2, listManifestFiles().size());
TableMetadata metadata = readMetadataVersion(3);
Assert.assertEquals("Current snapshot should contain 2 manifests",
2, metadata.currentSnapshot().manifests().size());
}
// a regular append with min-count-to-merge=1 merges all manifests into one
@Test
public void testMergeAppend() throws Exception {
// NOTE(review): calling another @Test method directly to reuse its setup;
// works but couples this test to testFastAppend's final state (v3)
testFastAppend(); // create 2 compatible manifest files that will be merged
// merge all manifests for this test
table.updateProperties().set("commit.manifest.min-count-to-merge", "1").commit();
// third append
table.newAppend()
.appendFile(FILE_C)
.commit();
List<FileScanTask> tasks = Lists.newArrayList(table.newScan().planFiles());
Assert.assertEquals("Should scan 3 files", 3, tasks.size());
Assert.assertEquals("Should contain 3 Avro manifest files",
3, listManifestFiles().size());
TableMetadata metadata = readMetadataVersion(5);
Assert.assertEquals("Current snapshot should contain 1 merged manifest",
1, metadata.currentSnapshot().manifests().size());
}
}
| 6,295 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/ManifestEntry.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Objects;
import com.netflix.iceberg.avro.AvroSchemaUtil;
import com.netflix.iceberg.types.Types.IntegerType;
import com.netflix.iceberg.types.Types.LongType;
import com.netflix.iceberg.types.Types.StructType;
import org.apache.avro.generic.IndexedRecord;
import org.apache.avro.specific.SpecificData;
import java.util.Collection;
import static com.netflix.iceberg.types.Types.NestedField.required;
/**
 * An entry in a manifest file: a {@link DataFile} together with its tracking {@link Status}
 * and the id of the snapshot in which the file was added.
 * <p>
 * Implements Avro's {@link IndexedRecord} so entries can be written to and read from manifest
 * files directly; field ordinals match the schema built by {@link #wrapFileSchema}.
 */
class ManifestEntry implements IndexedRecord, SpecificData.SchemaConstructable {
  enum Status {
    EXISTING(0),
    ADDED(1),
    DELETED(2);

    // Lookup table from the persisted numeric id back to the Status. Private and final so
    // callers cannot mutate it and so it no longer shadows the implicit Status.values()
    // method; use fromId(int) to translate ids.
    private static final Status[] STATUSES = new Status[3];
    static {
      for (Status status : Status.values()) {
        STATUSES[status.id] = status;
      }
    }

    private final int id;

    Status(int id) {
      this.id = id;
    }

    /**
     * @return the numeric id stored for this status in manifest files
     */
    public int id() {
      return id;
    }

    /**
     * @return the status persisted with the given numeric id
     */
    public static Status fromId(int id) {
      return STATUSES[id];
    }
  }

  private final org.apache.avro.Schema schema;
  private Status status = Status.EXISTING;
  private long snapshotId = 0L;
  private DataFile file = null;

  // used by Avro (via SchemaConstructable) to instantiate entries while reading
  public ManifestEntry(org.apache.avro.Schema schema) {
    this.schema = schema;
  }

  ManifestEntry(StructType partitionType) {
    this.schema = AvroSchemaUtil.convert(getSchema(partitionType), "manifest_entry");
  }

  // copy constructor; deep-copies the wrapped file so the copy is independent
  private ManifestEntry(ManifestEntry toCopy) {
    this.schema = toCopy.schema;
    this.status = toCopy.status;
    this.snapshotId = toCopy.snapshotId;
    this.file = toCopy.file().copy();
  }

  // the wrap* methods reuse this instance as a mutable carrier while writing manifests

  ManifestEntry wrapExisting(long snapshotId, DataFile file) {
    this.status = Status.EXISTING;
    this.snapshotId = snapshotId;
    this.file = file;
    return this;
  }

  ManifestEntry wrapAppend(long snapshotId, DataFile file) {
    this.status = Status.ADDED;
    this.snapshotId = snapshotId;
    this.file = file;
    return this;
  }

  ManifestEntry wrapDelete(long snapshotId, DataFile file) {
    this.status = Status.DELETED;
    this.snapshotId = snapshotId;
    this.file = file;
    return this;
  }

  /**
   * @return the status of the file, whether EXISTING, ADDED, or DELETED
   */
  public Status status() {
    return status;
  }

  /**
   * @return id of the snapshot in which the file was added to the table
   */
  public long snapshotId() {
    return snapshotId;
  }

  /**
   * @return a file
   */
  public DataFile file() {
    return file;
  }

  /**
   * @return an independent copy of this entry (the file is deep-copied)
   */
  public ManifestEntry copy() {
    return new ManifestEntry(this);
  }

  @Override
  public void put(int i, Object v) {
    switch (i) {
      case 0:
        this.status = Status.fromId((Integer) v);
        return;
      case 1:
        this.snapshotId = (Long) v;
        return;
      case 2:
        this.file = (DataFile) v;
        return;
      default:
        // ignore the object, it must be from a newer version of the format
    }
  }

  @Override
  public Object get(int i) {
    switch (i) {
      case 0:
        return status.id();
      case 1:
        return snapshotId;
      case 2:
        return file;
      default:
        throw new UnsupportedOperationException("Unknown field ordinal: " + i);
    }
  }

  @Override
  public org.apache.avro.Schema getSchema() {
    return schema;
  }

  static Schema projectSchema(StructType partitionType, Collection<String> columns) {
    return wrapFileSchema(
        new Schema(DataFile.getType(partitionType).fields()).select(columns).asStruct());
  }

  static Schema getSchema(StructType partitionType) {
    return wrapFileSchema(DataFile.getType(partitionType));
  }

  private static Schema wrapFileSchema(StructType fileStruct) {
    // top-level manifest entry columns use the reserved ids 0, 1, and 2
    return new Schema(
        required(0, "status", IntegerType.get()),
        required(1, "snapshot_id", LongType.get()),
        required(2, "data_file", fileStruct));
  }

  @Override
  public String toString() {
    return Objects.toStringHelper(this)
        .add("status", status)
        .add("snapshot_id", snapshotId)
        .add("file", file)
        .toString();
  }
}
| 6,296 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/FilteredManifest.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import com.netflix.iceberg.ManifestEntry.Status;
import com.netflix.iceberg.expressions.Evaluator;
import com.netflix.iceberg.expressions.Expression;
import com.netflix.iceberg.expressions.Expressions;
import com.netflix.iceberg.expressions.InclusiveMetricsEvaluator;
import com.netflix.iceberg.expressions.Projections;
import java.util.Collection;
import java.util.Iterator;
/**
 * A view of a {@link ManifestReader} with partition and row filters applied.
 * <p>
 * The filters are used for pruning: entries that cannot match are dropped, while returned
 * entries may still contain non-matching rows.
 */
public class FilteredManifest implements Filterable<FilteredManifest> {
  private final ManifestReader reader;
  private final Expression partFilter;
  private final Expression rowFilter;
  private final Collection<String> columns;

  // lazy state
  private Evaluator lazyEvaluator = null;
  private InclusiveMetricsEvaluator lazyMetricsEvaluator = null;

  FilteredManifest(ManifestReader reader, Expression partFilter, Expression rowFilter,
                   Collection<String> columns) {
    Preconditions.checkNotNull(reader, "ManifestReader cannot be null");
    this.reader = reader;
    this.partFilter = partFilter;
    this.rowFilter = rowFilter;
    this.columns = columns;
  }

  @Override
  public FilteredManifest select(Collection<String> columns) {
    return new FilteredManifest(reader, partFilter, rowFilter, columns);
  }

  @Override
  public FilteredManifest filterPartitions(Expression expr) {
    return new FilteredManifest(reader,
        Expressions.and(partFilter, expr),
        rowFilter,
        columns);
  }

  @Override
  public FilteredManifest filterRows(Expression expr) {
    // also project the row filter into an inclusive partition filter so it can prune partitions
    Expression projected = Projections.inclusive(reader.spec()).project(expr);
    return new FilteredManifest(reader,
        Expressions.and(partFilter, projected),
        Expressions.and(rowFilter, expr),
        columns);
  }

  /**
   * @return all entries (including deleted ones) that may match the filters
   */
  Iterable<ManifestEntry> allEntries() {
    // Fix: prune when EITHER filter is non-trivial. The previous check required BOTH filters
    // to be non-trivial, so a manifest filtered by only rows (or only partitions) was not
    // pruned at all. A trivial filter evaluates to true, so applying both evaluators is safe.
    if (hasRowFilter() || hasPartitionFilter()) {
      Evaluator evaluator = evaluator();
      InclusiveMetricsEvaluator metricsEvaluator = metricsEvaluator();
      return Iterables.filter(reader.entries(columns),
          entry -> (entry != null &&
              evaluator.eval(entry.file().partition()) &&
              metricsEvaluator.eval(entry.file())));
    } else {
      return reader.entries(columns);
    }
  }

  /**
   * @return entries whose status is not DELETED and that may match the filters
   */
  Iterable<ManifestEntry> liveEntries() {
    if (hasRowFilter() || hasPartitionFilter()) {
      Evaluator evaluator = evaluator();
      InclusiveMetricsEvaluator metricsEvaluator = metricsEvaluator();
      return Iterables.filter(reader.entries(columns),
          entry -> (entry != null &&
              entry.status() != Status.DELETED &&
              evaluator.eval(entry.file().partition()) &&
              metricsEvaluator.eval(entry.file())));
    } else {
      return Iterables.filter(reader.entries(columns),
          entry -> entry != null && entry.status() != Status.DELETED);
    }
  }

  @Override
  public Iterator<DataFile> iterator() {
    if (hasRowFilter() || hasPartitionFilter()) {
      Evaluator evaluator = evaluator();
      InclusiveMetricsEvaluator metricsEvaluator = metricsEvaluator();
      return Iterators.transform(
          Iterators.filter(reader.iterator(partFilter, columns),
              input -> (input != null &&
                  evaluator.eval(input.partition()) &&
                  metricsEvaluator.eval(input))),
          DataFile::copy);
    } else {
      return Iterators.transform(reader.iterator(partFilter, columns), DataFile::copy);
    }
  }

  // true if the row filter would reject at least some entries
  private boolean hasRowFilter() {
    return rowFilter != null && rowFilter != Expressions.alwaysTrue();
  }

  // true if the partition filter would reject at least some entries
  private boolean hasPartitionFilter() {
    return partFilter != null && partFilter != Expressions.alwaysTrue();
  }

  private Evaluator evaluator() {
    if (lazyEvaluator == null) {
      // guard against a null partition filter, since the filters are now applied independently
      Expression filter = partFilter != null ? partFilter : Expressions.alwaysTrue();
      this.lazyEvaluator = new Evaluator(reader.spec().partitionType(), filter);
    }
    return lazyEvaluator;
  }

  private InclusiveMetricsEvaluator metricsEvaluator() {
    if (lazyMetricsEvaluator == null) {
      // guard against a null row filter, since the filters are now applied independently
      Expression filter = rowFilter != null ? rowFilter : Expressions.alwaysTrue();
      this.lazyMetricsEvaluator = new InclusiveMetricsEvaluator(reader.spec().schema(), filter);
    }
    return lazyMetricsEvaluator;
  }
}
| 6,297 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/BaseMetastoreTables.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableMap;
import com.netflix.iceberg.exceptions.AlreadyExistsException;
import com.netflix.iceberg.exceptions.NoSuchTableException;
import org.apache.hadoop.conf.Configuration;
import java.util.Map;
import static com.netflix.iceberg.TableMetadata.newTableMetadata;
/**
 * Base class for {@link Tables} implementations whose table metadata pointer lives in a
 * metastore. Subclasses supply the metastore-specific {@link BaseMetastoreTableOperations}.
 */
public abstract class BaseMetastoreTables implements Tables {
  private final Configuration conf;

  public BaseMetastoreTables(Configuration conf) {
    this.conf = conf;
  }

  /**
   * @return table operations for the given database and table name
   */
  protected abstract BaseMetastoreTableOperations newTableOps(Configuration conf,
                                                              String database, String table);

  /**
   * Loads an existing table.
   *
   * @throws NoSuchTableException if no current metadata exists for the table
   */
  public Table load(String database, String table) {
    TableOperations operations = newTableOps(conf, database, table);
    if (operations.current() == null) {
      throw new NoSuchTableException("Table does not exist: " + database + "." + table);
    }
    return new BaseTable(operations, database + "." + table);
  }

  /**
   * Creates an unpartitioned table.
   */
  public Table create(Schema schema, String database, String table) {
    return create(schema, PartitionSpec.unpartitioned(), database, table);
  }

  /**
   * Creates a table with no extra properties.
   */
  public Table create(Schema schema, PartitionSpec spec, String database, String table) {
    return create(schema, spec, ImmutableMap.of(), database, table);
  }

  /**
   * Creates a table and commits its initial metadata.
   *
   * @throws AlreadyExistsException if the table already has current metadata
   */
  public Table create(Schema schema, PartitionSpec spec, Map<String, String> properties,
                      String database, String table) {
    TableOperations operations = newTableOps(conf, database, table);
    if (operations.current() != null) {
      throw new AlreadyExistsException("Table already exists: " + database + "." + table);
    }

    String baseLocation = defaultWarehouseLocation(conf, database, table);
    TableMetadata initialMetadata =
        newTableMetadata(operations, schema, spec, baseLocation, properties);
    operations.commit(null, initialMetadata);

    return new BaseTable(operations, database + "." + table);
  }

  /**
   * Starts a create-table transaction with no extra properties.
   */
  public Transaction beginCreate(Schema schema, PartitionSpec spec, String database, String table) {
    return beginCreate(schema, spec, ImmutableMap.of(), database, table);
  }

  /**
   * Starts a create-table transaction; nothing is committed until the transaction commits.
   *
   * @throws AlreadyExistsException if the table already has current metadata
   */
  public Transaction beginCreate(Schema schema, PartitionSpec spec, Map<String, String> properties,
                                 String database, String table) {
    TableOperations operations = newTableOps(conf, database, table);
    if (operations.current() != null) {
      throw new AlreadyExistsException("Table already exists: " + database + "." + table);
    }

    String baseLocation = defaultWarehouseLocation(conf, database, table);
    TableMetadata initialMetadata =
        newTableMetadata(operations, schema, spec, baseLocation, properties);

    return BaseTransaction.createTableTransaction(operations, initialMetadata);
  }

  /**
   * Starts a replace-table transaction with no extra properties.
   */
  public Transaction beginReplace(Schema schema, PartitionSpec spec,
                                  String database, String table) {
    return beginReplace(schema, spec, ImmutableMap.of(), database, table);
  }

  /**
   * Starts a replace-table transaction. If the table does not exist yet, this degenerates to a
   * create-table transaction instead.
   */
  public Transaction beginReplace(Schema schema, PartitionSpec spec, Map<String, String> properties,
                                  String database, String table) {
    TableOperations operations = newTableOps(conf, database, table);

    TableMetadata currentMetadata = operations.current();
    if (currentMetadata != null) {
      // table exists: build replacement metadata from the current metadata
      return BaseTransaction.replaceTableTransaction(
          operations, currentMetadata.buildReplacement(schema, spec, properties));
    }

    // table does not exist: fall back to creating it
    String baseLocation = defaultWarehouseLocation(conf, database, table);
    return BaseTransaction.createTableTransaction(
        operations, newTableMetadata(operations, schema, spec, baseLocation, properties));
  }

  /**
   * @return the default location for the table under the configured warehouse directory
   */
  protected String defaultWarehouseLocation(Configuration conf,
                                            String database, String table) {
    String warehouseLocation = conf.get("hive.metastore.warehouse.dir");
    Preconditions.checkNotNull(warehouseLocation,
        "Warehouse location is not set: hive.metastore.warehouse.dir=null");
    return String.format("%s/%s.db/%s", warehouseLocation, database, table);
  }
}
| 6,298 |
0 | Create_ds/iceberg/core/src/main/java/com/netflix | Create_ds/iceberg/core/src/main/java/com/netflix/iceberg/SchemaUpdate.java | /*
* Copyright 2017 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.iceberg;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Multimap;
import com.google.common.collect.Multimaps;
import com.netflix.iceberg.types.Type;
import com.netflix.iceberg.types.TypeUtil;
import com.netflix.iceberg.types.Types;
import java.util.Collection;
import java.util.List;
import java.util.Map;
/**
 * Schema evolution API implementation.
 * <p>
 * Pending changes are accumulated in three structures (deletes, updates, adds) and are only
 * combined with the current schema when {@link #apply()} or {@link #commit()} runs.
 */
class SchemaUpdate implements UpdateSchema {
  // sentinel parent id for columns added at the top level of the table schema
  private static final int TABLE_ROOT_ID = -1;

  private final TableOperations ops;
  private final TableMetadata base;
  private final Schema schema;
  // field ids scheduled for removal
  private final List<Integer> deletes = Lists.newArrayList();
  // field id -> replacement field carrying a new name and/or promoted type
  private final Map<Integer, Types.NestedField> updates = Maps.newHashMap();
  // parent struct's field id (or TABLE_ROOT_ID) -> new columns to append to that struct
  private final Multimap<Integer, Types.NestedField> adds =
      Multimaps.newListMultimap(Maps.newHashMap(), Lists::newArrayList);
  // highest column id assigned so far; incremented for every added column
  private int lastColumnId;

  SchemaUpdate(TableOperations ops) {
    this.ops = ops;
    this.base = ops.current();
    this.schema = base.schema();
    this.lastColumnId = base.lastColumnId();
  }

  /**
   * For testing only.
   */
  SchemaUpdate(Schema schema, int lastColumnId) {
    this.ops = null;
    this.base = null;
    this.schema = schema;
    this.lastColumnId = lastColumnId;
  }

  @Override
  public UpdateSchema addColumn(String name, Type type) {
    // dotted names are reserved for addressing nested fields; adding one would be ambiguous
    Preconditions.checkArgument(!name.contains("."),
        "Cannot add column with ambiguous name: %s, use addColumn(parent, name, type)", name);
    return addColumn(null, name, type);
  }

  @Override
  public UpdateSchema addColumn(String parent, String name, Type type) {
    int parentId = TABLE_ROOT_ID;
    if (parent != null) {
      Types.NestedField parentField = schema.findField(parent);
      Preconditions.checkArgument(parentField != null, "Cannot find parent struct: %s", parent);
      Type parentType = parentField.type();
      if (parentType.isNestedType()) {
        Type.NestedType nested = parentType.asNestedType();
        if (nested.isMapType()) {
          // fields are added to the map value type
          parentField = nested.asMapType().fields().get(1);
        } else if (nested.isListType()) {
          // fields are added to the element type
          parentField = nested.asListType().fields().get(0);
        }
      }
      // after unwrapping map/list, the target must be a struct to accept a new field
      Preconditions.checkArgument(
          parentField.type().isNestedType() && parentField.type().asNestedType().isStructType(),
          "Cannot add to non-struct column: %s: %s", parent, parentField.type());
      parentId = parentField.fieldId();
      Preconditions.checkArgument(!deletes.contains(parentId),
          "Cannot add to a column that will be deleted: %s", parent);
      Preconditions.checkArgument(schema.findField(parent + "." + name) == null,
          "Cannot add column, name already exists: " + parent + "." + name);
    } else {
      Preconditions.checkArgument(schema.findField(name) == null,
          "Cannot add column, name already exists: " + name);
    }

    // assign new IDs in order
    int newId = assignNewColumnId();

    // new columns are registered as optional; nested ids are assigned depth-first
    adds.put(parentId, Types.NestedField.optional(newId, name,
        TypeUtil.assignFreshIds(type, this::assignNewColumnId)));

    return this;
  }

  @Override
  public UpdateSchema deleteColumn(String name) {
    Types.NestedField field = schema.findField(name);
    Preconditions.checkArgument(field != null, "Cannot delete missing column: %s", name);
    // a delete cannot coexist with pending additions or updates on the same field
    Preconditions.checkArgument(!adds.containsKey(field.fieldId()),
        "Cannot delete a column that has additions: %s", name);
    Preconditions.checkArgument(!updates.containsKey(field.fieldId()),
        "Cannot delete a column that has updates: %s", name);

    deletes.add(field.fieldId());

    return this;
  }

  @Override
  public UpdateSchema renameColumn(String name, String newName) {
    Types.NestedField field = schema.findField(name);
    Preconditions.checkArgument(field != null, "Cannot rename missing column: %s", name);
    Preconditions.checkArgument(!deletes.contains(field.fieldId()),
        "Cannot rename a column that will be deleted: %s", field.name());

    // merge with an update, if present. The required(...) field here is only a carrier for the
    // new name/type; the original field's optional/required flag is preserved when changes are
    // applied in ApplyChanges.struct below.
    int fieldId = field.fieldId();
    Types.NestedField update = updates.get(fieldId);
    if (update != null) {
      updates.put(fieldId, Types.NestedField.required(fieldId, newName, update.type()));
    } else {
      updates.put(fieldId, Types.NestedField.required(fieldId, newName, field.type()));
    }

    return this;
  }

  @Override
  public UpdateSchema updateColumn(String name, Type.PrimitiveType newType) {
    Types.NestedField field = schema.findField(name);
    Preconditions.checkArgument(field != null, "Cannot update missing column: %s", name);
    Preconditions.checkArgument(!deletes.contains(field.fieldId()),
        "Cannot update a column that will be deleted: %s", field.name());
    // only compatible type promotions are allowed (checked by TypeUtil.isPromotionAllowed)
    Preconditions.checkArgument(TypeUtil.isPromotionAllowed(field.type(), newType),
        "Cannot change column type: %s: %s -> %s", name, field.type(), newType);

    // merge with a rename, if present (same carrier convention as renameColumn)
    int fieldId = field.fieldId();
    Types.NestedField rename = updates.get(fieldId);
    if (rename != null) {
      updates.put(fieldId, Types.NestedField.required(fieldId, rename.name(), newType));
    } else {
      updates.put(fieldId, Types.NestedField.required(fieldId, field.name(), newType));
    }

    return this;
  }

  /**
   * Apply the pending changes to the original schema and returns the result.
   * <p>
   * This does not result in a permanent update.
   *
   * @return the result Schema when all pending updates are applied
   */
  @Override
  public Schema apply() {
    return applyChanges(schema, deletes, updates, adds);
  }

  @Override
  public void commit() {
    // commits the applied schema along with the highest column id assigned so far
    TableMetadata update = base.updateSchema(apply(), lastColumnId);
    ops.commit(base, update);
  }

  // returns the next unused column id and records it as assigned
  private int assignNewColumnId() {
    int next = lastColumnId + 1;
    this.lastColumnId = next;
    return next;
  }

  // walks the schema with ApplyChanges and rebuilds a Schema from the resulting struct
  private static Schema applyChanges(Schema schema, List<Integer> deletes,
                                     Map<Integer, Types.NestedField> updates,
                                     Multimap<Integer, Types.NestedField> adds) {
    Types.StructType struct = TypeUtil
        .visit(schema, new ApplyChanges(deletes, updates, adds))
        .asNestedType().asStructType();
    return new Schema(struct.fields());
  }

  /**
   * Schema visitor that produces a new type tree with the pending deletes, updates, and adds
   * applied. Returning null from {@link #field} signals that the field was deleted.
   */
  private static class ApplyChanges extends TypeUtil.SchemaVisitor<Type> {
    private final List<Integer> deletes;
    private final Map<Integer, Types.NestedField> updates;
    private final Multimap<Integer, Types.NestedField> adds;

    private ApplyChanges(List<Integer> deletes,
                         Map<Integer, Types.NestedField> updates,
                         Multimap<Integer, Types.NestedField> adds) {
      this.deletes = deletes;
      this.updates = updates;
      this.adds = adds;
    }

    @Override
    public Type schema(Schema schema, Type structResult) {
      // append any columns added at the top level of the table schema
      Collection<Types.NestedField> newColumns = adds.get(TABLE_ROOT_ID);
      if (newColumns != null) {
        return addFields(structResult.asNestedType().asStructType(), newColumns);
      }
      return structResult;
    }

    @Override
    public Type struct(Types.StructType struct, List<Type> fieldResults) {
      // rebuild the struct: drop deleted fields (null results), apply renames, and propagate
      // child type changes; field ids and optional/required flags are preserved
      boolean hasChange = false;
      List<Types.NestedField> newFields = Lists.newArrayListWithExpectedSize(fieldResults.size());
      for (int i = 0; i < fieldResults.size(); i += 1) {
        Type resultType = fieldResults.get(i);
        if (resultType == null) {
          // field was deleted
          hasChange = true;
          continue;
        }

        Types.NestedField field = struct.fields().get(i);
        String name = field.name();
        Types.NestedField update = updates.get(field.fieldId());
        if (update != null && update.name() != null) {
          name = update.name();
        }

        if (!name.equals(field.name()) || field.type() != resultType) {
          hasChange = true;
          if (field.isOptional()) {
            newFields.add(Types.NestedField.optional(field.fieldId(), name, resultType));
          } else {
            newFields.add(Types.NestedField.required(field.fieldId(), name, resultType));
          }
        } else {
          // unchanged: keep the original field object
          newFields.add(field);
        }
      }

      if (hasChange) {
        // TODO: What happens if there are no fields left?
        return Types.StructType.of(newFields);
      }

      return struct;
    }

    @Override
    public Type field(Types.NestedField field, Type fieldResult) {
      // the API validates deletes, updates, and additions don't conflict
      int fieldId = field.fieldId();
      if (deletes.contains(fieldId)) {
        // signal deletion to the enclosing struct/list/map handler
        return null;
      }

      Types.NestedField update = updates.get(field.fieldId());
      if (update != null && update.type() != field.type()) {
        // rename is handled in struct, but a type update replaces the result type here
        return update.type();
      }

      // append any new columns registered under this field's id
      Collection<Types.NestedField> newFields = adds.get(fieldId);
      if (newFields != null && !newFields.isEmpty()) {
        return addFields(fieldResult.asNestedType().asStructType(), newFields);
      }

      return fieldResult;
    }

    @Override
    public Type list(Types.ListType list, Type result) {
      // use field to apply updates
      Type elementResult = field(list.fields().get(0), result);
      if (elementResult == null) {
        throw new IllegalArgumentException("Cannot delete element type from list: " + list);
      }

      if (list.elementType() == elementResult) {
        return list;
      }

      // element type changed: rebuild the list, keeping the element id and optionality
      if (list.isElementOptional()) {
        return Types.ListType.ofOptional(list.elementId(), elementResult);
      } else {
        return Types.ListType.ofRequired(list.elementId(), elementResult);
      }
    }

    @Override
    public Type map(Types.MapType map, Type kResult, Type vResult) {
      // if any updates are intended for the key, throw an exception: map keys are immutable
      int keyId = map.fields().get(0).fieldId();
      if (deletes.contains(keyId)) {
        throw new IllegalArgumentException("Cannot delete map keys: " + map);
      } else if (updates.containsKey(keyId)) {
        throw new IllegalArgumentException("Cannot update map keys: " + map);
      } else if (adds.containsKey(keyId)) {
        throw new IllegalArgumentException("Cannot add fields to map keys: " + map);
      } else if (!map.keyType().equals(kResult)) {
        throw new IllegalArgumentException("Cannot alter map keys: " + map);
      }

      // use field to apply updates to the value
      Type valueResult = field(map.fields().get(1), vResult);
      if (valueResult == null) {
        throw new IllegalArgumentException("Cannot delete value type from map: " + map);
      }

      if (map.valueType() == valueResult) {
        return map;
      }

      // value type changed: rebuild the map, keeping key/value ids and value optionality
      if (map.isValueOptional()) {
        return Types.MapType.ofOptional(map.keyId(), map.valueId(), map.keyType(), valueResult);
      } else {
        return Types.MapType.ofRequired(map.keyId(), map.valueId(), map.keyType(), valueResult);
      }
    }

    @Override
    public Type primitive(Type.PrimitiveType primitive) {
      // primitives are only changed via field-level type updates, never rewritten here
      return primitive;
    }
  }

  // returns a copy of the struct with the given fields appended at the end
  private static Types.StructType addFields(Types.StructType struct,
                                            Collection<Types.NestedField> adds) {
    List<Types.NestedField> newFields = Lists.newArrayList(struct.fields());
    newFields.addAll(adds);
    return Types.StructType.of(newFields);
  }
}
| 6,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.