idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
23,700 | public static TableId of ( String project , String dataset , String table ) { return new TableId ( checkNotNull ( project ) , checkNotNull ( dataset ) , checkNotNull ( table ) ) ; } | Creates a table identity given project s dataset s and table s user - defined ids . |
23,701 | public static TableId of ( String dataset , String table ) { return new TableId ( null , checkNotNull ( dataset ) , checkNotNull ( table ) ) ; } | Creates a table identity given dataset s and table s user - defined ids . |
23,702 | static < T > T convertToCustomClass ( Object object , Class < T > clazz ) { return deserializeToClass ( object , clazz , ErrorPath . EMPTY ) ; } | Converts a standard library Java representation of JSON data to an object of the provided class . |
23,703 | public final Operation patchRegionBackendService ( String backendService , BackendService backendServiceResource , List < String > fieldMask ) { PatchRegionBackendServiceHttpRequest request = PatchRegionBackendServiceHttpRequest . newBuilder ( ) . setBackendService ( backendService ) . setBackendServiceResource ( backendServiceResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return patchRegionBackendService ( request ) ; } | Updates the specified regional BackendService resource with the data included in the request . There are several restrictions and guidelines to keep in mind when updating a backend service . Read Restrictions and Guidelines for more information . This method supports PATCH semantics and uses the JSON merge patch format and processing rules . |
23,704 | public final Group updateGroup ( Group group ) { UpdateGroupRequest request = UpdateGroupRequest . newBuilder ( ) . setGroup ( group ) . build ( ) ; return updateGroup ( request ) ; } | Updates an existing group . You can change any group attributes except name . |
23,705 | public final Operation deleteAccessConfigInstance ( ProjectZoneInstanceName instance , String networkInterface , String accessConfig ) { DeleteAccessConfigInstanceHttpRequest request = DeleteAccessConfigInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setNetworkInterface ( networkInterface ) . setAccessConfig ( accessConfig ) . build ( ) ; return deleteAccessConfigInstance ( request ) ; } | Deletes an access config from an instance s network interface . |
23,706 | public final Operation detachDiskInstance ( ProjectZoneInstanceName instance , String deviceName ) { DetachDiskInstanceHttpRequest request = DetachDiskInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setDeviceName ( deviceName ) . build ( ) ; return detachDiskInstance ( request ) ; } | Detaches a disk from an instance . |
23,707 | public final SerialPortOutput getSerialPortOutputInstance ( ProjectZoneInstanceName instance , Integer port , String start ) { GetSerialPortOutputInstanceHttpRequest request = GetSerialPortOutputInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setPort ( port ) . setStart ( start ) . build ( ) ; return getSerialPortOutputInstance ( request ) ; } | Returns the last 1 MB of serial port output from the specified instance . |
23,708 | public final Operation setDiskAutoDeleteInstance ( ProjectZoneInstanceName instance , Boolean autoDelete , String deviceName ) { SetDiskAutoDeleteInstanceHttpRequest request = SetDiskAutoDeleteInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setAutoDelete ( autoDelete ) . setDeviceName ( deviceName ) . build ( ) ; return setDiskAutoDeleteInstance ( request ) ; } | Sets the auto - delete flag for a disk attached to an instance . |
23,709 | public final Operation setShieldedInstanceIntegrityPolicyInstance ( ProjectZoneInstanceName instance , ShieldedInstanceIntegrityPolicy shieldedInstanceIntegrityPolicyResource , List < String > fieldMask ) { SetShieldedInstanceIntegrityPolicyInstanceHttpRequest request = SetShieldedInstanceIntegrityPolicyInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setShieldedInstanceIntegrityPolicyResource ( shieldedInstanceIntegrityPolicyResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return setShieldedInstanceIntegrityPolicyInstance ( request ) ; } | Sets the Shielded Instance integrity policy for an instance . You can only use this method on a running instance . This method supports PATCH semantics and uses the JSON merge patch format and processing rules . |
23,710 | public final Operation updateAccessConfigInstance ( ProjectZoneInstanceName instance , String networkInterface , AccessConfig accessConfigResource ) { UpdateAccessConfigInstanceHttpRequest request = UpdateAccessConfigInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setNetworkInterface ( networkInterface ) . setAccessConfigResource ( accessConfigResource ) . build ( ) ; return updateAccessConfigInstance ( request ) ; } | Updates the specified access config from an instance s network interface with the data included in the request . This method supports PATCH semantics and uses the JSON merge patch format and processing rules . |
23,711 | public final Operation updateNetworkInterfaceInstance ( ProjectZoneInstanceName instance , String networkInterface , NetworkInterface networkInterfaceResource , List < String > fieldMask ) { UpdateNetworkInterfaceInstanceHttpRequest request = UpdateNetworkInterfaceInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setNetworkInterface ( networkInterface ) . setNetworkInterfaceResource ( networkInterfaceResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return updateNetworkInterfaceInstance ( request ) ; } | Updates an instance s network interface . This method follows PATCH semantics . |
23,712 | public final Operation updateShieldedInstanceConfigInstance ( ProjectZoneInstanceName instance , ShieldedInstanceConfig shieldedInstanceConfigResource , List < String > fieldMask ) { UpdateShieldedInstanceConfigInstanceHttpRequest request = UpdateShieldedInstanceConfigInstanceHttpRequest . newBuilder ( ) . setInstance ( instance == null ? null : instance . toString ( ) ) . setShieldedInstanceConfigResource ( shieldedInstanceConfigResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return updateShieldedInstanceConfigInstance ( request ) ; } | Updates the Shielded Instance config for an instance . You can only use this method on a stopped instance . This method supports PATCH semantics and uses the JSON merge patch format and processing rules . |
23,713 | public Map < String , Object > getData ( ) { Map < String , Object > result = super . getData ( ) ; Preconditions . checkNotNull ( result , "Data in a QueryDocumentSnapshot should be non-null" ) ; return result ; } | Returns the fields of the document as a Map . Field values will be converted to their native Java representation . |
23,714 | public final GenerateAccessTokenResponse generateAccessToken ( ServiceAccountName name , List < String > delegates , List < String > scope , Duration lifetime ) { GenerateAccessTokenRequest request = GenerateAccessTokenRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . addAllDelegates ( delegates ) . addAllScope ( scope ) . setLifetime ( lifetime ) . build ( ) ; return generateAccessToken ( request ) ; } | Generates an OAuth 2 . 0 access token for a service account . |
23,715 | public final GenerateIdTokenResponse generateIdToken ( ServiceAccountName name , List < String > delegates , String audience , boolean includeEmail ) { GenerateIdTokenRequest request = GenerateIdTokenRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . addAllDelegates ( delegates ) . setAudience ( audience ) . setIncludeEmail ( includeEmail ) . build ( ) ; return generateIdToken ( request ) ; } | Generates an OpenID Connect ID token for a service account . |
23,716 | public final SignBlobResponse signBlob ( ServiceAccountName name , List < String > delegates , ByteString payload ) { SignBlobRequest request = SignBlobRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . addAllDelegates ( delegates ) . setPayload ( payload ) . build ( ) ; return signBlob ( request ) ; } | Signs a blob using a service account s system - managed private key . |
23,717 | public final SignJwtResponse signJwt ( ServiceAccountName name , List < String > delegates , String payload ) { SignJwtRequest request = SignJwtRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . addAllDelegates ( delegates ) . setPayload ( payload ) . build ( ) ; return signJwt ( request ) ; } | Signs a JWT using a service account s system - managed private key . |
23,718 | public final GenerateIdentityBindingAccessTokenResponse generateIdentityBindingAccessToken ( ServiceAccountName name , List < String > scope , String jwt ) { GenerateIdentityBindingAccessTokenRequest request = GenerateIdentityBindingAccessTokenRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . addAllScope ( scope ) . setJwt ( jwt ) . build ( ) ; return generateIdentityBindingAccessToken ( request ) ; } | Exchange a JWT signed by third party identity provider to an OAuth 2 . 0 access token |
23,719 | public Operation deprecate ( DeprecationStatus < ImageId > deprecationStatus , OperationOption ... options ) { return compute . deprecate ( getImageId ( ) , deprecationStatus , options ) ; } | Deprecates this image . |
23,720 | public final ReadSession createReadSession ( TableReference tableReference , String parent , int requestedStreams ) { CreateReadSessionRequest request = CreateReadSessionRequest . newBuilder ( ) . setTableReference ( tableReference ) . setParent ( parent ) . setRequestedStreams ( requestedStreams ) . build ( ) ; return createReadSession ( request ) ; } | Creates a new read session . A read session divides the contents of a BigQuery table into one or more streams which can then be used to read data from the table . The read session also specifies properties of the data to be read such as a list of columns or a push - down filter describing the rows to be returned . |
23,721 | public final BatchCreateReadSessionStreamsResponse batchCreateReadSessionStreams ( ReadSession session , int requestedStreams ) { BatchCreateReadSessionStreamsRequest request = BatchCreateReadSessionStreamsRequest . newBuilder ( ) . setSession ( session ) . setRequestedStreams ( requestedStreams ) . build ( ) ; return batchCreateReadSessionStreams ( request ) ; } | Creates additional streams for a ReadSession . This API can be used to dynamically adjust the parallelism of a batch processing task upwards by adding additional workers . |
23,722 | public final void finalizeStream ( Stream stream ) { FinalizeStreamRequest request = FinalizeStreamRequest . newBuilder ( ) . setStream ( stream ) . build ( ) ; finalizeStream ( request ) ; } | Triggers the graceful termination of a single stream in a ReadSession . This API can be used to dynamically adjust the parallelism of a batch processing task downwards without losing data . |
23,723 | public final SplitReadStreamResponse splitReadStream ( Stream originalStream ) { SplitReadStreamRequest request = SplitReadStreamRequest . newBuilder ( ) . setOriginalStream ( originalStream ) . build ( ) ; return splitReadStream ( request ) ; } | Splits a given read stream into two Streams . These streams are referred to as the primary and the residual of the split . The original stream can still be read from in the same manner as before . Both of the returned streams can also be read from and the total rows return by both child streams will be the same as the rows read from the original stream . |
23,724 | public static Builder newBuilder ( String query , List < UserDefinedFunction > functions ) { return newBuilder ( query ) . setUserDefinedFunctions ( functions ) ; } | Returns a builder for a BigQuery view definition . |
23,725 | public static ViewDefinition of ( String query , List < UserDefinedFunction > functions ) { return newBuilder ( query , functions ) . build ( ) ; } | Creates a BigQuery view definition given a query and some user - defined functions . |
23,726 | private static void countFile ( String fname ) throws Exception { final int bufSize = 50 * 1024 * 1024 ; Queue < Future < WorkUnit > > work = new ArrayDeque < > ( ) ; Path path = Paths . get ( new URI ( fname ) ) ; long size = Files . size ( path ) ; System . out . println ( fname + ": " + size + " bytes." ) ; int nThreads = ( int ) Math . ceil ( size / ( double ) bufSize ) ; if ( nThreads > 4 ) nThreads = 4 ; System . out . println ( "Reading the whole file using " + nThreads + " threads..." ) ; Stopwatch sw = Stopwatch . createStarted ( ) ; long total = 0 ; MessageDigest md = MessageDigest . getInstance ( "MD5" ) ; ExecutorService exec = Executors . newFixedThreadPool ( nThreads ) ; int blockIndex ; for ( blockIndex = 0 ; blockIndex < nThreads ; blockIndex ++ ) { work . add ( exec . submit ( new WorkUnit ( Files . newByteChannel ( path ) , bufSize , blockIndex ) ) ) ; } while ( ! work . isEmpty ( ) ) { WorkUnit full = work . remove ( ) . get ( ) ; md . update ( full . buf . array ( ) , 0 , full . buf . position ( ) ) ; total += full . buf . position ( ) ; if ( full . buf . hasRemaining ( ) ) { full . close ( ) ; } else { work . add ( exec . submit ( full . resetForIndex ( blockIndex ++ ) ) ) ; } } exec . shutdown ( ) ; long elapsed = sw . elapsed ( TimeUnit . SECONDS ) ; System . out . println ( "Read all " + total + " bytes in " + elapsed + "s. " ) ; String hex = String . valueOf ( BaseEncoding . base16 ( ) . encode ( md . digest ( ) ) ) ; System . out . println ( "The MD5 is: 0x" + hex ) ; if ( total != size ) { System . out . println ( "Wait, this doesn't match! We saw " + total + " bytes, " + "yet the file size is listed at " + size + " bytes." ) ; } } | Print the length and MD5 of the indicated file . |
23,727 | public final Queue updateQueue ( Queue queue , FieldMask updateMask ) { UpdateQueueRequest request = UpdateQueueRequest . newBuilder ( ) . setQueue ( queue ) . setUpdateMask ( updateMask ) . build ( ) ; return updateQueue ( request ) ; } | Updates a queue . |
23,728 | private Mutation addMutation ( ) { Preconditions . checkState ( ! committed , "Cannot modify a WriteBatch that has already been committed." ) ; Mutation mutation = new Mutation ( ) ; mutations . add ( mutation ) ; return mutation ; } | Adds a new mutation to the batch . |
23,729 | private Map < FieldPath , Object > applyFieldMask ( Map < String , Object > fields , List < FieldPath > fieldMask ) { List < FieldPath > remainingFields = new ArrayList < > ( fieldMask ) ; Map < FieldPath , Object > filteredData = applyFieldMask ( fields , remainingFields , FieldPath . empty ( ) ) ; if ( ! remainingFields . isEmpty ( ) ) { throw new IllegalArgumentException ( String . format ( "Field masks contains invalid path. No data exist at field '%s'." , remainingFields . get ( 0 ) ) ) ; } return filteredData ; } | Removes all values in fields that are not specified in fieldMask . |
23,730 | private Map < FieldPath , Object > applyFieldMask ( Map < String , Object > fields , List < FieldPath > fieldMask , FieldPath root ) { Map < FieldPath , Object > filteredMap = new HashMap < > ( ) ; for ( Entry < String , Object > entry : fields . entrySet ( ) ) { FieldPath currentField = root . append ( FieldPath . of ( entry . getKey ( ) ) ) ; if ( fieldMask . remove ( currentField ) ) { filteredMap . put ( currentField , entry . getValue ( ) ) ; } else if ( entry . getValue ( ) instanceof Map ) { filteredMap . putAll ( applyFieldMask ( ( Map < String , Object > ) entry . getValue ( ) , fieldMask , currentField ) ) ; } else if ( entry . getValue ( ) == FieldValue . DELETE_SENTINEL ) { throw new IllegalArgumentException ( String . format ( "Cannot specify FieldValue.delete() for non-merged field '%s'." , currentField ) ) ; } } return filteredMap ; } | Strips all values in fields that are not specified in fieldMask . Modifies fieldMask inline and removes all matched fields . |
23,731 | public final BatchAnnotateImagesResponse batchAnnotateImages ( List < AnnotateImageRequest > requests ) { BatchAnnotateImagesRequest request = BatchAnnotateImagesRequest . newBuilder ( ) . addAllRequests ( requests ) . build ( ) ; return batchAnnotateImages ( request ) ; } | Run image detection and annotation for a batch of images . |
23,732 | public final ListTimeSeriesPagedResponse listTimeSeries ( ProjectName name , String filter , TimeInterval interval , ListTimeSeriesRequest . TimeSeriesView view ) { ListTimeSeriesRequest request = ListTimeSeriesRequest . newBuilder ( ) . setName ( name == null ? null : name . toString ( ) ) . setFilter ( filter ) . setInterval ( interval ) . setView ( view ) . build ( ) ; return listTimeSeries ( request ) ; } | Lists time series that match a filter . This method does not require a Stackdriver account . |
23,733 | public final Operation deleteSignedUrlKeyBackendService ( String backendService , String keyName ) { DeleteSignedUrlKeyBackendServiceHttpRequest request = DeleteSignedUrlKeyBackendServiceHttpRequest . newBuilder ( ) . setBackendService ( backendService ) . setKeyName ( keyName ) . build ( ) ; return deleteSignedUrlKeyBackendService ( request ) ; } | Deletes a key for validating requests with signed URLs for this backend service . |
23,734 | public final Operation patchBackendService ( String backendService , BackendService backendServiceResource , List < String > fieldMask ) { PatchBackendServiceHttpRequest request = PatchBackendServiceHttpRequest . newBuilder ( ) . setBackendService ( backendService ) . setBackendServiceResource ( backendServiceResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return patchBackendService ( request ) ; } | Patches the specified BackendService resource with the data included in the request . There are several restrictions and guidelines to keep in mind when updating a backend service . Read Restrictions and Guidelines for more information . This method supports PATCH semantics and uses the JSON merge patch format and processing rules . |
23,735 | public final Application updateApplication ( Application application ) { UpdateApplicationRequest request = UpdateApplicationRequest . newBuilder ( ) . setApplication ( application ) . build ( ) ; return updateApplication ( request ) ; } | Updates specified application . |
23,736 | @ SuppressWarnings ( "WeakerAccess" ) public com . google . bigtable . admin . v2 . ProjectName getProjectName ( ) { return com . google . bigtable . admin . v2 . ProjectName . of ( projectId ) ; } | Gets the ProjectName this client is associated with . |
23,737 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Instance > createInstanceAsync ( CreateInstanceRequest request ) { return ApiFutures . transform ( stub . createInstanceOperationCallable ( ) . futureCall ( request . toProto ( projectId ) ) , new ApiFunction < com . google . bigtable . admin . v2 . Instance , Instance > ( ) { public Instance apply ( com . google . bigtable . admin . v2 . Instance proto ) { return Instance . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously creates a new instance and returns its representation wrapped in a future . |
23,738 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Instance > updateInstanceAsync ( UpdateInstanceRequest request ) { return ApiFutures . transform ( stub . partialUpdateInstanceOperationCallable ( ) . futureCall ( request . toProto ( projectId ) ) , new ApiFunction < com . google . bigtable . admin . v2 . Instance , Instance > ( ) { public Instance apply ( com . google . bigtable . admin . v2 . Instance proto ) { return Instance . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously updates an existing instance and returns its representation wrapped in a future . |
23,739 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Instance > getInstanceAsync ( String instanceId ) { String name = NameUtil . formatInstanceName ( projectId , instanceId ) ; com . google . bigtable . admin . v2 . GetInstanceRequest request = com . google . bigtable . admin . v2 . GetInstanceRequest . newBuilder ( ) . setName ( name ) . build ( ) ; return ApiFutures . transform ( stub . getInstanceCallable ( ) . futureCall ( request ) , new ApiFunction < com . google . bigtable . admin . v2 . Instance , Instance > ( ) { public Instance apply ( com . google . bigtable . admin . v2 . Instance proto ) { return Instance . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously gets the instance representation by ID wrapped in a future . |
23,740 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < List < Instance > > listInstancesAsync ( ) { com . google . bigtable . admin . v2 . ListInstancesRequest request = com . google . bigtable . admin . v2 . ListInstancesRequest . newBuilder ( ) . setParent ( NameUtil . formatProjectName ( projectId ) ) . build ( ) ; ApiFuture < com . google . bigtable . admin . v2 . ListInstancesResponse > responseFuture = stub . listInstancesCallable ( ) . futureCall ( request ) ; return ApiFutures . transform ( responseFuture , new ApiFunction < com . google . bigtable . admin . v2 . ListInstancesResponse , List < Instance > > ( ) { public List < Instance > apply ( com . google . bigtable . admin . v2 . ListInstancesResponse proto ) { Verify . verify ( proto . getNextPageToken ( ) . isEmpty ( ) , "Server returned an unexpected paginated response" ) ; ImmutableList . Builder < Instance > instances = ImmutableList . builder ( ) ; for ( com . google . bigtable . admin . v2 . Instance protoInstance : proto . getInstancesList ( ) ) { instances . add ( Instance . fromProto ( protoInstance ) ) ; } ImmutableList . Builder < String > failedZones = ImmutableList . builder ( ) ; for ( String locationStr : proto . getFailedLocationsList ( ) ) { failedZones . add ( NameUtil . extractZoneIdFromLocationName ( locationStr ) ) ; } if ( ! failedZones . build ( ) . isEmpty ( ) ) { throw new PartialListInstancesException ( failedZones . build ( ) , instances . build ( ) ) ; } return instances . build ( ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously lists all of the instances in the current project . |
23,741 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Void > deleteInstanceAsync ( String instanceId ) { String instanceName = NameUtil . formatInstanceName ( projectId , instanceId ) ; com . google . bigtable . admin . v2 . DeleteInstanceRequest request = com . google . bigtable . admin . v2 . DeleteInstanceRequest . newBuilder ( ) . setName ( instanceName ) . build ( ) ; return ApiFutures . transform ( stub . deleteInstanceCallable ( ) . futureCall ( request ) , new ApiFunction < Empty , Void > ( ) { public Void apply ( Empty input ) { return null ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously deletes the specified instance . |
23,742 | public ApiFuture < Boolean > existsAsync ( String instanceId ) { ApiFuture < Instance > protoFuture = getInstanceAsync ( instanceId ) ; ApiFuture < Boolean > existsFuture = ApiFutures . transform ( protoFuture , new ApiFunction < Instance , Boolean > ( ) { public Boolean apply ( Instance ignored ) { return true ; } } , MoreExecutors . directExecutor ( ) ) ; return ApiFutures . catching ( existsFuture , NotFoundException . class , new ApiFunction < NotFoundException , Boolean > ( ) { public Boolean apply ( NotFoundException ignored ) { return false ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously checks if the instance specified by the instanceId exists |
23,743 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Cluster > createClusterAsync ( CreateClusterRequest request ) { return ApiFutures . transform ( stub . createClusterOperationCallable ( ) . futureCall ( request . toProto ( projectId ) ) , new ApiFunction < com . google . bigtable . admin . v2 . Cluster , Cluster > ( ) { public Cluster apply ( com . google . bigtable . admin . v2 . Cluster proto ) { return Cluster . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously creates a new cluster in the specified instance . |
23,744 | @ SuppressWarnings ( "WeakerAccess" ) public Cluster getCluster ( String instanceId , String clusterId ) { return ApiExceptions . callAndTranslateApiException ( getClusterAsync ( instanceId , clusterId ) ) ; } | Get the cluster representation by ID . |
23,745 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Cluster > getClusterAsync ( String instanceId , String clusterId ) { String name = NameUtil . formatClusterName ( projectId , instanceId , clusterId ) ; com . google . bigtable . admin . v2 . GetClusterRequest request = com . google . bigtable . admin . v2 . GetClusterRequest . newBuilder ( ) . setName ( name ) . build ( ) ; return ApiFutures . transform ( stub . getClusterCallable ( ) . futureCall ( request ) , new ApiFunction < com . google . bigtable . admin . v2 . Cluster , Cluster > ( ) { public Cluster apply ( com . google . bigtable . admin . v2 . Cluster proto ) { return Cluster . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously gets the cluster representation by ID . |
23,746 | @ SuppressWarnings ( "WeakerAccess" ) public List < Cluster > listClusters ( String instanceId ) { return ApiExceptions . callAndTranslateApiException ( listClustersAsync ( instanceId ) ) ; } | Lists all clusters in the specified instance . |
23,747 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < List < Cluster > > listClustersAsync ( String instanceId ) { String name = NameUtil . formatInstanceName ( projectId , instanceId ) ; com . google . bigtable . admin . v2 . ListClustersRequest request = com . google . bigtable . admin . v2 . ListClustersRequest . newBuilder ( ) . setParent ( name ) . build ( ) ; return ApiFutures . transform ( stub . listClustersCallable ( ) . futureCall ( request ) , new ApiFunction < com . google . bigtable . admin . v2 . ListClustersResponse , List < Cluster > > ( ) { public List < Cluster > apply ( com . google . bigtable . admin . v2 . ListClustersResponse proto ) { Verify . verify ( proto . getNextPageToken ( ) . isEmpty ( ) , "Server returned an unexpected paginated response" ) ; ImmutableList . Builder < Cluster > clusters = ImmutableList . builder ( ) ; for ( com . google . bigtable . admin . v2 . Cluster cluster : proto . getClustersList ( ) ) { clusters . add ( Cluster . fromProto ( cluster ) ) ; } ImmutableList . Builder < String > failedZones = ImmutableList . builder ( ) ; for ( String locationStr : proto . getFailedLocationsList ( ) ) { failedZones . add ( NameUtil . extractZoneIdFromLocationName ( locationStr ) ) ; } if ( ! failedZones . build ( ) . isEmpty ( ) ) { throw new PartialListClustersException ( failedZones . build ( ) , clusters . build ( ) ) ; } return clusters . build ( ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously lists all clusters in the specified instance . |
23,748 | @ SuppressWarnings ( "WeakerAccess" ) public Cluster resizeCluster ( String instanceId , String clusterId , int numServeNodes ) { return ApiExceptions . callAndTranslateApiException ( resizeClusterAsync ( instanceId , clusterId , numServeNodes ) ) ; } | Resizes the cluster s node count . Please note that only clusters that belong to a PRODUCTION instance can be resized . |
23,749 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < AppProfile > createAppProfileAsync ( CreateAppProfileRequest request ) { return ApiFutures . transform ( stub . createAppProfileCallable ( ) . futureCall ( request . toProto ( projectId ) ) , new ApiFunction < com . google . bigtable . admin . v2 . AppProfile , AppProfile > ( ) { public AppProfile apply ( com . google . bigtable . admin . v2 . AppProfile proto ) { return AppProfile . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously creates a new app profile . |
23,750 | @ SuppressWarnings ( "WeakerAccess" ) public AppProfile getAppProfile ( String instanceId , String appProfileId ) { return ApiExceptions . callAndTranslateApiException ( getAppProfileAsync ( instanceId , appProfileId ) ) ; } | Get the app profile by id . |
23,751 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < AppProfile > getAppProfileAsync ( String instanceId , String appProfileId ) { String name = NameUtil . formatAppProfileName ( projectId , instanceId , appProfileId ) ; GetAppProfileRequest request = GetAppProfileRequest . newBuilder ( ) . setName ( name . toString ( ) ) . build ( ) ; return ApiFutures . transform ( stub . getAppProfileCallable ( ) . futureCall ( request ) , new ApiFunction < com . google . bigtable . admin . v2 . AppProfile , AppProfile > ( ) { public AppProfile apply ( com . google . bigtable . admin . v2 . AppProfile proto ) { return AppProfile . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously get the app profile by id . |
23,752 | @ SuppressWarnings ( "WeakerAccess" ) public List < AppProfile > listAppProfiles ( String instanceId ) { return ApiExceptions . callAndTranslateApiException ( listAppProfilesAsync ( instanceId ) ) ; } | Lists all app profiles of the specified instance . |
23,753 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < List < AppProfile > > listAppProfilesAsync ( String instanceId ) { String instanceName = NameUtil . formatInstanceName ( projectId , instanceId ) ; ListAppProfilesRequest request = ListAppProfilesRequest . newBuilder ( ) . setParent ( instanceName ) . build ( ) ; ApiFuture < ListAppProfilesPage > firstPageFuture = ApiFutures . transform ( stub . listAppProfilesPagedCallable ( ) . futureCall ( request ) , new ApiFunction < ListAppProfilesPagedResponse , ListAppProfilesPage > ( ) { public ListAppProfilesPage apply ( ListAppProfilesPagedResponse response ) { return response . getPage ( ) ; } } , MoreExecutors . directExecutor ( ) ) ; ApiFuture < List < com . google . bigtable . admin . v2 . AppProfile > > allProtos = ApiFutures . transformAsync ( firstPageFuture , new ApiAsyncFunction < ListAppProfilesPage , List < com . google . bigtable . admin . v2 . AppProfile > > ( ) { List < com . google . bigtable . admin . v2 . AppProfile > responseAccumulator = Lists . newArrayList ( ) ; public ApiFuture < List < com . google . bigtable . admin . v2 . AppProfile > > apply ( ListAppProfilesPage page ) { responseAccumulator . addAll ( Lists . newArrayList ( page . getValues ( ) ) ) ; if ( ! page . hasNextPage ( ) ) { return ApiFutures . immediateFuture ( responseAccumulator ) ; } return ApiFutures . transformAsync ( page . getNextPageAsync ( ) , this , MoreExecutors . directExecutor ( ) ) ; } } , MoreExecutors . directExecutor ( ) ) ; return ApiFutures . transform ( allProtos , new ApiFunction < List < com . google . bigtable . admin . v2 . AppProfile > , List < AppProfile > > ( ) { public List < AppProfile > apply ( List < com . google . bigtable . admin . v2 . AppProfile > input ) { List < AppProfile > results = Lists . newArrayListWithCapacity ( input . size ( ) ) ; for ( com . google . bigtable . admin . v2 . AppProfile appProfile : input ) { results . add ( AppProfile . fromProto ( appProfile ) ) ; } return results ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously lists all app profiles of the specified instance . |
23,754 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < AppProfile > updateAppProfileAsync ( UpdateAppProfileRequest request ) { return ApiFutures . transform ( stub . updateAppProfileOperationCallable ( ) . futureCall ( request . toProto ( projectId ) ) , new ApiFunction < com . google . bigtable . admin . v2 . AppProfile , AppProfile > ( ) { public AppProfile apply ( com . google . bigtable . admin . v2 . AppProfile proto ) { return AppProfile . fromProto ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously updates an existing app profile . |
23,755 | @ SuppressWarnings ( "WeakerAccess" ) public void deleteAppProfile ( String instanceId , String appProfileId ) { ApiExceptions . callAndTranslateApiException ( deleteAppProfileAsync ( instanceId , appProfileId ) ) ; } | Deletes the specified app profile . |
23,756 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Void > deleteAppProfileAsync ( String instanceId , String appProfileId ) { String name = NameUtil . formatAppProfileName ( projectId , instanceId , appProfileId ) ; DeleteAppProfileRequest request = DeleteAppProfileRequest . newBuilder ( ) . setName ( name ) . build ( ) ; return ApiFutures . transform ( stub . deleteAppProfileCallable ( ) . futureCall ( request ) , new ApiFunction < Empty , Void > ( ) { public Void apply ( Empty input ) { return null ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously deletes the specified app profile . |
23,757 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Policy > getIamPolicyAsync ( String instanceId ) { String name = NameUtil . formatInstanceName ( projectId , instanceId ) ; GetIamPolicyRequest request = GetIamPolicyRequest . newBuilder ( ) . setResource ( name ) . build ( ) ; final IamPolicyMarshaller marshaller = new IamPolicyMarshaller ( ) ; return ApiFutures . transform ( stub . getIamPolicyCallable ( ) . futureCall ( request ) , new ApiFunction < com . google . iam . v1 . Policy , Policy > ( ) { public Policy apply ( com . google . iam . v1 . Policy proto ) { return marshaller . fromPb ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously gets the IAM access control policy for the specified instance . |
23,758 | @ SuppressWarnings ( "WeakerAccess" ) public Policy setIamPolicy ( String instanceId , Policy policy ) { return ApiExceptions . callAndTranslateApiException ( setIamPolicyAsync ( instanceId , policy ) ) ; } | Replaces the IAM policy associated with the specified instance . |
23,759 | @ SuppressWarnings ( "WeakerAccess" ) public ApiFuture < Policy > setIamPolicyAsync ( String instanceId , Policy policy ) { String name = NameUtil . formatInstanceName ( projectId , instanceId ) ; final IamPolicyMarshaller marshaller = new IamPolicyMarshaller ( ) ; SetIamPolicyRequest request = SetIamPolicyRequest . newBuilder ( ) . setResource ( name ) . setPolicy ( marshaller . toPb ( policy ) ) . build ( ) ; return ApiFutures . transform ( stub . setIamPolicyCallable ( ) . futureCall ( request ) , new ApiFunction < com . google . iam . v1 . Policy , Policy > ( ) { public Policy apply ( com . google . iam . v1 . Policy proto ) { return marshaller . fromPb ( proto ) ; } } , MoreExecutors . directExecutor ( ) ) ; } | Asynchronously replaces the IAM policy associated with the specified instance . |
23,760 | public static Builder newBuilder ( String projectId , String datasetId ) { return newBuilder ( DatasetId . of ( projectId , datasetId ) ) ; } | Returns a builder for the DatasetInfo object given it s user - defined project and dataset ids . |
23,761 | public static KeySet all ( ) { return new KeySet ( true , ImmutableList . < Key > of ( ) , ImmutableList . < KeyRange > of ( ) ) ; } | Creates a key set that will retrieve all rows of a table or index . |
23,762 | public final ErrorGroup updateGroup ( ErrorGroup group ) { UpdateGroupRequest request = UpdateGroupRequest . newBuilder ( ) . setGroup ( group ) . build ( ) ; return updateGroup ( request ) ; } | Replace the data for the specified group . Fails if the group does not exist . |
23,763 | public final Operation updateRegionAutoscaler ( String autoscaler , String region , Autoscaler autoscalerResource , List < String > fieldMask ) { UpdateRegionAutoscalerHttpRequest request = UpdateRegionAutoscalerHttpRequest . newBuilder ( ) . setAutoscaler ( autoscaler ) . setRegion ( region ) . setAutoscalerResource ( autoscalerResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return updateRegionAutoscaler ( request ) ; } | Updates an autoscaler in the specified project using the data included in the request . |
23,764 | @ BetaApi ( "The surface for long-running operations is not stable yet and may change in the future." ) public final OperationFuture < Empty , Struct > importAgentAsync ( ImportAgentRequest request ) { return importAgentOperationCallable ( ) . futureCall ( request ) ; } | Imports the specified agent from a ZIP file . |
23,765 | @ BetaApi ( "The surface for long-running operations is not stable yet and may change in the future." ) public final OperationFuture < Empty , Struct > restoreAgentAsync ( RestoreAgentRequest request ) { return restoreAgentOperationCallable ( ) . futureCall ( request ) ; } | Restores the specified agent from a ZIP file . |
23,766 | static String autoId ( ) { StringBuilder builder = new StringBuilder ( ) ; int maxRandom = AUTO_ID_ALPHABET . length ( ) ; for ( int i = 0 ; i < AUTO_ID_LENGTH ; i ++ ) { builder . append ( AUTO_ID_ALPHABET . charAt ( RANDOM . nextInt ( maxRandom ) ) ) ; } return builder . toString ( ) ; } | Creates a pseudo - random 20 - character ID that can be used for Firestore documents . |
23,767 | private < T > void runTransaction ( final Transaction . Function < T > transactionCallback , final SettableApiFuture < T > resultFuture , final TransactionOptions options ) { Span span = tracer . spanBuilder ( "CloudFirestore.Transaction" ) . startSpan ( ) ; try ( Scope s = tracer . withSpan ( span ) ) { runTransactionAttempt ( transactionCallback , resultFuture , options , span ) ; } } | Transaction functions that returns its result in the provided SettableFuture . |
23,768 | < RequestT , ResponseT > void streamRequest ( RequestT requestT , ApiStreamObserver < ResponseT > responseObserverT , ServerStreamingCallable < RequestT , ResponseT > callable ) { Preconditions . checkState ( ! closed , "Firestore client has already been closed" ) ; callable . serverStreamingCall ( requestT , responseObserverT ) ; } | Request funnel for all unidirectional streaming requests . |
23,769 | < RequestT , ResponseT > ApiStreamObserver < RequestT > streamRequest ( ApiStreamObserver < ResponseT > responseObserverT , BidiStreamingCallable < RequestT , ResponseT > callable ) { Preconditions . checkState ( ! closed , "Firestore client has already been closed" ) ; return callable . bidiStreamingCall ( responseObserverT ) ; } | Request funnel for all bidirectional streaming requests . |
23,770 | public CreateTableRequest addFamily ( String familyId ) { Preconditions . checkNotNull ( familyId ) ; tableRequest . putColumnFamilies ( familyId , ColumnFamily . getDefaultInstance ( ) ) ; return this ; } | Adds a new columnFamily to the configuration |
23,771 | public CreateTableRequest addSplit ( ByteString key ) { Preconditions . checkNotNull ( key ) ; createTableRequest . addInitialSplitsBuilder ( ) . setKey ( key ) ; return this ; } | Adds split at the specified key to the configuration |
23,772 | public final Operation patchSubnetwork ( ProjectRegionSubnetworkName subnetwork , Subnetwork subnetworkResource , List < String > fieldMask ) { PatchSubnetworkHttpRequest request = PatchSubnetworkHttpRequest . newBuilder ( ) . setSubnetwork ( subnetwork == null ? null : subnetwork . toString ( ) ) . setSubnetworkResource ( subnetworkResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return patchSubnetwork ( request ) ; } | Patches the specified subnetwork with the data included in the request . Only certain fields can up updated with a patch request as indicated in the field descriptions . You must specify the current fingeprint of the subnetwork resource being patched . |
23,773 | public final Operation patchSecurityPolicy ( ProjectGlobalSecurityPolicyName securityPolicy , SecurityPolicy securityPolicyResource , List < String > fieldMask ) { PatchSecurityPolicyHttpRequest request = PatchSecurityPolicyHttpRequest . newBuilder ( ) . setSecurityPolicy ( securityPolicy == null ? null : securityPolicy . toString ( ) ) . setSecurityPolicyResource ( securityPolicyResource ) . addAllFieldMask ( fieldMask ) . build ( ) ; return patchSecurityPolicy ( request ) ; } | Patches the specified policy with the data included in the request . |
23,774 | public static Builder newBuilder ( TableId sourceTable , String destinationUri ) { return newBuilder ( sourceTable , ImmutableList . of ( checkNotNull ( destinationUri ) ) ) ; } | Creates a builder for a BigQuery Extract Job configuration given source table and destination URI . |
23,775 | public static Builder newBuilder ( TableId sourceTable , List < String > destinationUris ) { return new Builder ( ) . setSourceTable ( sourceTable ) . setDestinationUris ( destinationUris ) ; } | Creates a builder for a BigQuery Extract Job configuration given source table and destination URIs . |
23,776 | public static ExtractJobConfiguration of ( TableId sourceTable , String destinationUri ) { return newBuilder ( sourceTable , destinationUri ) . build ( ) ; } | Returns a BigQuery Extract Job configuration for the given source table and destination URI . |
23,777 | public static ExtractJobConfiguration of ( TableId sourceTable , List < String > destinationUris , String format ) { return newBuilder ( sourceTable , destinationUris ) . setFormat ( format ) . build ( ) ; } | Returns a BigQuery Extract Job configuration for the given source table format and destination URIs . |
23,778 | public final ListScanConfigsPagedResponse listScanConfigs ( ProjectName parent , String filter ) { ListScanConfigsRequest request = ListScanConfigsRequest . newBuilder ( ) . setParent ( parent == null ? null : parent . toString ( ) ) . setFilter ( filter ) . build ( ) ; return listScanConfigs ( request ) ; } | Lists scan configurations for the specified project . |
23,779 | private void ensureFetching ( long blockIndex ) { if ( fetching != null ) { if ( fetching . futureBuf . isDone ( ) ) { full . add ( fetching ) ; fetching = null ; } else { return ; } } for ( WorkUnit w : full ) { if ( w . blockIndex == blockIndex ) { return ; } } if ( full . size ( ) < BUF_COUNT ) { fetching = new WorkUnit ( chan , bufSize , blockIndex ) ; } else { fetching = full . remove ( 0 ) ; fetching . resetForIndex ( blockIndex ) ; } bytesRead += bufSize ; fetching . futureBuf = exec . submit ( fetching ) ; } | make sure it now goes looking for that block index . |
23,780 | public synchronized int read ( ByteBuffer dst ) throws IOException { if ( ! open ) { throw new ClosedChannelException ( ) ; } try { if ( trackTime ) { msBetweenCallsToRead += betweenCallsToRead . elapsed ( TimeUnit . MILLISECONDS ) ; } ByteBuffer src ; try { Stopwatch waitingForData ; if ( trackTime ) { waitingForData = Stopwatch . createStarted ( ) ; } src = fetch ( position ) ; if ( trackTime ) { msWaitingForData += waitingForData . elapsed ( TimeUnit . MILLISECONDS ) ; } } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; return 0 ; } catch ( ExecutionException e ) { throw new RuntimeException ( e ) ; } if ( null == src ) { nbReadsPastEnd ++ ; return - 1 ; } Stopwatch copyingData ; if ( trackTime ) { copyingData = Stopwatch . createStarted ( ) ; } long blockIndex = position / bufSize ; int offset = ( int ) ( position - ( blockIndex * bufSize ) ) ; int availableToCopy = src . position ( ) - offset ; if ( availableToCopy < 0 ) { nbReadsPastEnd ++ ; return - 1 ; } int bytesToCopy = dst . remaining ( ) ; byte [ ] array = src . array ( ) ; if ( availableToCopy < bytesToCopy ) { bytesToCopy = availableToCopy ; } dst . put ( array , offset , bytesToCopy ) ; position += bytesToCopy ; if ( trackTime ) { msCopyingData += copyingData . elapsed ( TimeUnit . MILLISECONDS ) ; } bytesReturned += bytesToCopy ; if ( availableToCopy == 0 ) { return - 1 ; } return bytesToCopy ; } finally { if ( trackTime ) { betweenCallsToRead . reset ( ) ; betweenCallsToRead . start ( ) ; } } } | Reads a sequence of bytes from this channel into the given buffer . |
23,781 | public static RegionOperationId of ( RegionId regionId , String operation ) { return new RegionOperationId ( regionId . getProject ( ) , regionId . getRegion ( ) , operation ) ; } | Returns a region operation identity given the region identity and the operation name . |
23,782 | public static RegionOperationId of ( String region , String operation ) { return new RegionOperationId ( null , region , operation ) ; } | Returns a region operation identity given the region and operation names . |
23,783 | public static RegionOperationId of ( String project , String region , String operation ) { return new RegionOperationId ( project , region , operation ) ; } | Returns a region operation identity given project region and operation names . |
23,784 | public void runQueryPermanentTable ( String destinationDataset , String destinationTable ) throws InterruptedException { String query = "SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus;" ; QueryJobConfiguration queryConfig = QueryJobConfiguration . newBuilder ( query ) . setDestinationTable ( TableId . of ( destinationDataset , destinationTable ) ) . build ( ) ; for ( FieldValueList row : bigquery . query ( queryConfig ) . iterateAll ( ) ) { for ( FieldValue val : row ) { System . out . printf ( "%s," , val . toString ( ) ) ; } System . out . printf ( "\n" ) ; } } | Example of running a query and saving the results to a table . |
23,785 | public void runUncachedQuery ( ) throws TimeoutException , InterruptedException { String query = "SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus;" ; QueryJobConfiguration queryConfig = QueryJobConfiguration . newBuilder ( query ) . setUseQueryCache ( false ) . build ( ) ; for ( FieldValueList row : bigquery . query ( queryConfig ) . iterateAll ( ) ) { for ( FieldValue val : row ) { System . out . printf ( "%s," , val . toString ( ) ) ; } System . out . printf ( "\n" ) ; } } | Example of running a query with the cache disabled . |
23,786 | public void runBatchQuery ( ) throws TimeoutException , InterruptedException { String query = "SELECT corpus FROM `bigquery-public-data.samples.shakespeare` GROUP BY corpus;" ; QueryJobConfiguration queryConfig = QueryJobConfiguration . newBuilder ( query ) . setPriority ( QueryJobConfiguration . Priority . BATCH ) . build ( ) ; JobId jobId = JobId . newBuilder ( ) . setRandomJob ( ) . setLocation ( "US" ) . build ( ) ; String jobIdString = jobId . getJob ( ) ; bigquery . create ( JobInfo . newBuilder ( queryConfig ) . setJobId ( jobId ) . build ( ) ) ; Job queryJob = bigquery . getJob ( JobId . newBuilder ( ) . setJob ( jobIdString ) . setLocation ( "US" ) . build ( ) ) ; System . out . printf ( "Job %s in location %s currently in state: %s%n" , queryJob . getJobId ( ) . getJob ( ) , queryJob . getJobId ( ) . getLocation ( ) , queryJob . getStatus ( ) . getState ( ) . toString ( ) ) ; } | Example of running a batch query . |
23,787 | public void runQueryWithNamedParameters ( ) throws InterruptedException { String corpus = "romeoandjuliet" ; long minWordCount = 250 ; String query = "SELECT word, word_count\n" + "FROM `bigquery-public-data.samples.shakespeare`\n" + "WHERE corpus = @corpus\n" + "AND word_count >= @min_word_count\n" + "ORDER BY word_count DESC" ; QueryJobConfiguration queryConfig = QueryJobConfiguration . newBuilder ( query ) . addNamedParameter ( "corpus" , QueryParameterValue . string ( corpus ) ) . addNamedParameter ( "min_word_count" , QueryParameterValue . int64 ( minWordCount ) ) . build ( ) ; for ( FieldValueList row : bigquery . query ( queryConfig ) . iterateAll ( ) ) { for ( FieldValue val : row ) { System . out . printf ( "%s," , val . toString ( ) ) ; } System . out . printf ( "\n" ) ; } } | Example of running a query with named query parameters . |
23,788 | public void runQueryWithArrayParameters ( ) throws InterruptedException { String gender = "M" ; String [ ] states = { "WA" , "WI" , "WV" , "WY" } ; String query = "SELECT name, sum(number) as count\n" + "FROM `bigquery-public-data.usa_names.usa_1910_2013`\n" + "WHERE gender = @gender\n" + "AND state IN UNNEST(@states)\n" + "GROUP BY name\n" + "ORDER BY count DESC\n" + "LIMIT 10;" ; QueryJobConfiguration queryConfig = QueryJobConfiguration . newBuilder ( query ) . addNamedParameter ( "gender" , QueryParameterValue . string ( gender ) ) . addNamedParameter ( "states" , QueryParameterValue . array ( states , String . class ) ) . build ( ) ; for ( FieldValueList row : bigquery . query ( queryConfig ) . iterateAll ( ) ) { for ( FieldValue val : row ) { System . out . printf ( "%s," , val . toString ( ) ) ; } System . out . printf ( "\n" ) ; } } | Example of running a query with array query parameters . |
23,789 | public void runQueryWithTimestampParameters ( ) throws InterruptedException { ZonedDateTime timestamp = LocalDateTime . of ( 2016 , 12 , 7 , 8 , 0 , 0 ) . atZone ( ZoneOffset . UTC ) ; String query = "SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);" ; QueryJobConfiguration queryConfig = QueryJobConfiguration . newBuilder ( query ) . addNamedParameter ( "ts_value" , QueryParameterValue . timestamp ( timestamp . toInstant ( ) . toEpochMilli ( ) * 1000 ) ) . build ( ) ; DateTimeFormatter formatter = DateTimeFormatter . ISO_INSTANT . withZone ( ZoneOffset . UTC ) ; for ( FieldValueList row : bigquery . query ( queryConfig ) . iterateAll ( ) ) { System . out . printf ( "%s\n" , formatter . format ( Instant . ofEpochMilli ( row . get ( 0 ) . getTimestampValue ( ) / 1000 ) . atOffset ( ZoneOffset . UTC ) ) ) ; System . out . printf ( "\n" ) ; } } | Example of running a query with timestamp query parameters . |
23,790 | public void loadTableGcsParquet ( String datasetName ) throws InterruptedException { String sourceUri = "gs://cloud-samples-data/bigquery/us-states/us-states.parquet" ; TableId tableId = TableId . of ( datasetName , "us_states" ) ; LoadJobConfiguration configuration = LoadJobConfiguration . builder ( tableId , sourceUri ) . setFormatOptions ( FormatOptions . parquet ( ) ) . build ( ) ; Job loadJob = bigquery . create ( JobInfo . of ( configuration ) ) ; loadJob = loadJob . waitFor ( ) ; StandardTableDefinition destinationTable = bigquery . getTable ( tableId ) . getDefinition ( ) ; System . out . println ( "State: " + loadJob . getStatus ( ) . getState ( ) ) ; System . out . printf ( "Loaded %d rows.\n" , destinationTable . getNumRows ( ) ) ; } | Example of loading a parquet file from GCS to a table . |
23,791 | public void copyTables ( String datasetId , String destinationTableId ) throws InterruptedException { generateTableWithDdl ( datasetId , "table1" ) ; generateTableWithDdl ( datasetId , "table2" ) ; TableId destinationTable = TableId . of ( datasetId , destinationTableId ) ; CopyJobConfiguration configuration = CopyJobConfiguration . newBuilder ( destinationTable , Arrays . asList ( TableId . of ( datasetId , "table1" ) , TableId . of ( datasetId , "table2" ) ) ) . build ( ) ; Job job = bigquery . create ( JobInfo . of ( configuration ) ) ; job = job . waitFor ( ) ; StandardTableDefinition table = bigquery . getTable ( destinationTable ) . getDefinition ( ) ; System . out . println ( "State: " + job . getStatus ( ) . getState ( ) ) ; System . out . printf ( "Copied %d rows.\n" , table . getNumRows ( ) ) ; } | Example of copying multiple tables to a destination . |
23,792 | public void undeleteTable ( String datasetId ) throws InterruptedException { generateTableWithDdl ( datasetId , "oops_undelete_me" ) ; String tableId = "oops_undelete_me" ; long snapTime = Instant . now ( ) . toEpochMilli ( ) ; bigquery . delete ( TableId . of ( datasetId , tableId ) ) ; String snapshotTableId = String . format ( "%s@%d" , tableId , snapTime ) ; String recoverTableId = String . format ( "%s_recovered" , tableId ) ; CopyJobConfiguration configuration = CopyJobConfiguration . newBuilder ( TableId . of ( datasetId , recoverTableId ) , TableId . of ( datasetId , snapshotTableId ) ) . build ( ) ; Job job = bigquery . create ( JobInfo . of ( configuration ) ) ; job = job . waitFor ( ) ; StandardTableDefinition table = bigquery . getTable ( TableId . of ( datasetId , recoverTableId ) ) . getDefinition ( ) ; System . out . println ( "State: " + job . getStatus ( ) . getState ( ) ) ; System . out . printf ( "Recovered %d rows.\n" , table . getNumRows ( ) ) ; } | Example of undeleting a table . |
23,793 | public UnaryCallSettings . Builder getApiCallSettings ( RetrySettings retrySettings ) { return UnaryCallSettings . newUnaryCallSettingsBuilder ( ) . setRetrySettings ( retrySettings ) ; } | Returns a builder for API call settings . |
23,794 | public static TransportChannelProvider setUpChannelProvider ( InstantiatingGrpcChannelProvider . Builder providerBuilder , ServiceOptions < ? , ? > serviceOptions ) { providerBuilder . setEndpoint ( serviceOptions . getHost ( ) ) ; return providerBuilder . build ( ) ; } | Returns a channel provider from the given default provider . |
23,795 | public SeekableByteChannel newByteChannel ( Path path , Set < ? extends OpenOption > options , FileAttribute < ? > ... attrs ) throws IOException { checkNotNull ( path ) ; initStorage ( ) ; CloudStorageUtil . checkNotNullArray ( attrs ) ; if ( options . contains ( StandardOpenOption . WRITE ) ) { return newWriteChannel ( path , options ) ; } else { return newReadChannel ( path , options ) ; } } | Open a file for reading or writing . To read receiver - pays buckets specify the BlobSourceOption . userProject option . |
23,796 | public void createDirectory ( Path dir , FileAttribute < ? > ... attrs ) { CloudStorageUtil . checkPath ( dir ) ; CloudStorageUtil . checkNotNullArray ( attrs ) ; } | Does nothing since Google Cloud Storage uses fake directories . |
23,797 | Page < Bucket > listBuckets ( Storage . BucketListOption ... options ) { initStorage ( ) ; return storage . list ( options ) ; } | Lists the project s buckets . But use the one in CloudStorageFileSystem . |
23,798 | public boolean exists ( BucketSourceOption ... options ) { int length = options . length ; Storage . BucketGetOption [ ] getOptions = Arrays . copyOf ( toGetOptions ( this , options ) , length + 1 ) ; getOptions [ length ] = Storage . BucketGetOption . fields ( ) ; return storage . get ( getName ( ) , getOptions ) != null ; } | Checks if this bucket exists . |
23,799 | static void setDefaultCloudStorageConfiguration ( CloudStorageConfiguration config ) { if ( null == config ) { userSpecifiedDefault = CloudStorageConfiguration . DEFAULT ; } else { userSpecifiedDefault = config ; } } | Don t call this one call the one in CloudStorageFileSystemProvider . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.