1- // Copyright 2020 Google LLC
1+ // Copyright 2021 Google LLC
22//
33// Licensed under the Apache License, Version 2.0 (the "License");
44// you may not use this file except in compliance with the License.
@@ -71,8 +71,7 @@ service BigQueryRead {
7171 post : "/v1beta2/{read_session.table=projects/*/datasets/*/tables/*}"
7272 body : "*"
7373 };
74- option (google.api.method_signature ) =
75- "parent,read_session,max_stream_count" ;
74+ option (google.api.method_signature ) = "parent,read_session,max_stream_count" ;
7675 }
7776
7877 // Reads rows from the stream in the format prescribed by the ReadSession.
@@ -101,8 +100,7 @@ service BigQueryRead {
101100 // original, primary, and residual, that original[0-j] = primary[0-j] and
102101 // original[j-n] = residual[0-m] once the streams have been read to
103102 // completion.
104- rpc SplitReadStream (SplitReadStreamRequest )
105- returns (SplitReadStreamResponse ) {
103+ rpc SplitReadStream (SplitReadStreamRequest ) returns (SplitReadStreamResponse ) {
106104 option (google.api.http ) = {
107105 get : "/v1beta2/{name=projects/*/locations/*/sessions/*/streams/*}"
108106 };
@@ -171,8 +169,7 @@ service BigQueryWrite {
171169
172170 // Finalize a write stream so that no new data can be appended to the
173171 // stream. Finalize is not supported on the '_default' stream.
174- rpc FinalizeWriteStream (FinalizeWriteStreamRequest )
175- returns (FinalizeWriteStreamResponse ) {
172+ rpc FinalizeWriteStream (FinalizeWriteStreamRequest ) returns (FinalizeWriteStreamResponse ) {
176173 option (google.api.http ) = {
177174 post : "/v1beta2/{name=projects/*/datasets/*/tables/*/streams/*}"
178175 body : "*"
@@ -185,8 +182,7 @@ service BigQueryWrite {
185182 // Streams must be finalized before commit and cannot be committed multiple
186183 // times. Once a stream is committed, data in the stream becomes available
187184 // for read operations.
188- rpc BatchCommitWriteStreams (BatchCommitWriteStreamsRequest )
189- returns (BatchCommitWriteStreamsResponse ) {
185+ rpc BatchCommitWriteStreams (BatchCommitWriteStreamsRequest ) returns (BatchCommitWriteStreamsResponse ) {
190186 option (google.api.http ) = {
191187 get : "/v1beta2/{parent=projects/*/datasets/*/tables/*}"
192188 };
@@ -303,6 +299,19 @@ message ReadRowsResponse {
303299 // Throttling state. If unset, the latest response still describes
304300 // the current throttling status.
305301 ThrottleState throttle_state = 5 ;
302+
303+ // The schema for the read. If read_options.selected_fields is set, the
304+ // schema may be different from the table schema as it will only contain
305+ // the selected fields. This schema is equivalent to the one returned by
306+ // CreateSession. This field is only populated in the first ReadRowsResponse
307+ // RPC.
308+ oneof schema {
309+ // Output only. Avro schema.
310+ AvroSchema avro_schema = 7 [(google.api.field_behavior ) = OUTPUT_ONLY ];
311+
312+ // Output only. Arrow schema.
313+ ArrowSchema arrow_schema = 8 [(google.api.field_behavior ) = OUTPUT_ONLY ];
314+ }
306315}
307316
308317// Request message for `SplitReadStream`.
@@ -342,7 +351,9 @@ message CreateWriteStreamRequest {
342351 // of `projects/{project}/datasets/{dataset}/tables/{table}`.
343352 string parent = 1 [
344353 (google.api.field_behavior ) = REQUIRED ,
345- (google.api.resource_reference ) = { type : "bigquery.googleapis.com/Table" }
354+ (google.api.resource_reference ) = {
355+ type : "bigquery.googleapis.com/Table"
356+ }
346357 ];
347358
348359 // Required. Stream to be created.
@@ -360,9 +371,9 @@ message AppendRowsRequest {
360371 ProtoRows rows = 2 ;
361372 }
362373
363- // Required. The stream that is the target of the append operation. This value
364- // must be specified for the initial request. If subsequent requests specify
365- // the stream name, it must equal to the value provided in the first request.
374+ // Required. The stream that is the target of the append operation. This value must be
375+ // specified for the initial request. If subsequent requests specify the
376+ // stream name, it must equal the value provided in the first request.
366377 // To write to the _default stream, populate this field with a string in the
367378 // format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
368379 string write_stream = 1 [
@@ -394,7 +405,7 @@ message AppendRowsRequest {
394405
395406// Response message for `AppendRows`.
396407message AppendRowsResponse {
397- // A success append result .
408+ // AppendResult is returned for successful append requests .
398409 message AppendResult {
399410 // The row offset at which the last append occurred. The offset will not be
400411 // set if appending using default streams.
@@ -405,25 +416,32 @@ message AppendRowsResponse {
405416 // Result if the append is successful.
406417 AppendResult append_result = 1 ;
407418
408- // Error in case of request failed. If set, it means rows are not accepted
409- // into the system. Users can retry or continue with other requests within
410- // the same connection.
411- // ALREADY_EXISTS: happens when offset is specified, it means the entire
412- // request is already appended, it is safe to ignore this error.
413- // OUT_OF_RANGE: happens when offset is specified, it means the specified
414- // offset is beyond the end of the stream.
415- // INVALID_ARGUMENT: error caused by malformed request or data.
416- // RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
417- // append without offset.
418- // ABORTED: request processing is aborted because of prior failures, request
419- // can be retried if previous failure is fixed.
420- // INTERNAL: server side errors that can be retried.
419+ // Error returned when problems were encountered. If present,
420+ // it indicates rows were not accepted into the system.
421+ // Users can retry or continue with other append requests within the
422+ // same connection.
423+ //
424+ // Additional information about error signalling:
425+ //
426+ // ALREADY_EXISTS: Happens when an append specified an offset, and the
427+ // backend already has received data at this offset. Typically encountered
428+ // in retry scenarios, and can be ignored.
429+ //
430+ // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
431+ // the current end of the stream.
432+ //
433+ // INVALID_ARGUMENT: Indicates a malformed request or data.
434+ //
435+ // ABORTED: Request processing is aborted because of prior failures. The
436+ // request can be retried if previous failure is addressed.
437+ //
438+ // INTERNAL: Indicates server side error(s) that can be retried.
421439 google.rpc.Status error = 2 ;
422440 }
423441
424442 // If backend detects a schema update, pass it to user so that user can
425- // use it to input new type of message. It will be empty when there is no
426- // schema updates.
443+ // use it to input new type of message. It will be empty when no schema
444+ // updates have occurred .
427445 TableSchema updated_schema = 3 ;
428446}
429447
@@ -441,9 +459,11 @@ message GetWriteStreamRequest {
441459
442460// Request message for `BatchCommitWriteStreams`.
443461message BatchCommitWriteStreamsRequest {
444- // Required. Parent table that all the streams should belong to, in the form
445- // of `projects/{project}/datasets/{dataset}/tables/{table}`.
446- string parent = 1 [(google.api.field_behavior ) = REQUIRED ];
462+ // Required. Parent table that all the streams should belong to, in the form of
463+ // `projects/{project}/datasets/{dataset}/tables/{table}`.
464+ string parent = 1 [
465+ (google.api.field_behavior ) = REQUIRED
466+ ];
447467
448468 // Required. The group of streams that will be committed atomically.
449469 repeated string write_streams = 2 [(google.api.field_behavior ) = REQUIRED ];
@@ -452,11 +472,15 @@ message BatchCommitWriteStreamsRequest {
452472// Response message for `BatchCommitWriteStreams`.
453473message BatchCommitWriteStreamsResponse {
454474 // The time at which streams were committed in microseconds granularity.
455- // This field will only exist when there is no stream errors.
475+ // This field will only exist when there are no stream errors.
476+ // **Note** if this field is not set, it means the commit was not successful.
456477 google.protobuf.Timestamp commit_time = 1 ;
457478
458479 // Stream level error if commit failed. Only streams with error will be in
459480 // the list.
481+ // If empty, there is no error and all streams are committed successfully.
482+ // If non-empty, certain streams have errors and ZERO streams are committed
483+ // due to the atomicity guarantee.
460484 repeated StorageError stream_errors = 2 ;
461485}
462486
@@ -500,8 +524,9 @@ message FlushRowsResponse {
500524}
501525
502526// Structured custom BigQuery Storage error message. The error can be attached
503- // as error details in the returned rpc Status. User can use the info to process
504- // errors in a structural way, rather than having to parse error messages.
527+ // as error details in the returned rpc Status. In particular, the use of error
528+ // codes allows more structured error handling, and reduces the need to evaluate
529+ // unstructured error text strings.
505530message StorageError {
506531 // Error code for `StorageError`.
507532 enum StorageErrorCode {
@@ -522,9 +547,12 @@ message StorageError {
522547 INVALID_STREAM_TYPE = 4 ;
523548
524549 // Invalid Stream state.
525- // For example, you try to commit a stream that is not fianlized or is
550+ // For example, you try to commit a stream that is not finalized or is
526551 // garbaged.
527552 INVALID_STREAM_STATE = 5 ;
553+
554+ // Stream is finalized.
555+ STREAM_FINALIZED = 6 ;
528556 }
529557
530558 // BigQuery Storage specific error code.
0 commit comments