diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java
index ebafee0778..f5503f40fd 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java
@@ -26,6 +26,8 @@
* Due to the nature of AppendRows being a bidirectional streaming RPC, certain
* parts of the AppendRowsRequest need only be specified for the first request
* sent each time the gRPC network connection is opened/reopened.
+ * The size of a single AppendRowsRequest must be less than 10 MB in size.
+ * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
 * </pre>
*
* Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest}
@@ -1827,6 +1829,8 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build
* Due to the nature of AppendRows being a bidirectional streaming RPC, certain
* parts of the AppendRowsRequest need only be specified for the first request
* sent each time the gRPC network connection is opened/reopened.
+ * The size of a single AppendRowsRequest must be less than 10 MB in size.
+ * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
 * </pre>
*
* Protobuf type {@code google.cloud.bigquery.storage.v1.AppendRowsRequest}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java
index 7b4c2c8c9c..23240d2a6e 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponse.java
@@ -39,6 +39,7 @@ private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder<?> bui
private AppendRowsResponse() {
rowErrors_ = java.util.Collections.emptyList();
+ writeStream_ = "";
}
@java.lang.Override
@@ -136,6 +137,13 @@ private AppendRowsResponse(
com.google.cloud.bigquery.storage.v1.RowError.parser(), extensionRegistry));
break;
}
+ case 42:
+ {
+ java.lang.String s = input.readStringRequireUtf8();
+
+ writeStream_ = s;
+ break;
+ }
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
@@ -1282,6 +1290,57 @@ public com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuil
return rowErrors_.get(index);
}
+ public static final int WRITE_STREAM_FIELD_NUMBER = 5;
+ private volatile java.lang.Object writeStream_;
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return The writeStream.
+ */
+ @java.lang.Override
+ public java.lang.String getWriteStream() {
+ java.lang.Object ref = writeStream_;
+ if (ref instanceof java.lang.String) {
+ return (java.lang.String) ref;
+ } else {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ writeStream_ = s;
+ return s;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return The bytes for writeStream.
+ */
+ @java.lang.Override
+ public com.google.protobuf.ByteString getWriteStreamBytes() {
+ java.lang.Object ref = writeStream_;
+ if (ref instanceof java.lang.String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ writeStream_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -1309,6 +1368,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < rowErrors_.size(); i++) {
output.writeMessage(4, rowErrors_.get(i));
}
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) {
+ com.google.protobuf.GeneratedMessageV3.writeString(output, 5, writeStream_);
+ }
unknownFields.writeTo(output);
}
@@ -1334,6 +1396,9 @@ public int getSerializedSize() {
for (int i = 0; i < rowErrors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, rowErrors_.get(i));
}
+ if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(writeStream_)) {
+ size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, writeStream_);
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -1355,6 +1420,7 @@ public boolean equals(final java.lang.Object obj) {
if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false;
}
if (!getRowErrorsList().equals(other.getRowErrorsList())) return false;
+ if (!getWriteStream().equals(other.getWriteStream())) return false;
if (!getResponseCase().equals(other.getResponseCase())) return false;
switch (responseCase_) {
case 1:
@@ -1385,6 +1451,8 @@ public int hashCode() {
hash = (37 * hash) + ROW_ERRORS_FIELD_NUMBER;
hash = (53 * hash) + getRowErrorsList().hashCode();
}
+ hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER;
+ hash = (53 * hash) + getWriteStream().hashCode();
switch (responseCase_) {
case 1:
hash = (37 * hash) + APPEND_RESULT_FIELD_NUMBER;
@@ -1557,6 +1625,8 @@ public Builder clear() {
} else {
rowErrorsBuilder_.clear();
}
+ writeStream_ = "";
+
responseCase_ = 0;
response_ = null;
return this;
@@ -1615,6 +1685,7 @@ public com.google.cloud.bigquery.storage.v1.AppendRowsResponse buildPartial() {
} else {
result.rowErrors_ = rowErrorsBuilder_.build();
}
+ result.writeStream_ = writeStream_;
result.responseCase_ = responseCase_;
onBuilt();
return result;
@@ -1696,6 +1767,10 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.AppendRowsResponse
}
}
}
+ if (!other.getWriteStream().isEmpty()) {
+ writeStream_ = other.writeStream_;
+ onChanged();
+ }
switch (other.getResponseCase()) {
case APPEND_RESULT:
{
@@ -2899,6 +2974,117 @@ public com.google.cloud.bigquery.storage.v1.RowError.Builder addRowErrorsBuilder
return rowErrorsBuilder_;
}
+ private java.lang.Object writeStream_ = "";
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return The writeStream.
+ */
+ public java.lang.String getWriteStream() {
+ java.lang.Object ref = writeStream_;
+ if (!(ref instanceof java.lang.String)) {
+ com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref;
+ java.lang.String s = bs.toStringUtf8();
+ writeStream_ = s;
+ return s;
+ } else {
+ return (java.lang.String) ref;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return The bytes for writeStream.
+ */
+ public com.google.protobuf.ByteString getWriteStreamBytes() {
+ java.lang.Object ref = writeStream_;
+ if (ref instanceof String) {
+ com.google.protobuf.ByteString b =
+ com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
+ writeStream_ = b;
+ return b;
+ } else {
+ return (com.google.protobuf.ByteString) ref;
+ }
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @param value The writeStream to set.
+ * @return This builder for chaining.
+ */
+ public Builder setWriteStream(java.lang.String value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+
+ writeStream_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearWriteStream() {
+
+ writeStream_ = getDefaultInstance().getWriteStream();
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @param value The bytes for writeStream to set.
+ * @return This builder for chaining.
+ */
+ public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) {
+ if (value == null) {
+ throw new NullPointerException();
+ }
+ checkByteStringIsUtf8(value);
+
+ writeStream_ = value;
+ onChanged();
+ return this;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java
index 0226d78642..cfafd12c9d 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsResponseOrBuilder.java
@@ -239,5 +239,32 @@ public interface AppendRowsResponseOrBuilder
*/
com.google.cloud.bigquery.storage.v1.RowErrorOrBuilder getRowErrorsOrBuilder(int index);
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return The writeStream.
+ */
+ java.lang.String getWriteStream();
+ /**
+ *
+ *
+ * <pre>
+ * The target of the append operation. Matches the write_stream in the
+ * corresponding request.
+ * </pre>
+ *
+ * <code>string write_stream = 5;</code>
+ *
+ * @return The bytes for writeStream.
+ */
+ com.google.protobuf.ByteString getWriteStreamBytes();
+
public com.google.cloud.bigquery.storage.v1.AppendRowsResponse.ResponseCase getResponseCase();
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java
index 4727bcb074..13336966b2 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequest.java
@@ -98,6 +98,11 @@ private CreateReadSessionRequest(
maxStreamCount_ = input.readInt32();
break;
}
+ case 32:
+ {
+ preferredMinStreamCount_ = input.readInt32();
+ break;
+ }
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
@@ -269,6 +274,30 @@ public int getMaxStreamCount() {
return maxStreamCount_;
}
+ public static final int PREFERRED_MIN_STREAM_COUNT_FIELD_NUMBER = 4;
+ private int preferredMinStreamCount_;
+ /**
+ *
+ *
+ * <pre>
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N-workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide less.
+ * </pre>
+ *
+ * <code>int32 preferred_min_stream_count = 4;</code>
+ *
+ * @return The preferredMinStreamCount.
+ */
+ @java.lang.Override
+ public int getPreferredMinStreamCount() {
+ return preferredMinStreamCount_;
+ }
+
private byte memoizedIsInitialized = -1;
@java.lang.Override
@@ -292,6 +321,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
if (maxStreamCount_ != 0) {
output.writeInt32(3, maxStreamCount_);
}
+ if (preferredMinStreamCount_ != 0) {
+ output.writeInt32(4, preferredMinStreamCount_);
+ }
unknownFields.writeTo(output);
}
@@ -310,6 +342,9 @@ public int getSerializedSize() {
if (maxStreamCount_ != 0) {
size += com.google.protobuf.CodedOutputStream.computeInt32Size(3, maxStreamCount_);
}
+ if (preferredMinStreamCount_ != 0) {
+ size += com.google.protobuf.CodedOutputStream.computeInt32Size(4, preferredMinStreamCount_);
+ }
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
@@ -332,6 +367,7 @@ public boolean equals(final java.lang.Object obj) {
if (!getReadSession().equals(other.getReadSession())) return false;
}
if (getMaxStreamCount() != other.getMaxStreamCount()) return false;
+ if (getPreferredMinStreamCount() != other.getPreferredMinStreamCount()) return false;
if (!unknownFields.equals(other.unknownFields)) return false;
return true;
}
@@ -351,6 +387,8 @@ public int hashCode() {
}
hash = (37 * hash) + MAX_STREAM_COUNT_FIELD_NUMBER;
hash = (53 * hash) + getMaxStreamCount();
+ hash = (37 * hash) + PREFERRED_MIN_STREAM_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + getPreferredMinStreamCount();
hash = (29 * hash) + unknownFields.hashCode();
memoizedHashCode = hash;
return hash;
@@ -507,6 +545,8 @@ public Builder clear() {
}
maxStreamCount_ = 0;
+ preferredMinStreamCount_ = 0;
+
return this;
}
@@ -542,6 +582,7 @@ public com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest buildPartia
result.readSession_ = readSessionBuilder_.build();
}
result.maxStreamCount_ = maxStreamCount_;
+ result.preferredMinStreamCount_ = preferredMinStreamCount_;
onBuilt();
return result;
}
@@ -603,6 +644,9 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.CreateReadSessionR
if (other.getMaxStreamCount() != 0) {
setMaxStreamCount(other.getMaxStreamCount());
}
+ if (other.getPreferredMinStreamCount() != 0) {
+ setPreferredMinStreamCount(other.getPreferredMinStreamCount());
+ }
this.mergeUnknownFields(other.unknownFields);
onChanged();
return this;
@@ -1035,6 +1079,76 @@ public Builder clearMaxStreamCount() {
return this;
}
+ private int preferredMinStreamCount_;
+ /**
+ *
+ *
+ * <pre>
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N-workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide less.
+ * </pre>
+ *
+ * <code>int32 preferred_min_stream_count = 4;</code>
+ *
+ * @return The preferredMinStreamCount.
+ */
+ @java.lang.Override
+ public int getPreferredMinStreamCount() {
+ return preferredMinStreamCount_;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N-workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide less.
+ * </pre>
+ *
+ * <code>int32 preferred_min_stream_count = 4;</code>
+ *
+ * @param value The preferredMinStreamCount to set.
+ * @return This builder for chaining.
+ */
+ public Builder setPreferredMinStreamCount(int value) {
+
+ preferredMinStreamCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ *
+ *
+ * <pre>
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N-workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide less.
+ * </pre>
+ *
+ * <code>int32 preferred_min_stream_count = 4;</code>
+ *
+ * @return This builder for chaining.
+ */
+ public Builder clearPreferredMinStreamCount() {
+
+ preferredMinStreamCount_ = 0;
+ onChanged();
+ return this;
+ }
+
@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java
index 39836f0e4a..a878ce7c8f 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/CreateReadSessionRequestOrBuilder.java
@@ -115,4 +115,23 @@ public interface CreateReadSessionRequestOrBuilder
* @return The maxStreamCount.
*/
int getMaxStreamCount();
+
+ /**
+ *
+ *
+ * <pre>
+ * The minimum preferred stream count. This parameter can be used to inform
+ * the service that there is a desired lower bound on the number of streams.
+ * This is typically a target parallelism of the client (e.g. a Spark
+ * cluster with N-workers would set this to a low multiple of N to ensure
+ * good cluster utilization).
+ * The system will make a best effort to provide at least this number of
+ * streams, but in some cases might provide less.
+ * </pre>
+ *
+ * <code>int32 preferred_min_stream_count = 4;</code>
+ *
+ * @return The preferredMinStreamCount.
+ */
+ int getPreferredMinStreamCount();
}
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java
index 3c8e5fa627..41a86d47f0 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/StorageProto.java
@@ -137,157 +137,159 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "igquery/storage/v1/table.proto\032\037google/p"
+ "rotobuf/timestamp.proto\032\036google/protobuf"
+ "/wrappers.proto\032\027google/rpc/status.proto"
- + "\"\303\001\n\030CreateReadSessionRequest\022C\n\006parent\030"
+ + "\"\347\001\n\030CreateReadSessionRequest\022C\n\006parent\030"
+ "\001 \001(\tB3\340A\002\372A-\n+cloudresourcemanager.goog"
+ "leapis.com/Project\022H\n\014read_session\030\002 \001(\013"
+ "2-.google.cloud.bigquery.storage.v1.Read"
- + "SessionB\003\340A\002\022\030\n\020max_stream_count\030\003 \001(\005\"i"
- + "\n\017ReadRowsRequest\022F\n\013read_stream\030\001 \001(\tB1"
- + "\340A\002\372A+\n)bigquerystorage.googleapis.com/R"
- + "eadStream\022\016\n\006offset\030\002 \001(\003\")\n\rThrottleSta"
- + "te\022\030\n\020throttle_percent\030\001 \001(\005\"\227\001\n\013StreamS"
- + "tats\022H\n\010progress\030\002 \001(\01326.google.cloud.bi"
- + "gquery.storage.v1.StreamStats.Progress\032>"
- + "\n\010Progress\022\031\n\021at_response_start\030\001 \001(\001\022\027\n"
- + "\017at_response_end\030\002 \001(\001\"\347\003\n\020ReadRowsRespo"
- + "nse\022?\n\tavro_rows\030\003 \001(\0132*.google.cloud.bi"
- + "gquery.storage.v1.AvroRowsH\000\022P\n\022arrow_re"
- + "cord_batch\030\004 \001(\01322.google.cloud.bigquery"
- + ".storage.v1.ArrowRecordBatchH\000\022\021\n\trow_co"
- + "unt\030\006 \001(\003\022<\n\005stats\030\002 \001(\0132-.google.cloud."
- + "bigquery.storage.v1.StreamStats\022G\n\016throt"
- + "tle_state\030\005 \001(\0132/.google.cloud.bigquery."
- + "storage.v1.ThrottleState\022H\n\013avro_schema\030"
- + "\007 \001(\0132,.google.cloud.bigquery.storage.v1"
- + ".AvroSchemaB\003\340A\003H\001\022J\n\014arrow_schema\030\010 \001(\013"
- + "2-.google.cloud.bigquery.storage.v1.Arro"
- + "wSchemaB\003\340A\003H\001B\006\n\004rowsB\010\n\006schema\"k\n\026Spli"
- + "tReadStreamRequest\022?\n\004name\030\001 \001(\tB1\340A\002\372A+"
- + "\n)bigquerystorage.googleapis.com/ReadStr"
- + "eam\022\020\n\010fraction\030\002 \001(\001\"\247\001\n\027SplitReadStrea"
- + "mResponse\022D\n\016primary_stream\030\001 \001(\0132,.goog"
- + "le.cloud.bigquery.storage.v1.ReadStream\022"
- + "F\n\020remainder_stream\030\002 \001(\0132,.google.cloud"
- + ".bigquery.storage.v1.ReadStream\"\233\001\n\030Crea"
- + "teWriteStreamRequest\0225\n\006parent\030\001 \001(\tB%\340A"
- + "\002\372A\037\n\035bigquery.googleapis.com/Table\022H\n\014w"
- + "rite_stream\030\002 \001(\0132-.google.cloud.bigquer"
- + "y.storage.v1.WriteStreamB\003\340A\002\"\210\003\n\021Append"
- + "RowsRequest\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A"
- + ",\n*bigquerystorage.googleapis.com/WriteS"
- + "tream\022+\n\006offset\030\002 \001(\0132\033.google.protobuf."
- + "Int64Value\022S\n\nproto_rows\030\004 \001(\0132=.google."
- + "cloud.bigquery.storage.v1.AppendRowsRequ"
- + "est.ProtoDataH\000\022\020\n\010trace_id\030\006 \001(\t\032\214\001\n\tPr"
- + "otoData\022D\n\rwriter_schema\030\001 \001(\0132-.google."
- + "cloud.bigquery.storage.v1.ProtoSchema\0229\n"
- + "\004rows\030\002 \001(\0132+.google.cloud.bigquery.stor"
- + "age.v1.ProtoRowsB\006\n\004rows\"\345\002\n\022AppendRowsR"
- + "esponse\022Z\n\rappend_result\030\001 \001(\0132A.google."
- + "cloud.bigquery.storage.v1.AppendRowsResp"
- + "onse.AppendResultH\000\022#\n\005error\030\002 \001(\0132\022.goo"
- + "gle.rpc.StatusH\000\022E\n\016updated_schema\030\003 \001(\013"
- + "2-.google.cloud.bigquery.storage.v1.Tabl"
- + "eSchema\022>\n\nrow_errors\030\004 \003(\0132*.google.clo"
- + "ud.bigquery.storage.v1.RowError\032;\n\014Appen"
- + "dResult\022+\n\006offset\030\001 \001(\0132\033.google.protobu"
- + "f.Int64ValueB\n\n\010response\"Y\n\025GetWriteStre"
- + "amRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquer"
- + "ystorage.googleapis.com/WriteStream\"s\n\036B"
- + "atchCommitWriteStreamsRequest\0225\n\006parent\030"
- + "\001 \001(\tB%\340A\002\372A\037\n\035bigquery.googleapis.com/T"
- + "able\022\032\n\rwrite_streams\030\002 \003(\tB\003\340A\002\"\231\001\n\037Bat"
- + "chCommitWriteStreamsResponse\022/\n\013commit_t"
- + "ime\030\001 \001(\0132\032.google.protobuf.Timestamp\022E\n"
- + "\rstream_errors\030\002 \003(\0132..google.cloud.bigq"
- + "uery.storage.v1.StorageError\"^\n\032Finalize"
- + "WriteStreamRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372A,"
- + "\n*bigquerystorage.googleapis.com/WriteSt"
- + "ream\"0\n\033FinalizeWriteStreamResponse\022\021\n\tr"
- + "ow_count\030\001 \001(\003\"\211\001\n\020FlushRowsRequest\022H\n\014w"
- + "rite_stream\030\001 \001(\tB2\340A\002\372A,\n*bigquerystora"
- + "ge.googleapis.com/WriteStream\022+\n\006offset\030"
- + "\002 \001(\0132\033.google.protobuf.Int64Value\"#\n\021Fl"
- + "ushRowsResponse\022\016\n\006offset\030\001 \001(\003\"\245\003\n\014Stor"
- + "ageError\022M\n\004code\030\001 \001(\0162?.google.cloud.bi"
- + "gquery.storage.v1.StorageError.StorageEr"
- + "rorCode\022\016\n\006entity\030\002 \001(\t\022\025\n\rerror_message"
- + "\030\003 \001(\t\"\236\002\n\020StorageErrorCode\022\"\n\036STORAGE_E"
- + "RROR_CODE_UNSPECIFIED\020\000\022\023\n\017TABLE_NOT_FOU"
- + "ND\020\001\022\034\n\030STREAM_ALREADY_COMMITTED\020\002\022\024\n\020ST"
- + "REAM_NOT_FOUND\020\003\022\027\n\023INVALID_STREAM_TYPE\020"
- + "\004\022\030\n\024INVALID_STREAM_STATE\020\005\022\024\n\020STREAM_FI"
- + "NALIZED\020\006\022 \n\034SCHEMA_MISMATCH_EXTRA_FIELD"
- + "S\020\007\022\031\n\025OFFSET_ALREADY_EXISTS\020\010\022\027\n\023OFFSET"
- + "_OUT_OF_RANGE\020\t\"\263\001\n\010RowError\022\r\n\005index\030\001 "
- + "\001(\003\022E\n\004code\030\002 \001(\01627.google.cloud.bigquer"
- + "y.storage.v1.RowError.RowErrorCode\022\017\n\007me"
- + "ssage\030\003 \001(\t\"@\n\014RowErrorCode\022\036\n\032ROW_ERROR"
- + "_CODE_UNSPECIFIED\020\000\022\020\n\014FIELDS_ERROR\020\0012\222\006"
- + "\n\014BigQueryRead\022\351\001\n\021CreateReadSession\022:.g"
- + "oogle.cloud.bigquery.storage.v1.CreateRe"
- + "adSessionRequest\032-.google.cloud.bigquery"
- + ".storage.v1.ReadSession\"i\202\323\344\223\002<\"7/v1/{re"
- + "ad_session.table=projects/*/datasets/*/t"
- + "ables/*}:\001*\332A$parent,read_session,max_st"
- + "ream_count\022\317\001\n\010ReadRows\0221.google.cloud.b"
- + "igquery.storage.v1.ReadRowsRequest\0322.goo"
- + "gle.cloud.bigquery.storage.v1.ReadRowsRe"
- + "sponse\"Z\202\323\344\223\002?\022=/v1/{read_stream=project"
- + "s/*/locations/*/sessions/*/streams/*}\332A\022"
- + "read_stream,offset0\001\022\306\001\n\017SplitReadStream"
- + "\0228.google.cloud.bigquery.storage.v1.Spli"
- + "tReadStreamRequest\0329.google.cloud.bigque"
- + "ry.storage.v1.SplitReadStreamResponse\">\202"
- + "\323\344\223\0028\0226/v1/{name=projects/*/locations/*/"
- + "sessions/*/streams/*}\032{\312A\036bigquerystorag"
- + "e.googleapis.com\322AWhttps://www.googleapi"
- + "s.com/auth/bigquery,https://www.googleap"
- + "is.com/auth/cloud-platform2\274\013\n\rBigQueryW"
- + "rite\022\327\001\n\021CreateWriteStream\022:.google.clou"
- + "d.bigquery.storage.v1.CreateWriteStreamR"
- + "equest\032-.google.cloud.bigquery.storage.v"
- + "1.WriteStream\"W\202\323\344\223\002;\"+/v1/{parent=proje"
- + "cts/*/datasets/*/tables/*}:\014write_stream"
- + "\332A\023parent,write_stream\022\322\001\n\nAppendRows\0223."
- + "google.cloud.bigquery.storage.v1.AppendR"
- + "owsRequest\0324.google.cloud.bigquery.stora"
- + "ge.v1.AppendRowsResponse\"U\202\323\344\223\002@\";/v1/{w"
- + "rite_stream=projects/*/datasets/*/tables"
- + "/*/streams/*}:\001*\332A\014write_stream(\0010\001\022\277\001\n\016"
- + "GetWriteStream\0227.google.cloud.bigquery.s"
- + "torage.v1.GetWriteStreamRequest\032-.google"
- + ".cloud.bigquery.storage.v1.WriteStream\"E"
- + "\202\323\344\223\0028\"3/v1/{name=projects/*/datasets/*/"
- + "tables/*/streams/*}:\001*\332A\004name\022\331\001\n\023Finali"
- + "zeWriteStream\022<.google.cloud.bigquery.st"
- + "orage.v1.FinalizeWriteStreamRequest\032=.go"
- + "ogle.cloud.bigquery.storage.v1.FinalizeW"
- + "riteStreamResponse\"E\202\323\344\223\0028\"3/v1/{name=pr"
- + "ojects/*/datasets/*/tables/*/streams/*}:"
- + "\001*\332A\004name\022\334\001\n\027BatchCommitWriteStreams\022@."
- + "google.cloud.bigquery.storage.v1.BatchCo"
- + "mmitWriteStreamsRequest\032A.google.cloud.b"
- + "igquery.storage.v1.BatchCommitWriteStrea"
- + "msResponse\"<\202\323\344\223\002-\022+/v1/{parent=projects"
- + "/*/datasets/*/tables/*}\332A\006parent\022\313\001\n\tFlu"
- + "shRows\0222.google.cloud.bigquery.storage.v"
- + "1.FlushRowsRequest\0323.google.cloud.bigque"
- + "ry.storage.v1.FlushRowsResponse\"U\202\323\344\223\002@\""
- + ";/v1/{write_stream=projects/*/datasets/*"
- + "/tables/*/streams/*}:\001*\332A\014write_stream\032\260"
- + "\001\312A\036bigquerystorage.googleapis.com\322A\213\001ht"
- + "tps://www.googleapis.com/auth/bigquery,h"
- + "ttps://www.googleapis.com/auth/bigquery."
- + "insertdata,https://www.googleapis.com/au"
- + "th/cloud-platformB\235\002\n$com.google.cloud.b"
- + "igquery.storage.v1B\014StorageProtoP\001ZGgoog"
- + "le.golang.org/genproto/googleapis/cloud/"
- + "bigquery/storage/v1;storage\252\002 Google.Clo"
- + "ud.BigQuery.Storage.V1\312\002 Google\\Cloud\\Bi"
- + "gQuery\\Storage\\V1\352AU\n\035bigquery.googleapi"
- + "s.com/Table\0224projects/{project}/datasets"
- + "/{dataset}/tables/{table}b\006proto3"
+ + "SessionB\003\340A\002\022\030\n\020max_stream_count\030\003 \001(\005\022\""
+ + "\n\032preferred_min_stream_count\030\004 \001(\005\"i\n\017Re"
+ + "adRowsRequest\022F\n\013read_stream\030\001 \001(\tB1\340A\002\372"
+ + "A+\n)bigquerystorage.googleapis.com/ReadS"
+ + "tream\022\016\n\006offset\030\002 \001(\003\")\n\rThrottleState\022\030"
+ + "\n\020throttle_percent\030\001 \001(\005\"\227\001\n\013StreamStats"
+ + "\022H\n\010progress\030\002 \001(\01326.google.cloud.bigque"
+ + "ry.storage.v1.StreamStats.Progress\032>\n\010Pr"
+ + "ogress\022\031\n\021at_response_start\030\001 \001(\001\022\027\n\017at_"
+ + "response_end\030\002 \001(\001\"\347\003\n\020ReadRowsResponse\022"
+ + "?\n\tavro_rows\030\003 \001(\0132*.google.cloud.bigque"
+ + "ry.storage.v1.AvroRowsH\000\022P\n\022arrow_record"
+ + "_batch\030\004 \001(\01322.google.cloud.bigquery.sto"
+ + "rage.v1.ArrowRecordBatchH\000\022\021\n\trow_count\030"
+ + "\006 \001(\003\022<\n\005stats\030\002 \001(\0132-.google.cloud.bigq"
+ + "uery.storage.v1.StreamStats\022G\n\016throttle_"
+ + "state\030\005 \001(\0132/.google.cloud.bigquery.stor"
+ + "age.v1.ThrottleState\022H\n\013avro_schema\030\007 \001("
+ + "\0132,.google.cloud.bigquery.storage.v1.Avr"
+ + "oSchemaB\003\340A\003H\001\022J\n\014arrow_schema\030\010 \001(\0132-.g"
+ + "oogle.cloud.bigquery.storage.v1.ArrowSch"
+ + "emaB\003\340A\003H\001B\006\n\004rowsB\010\n\006schema\"k\n\026SplitRea"
+ + "dStreamRequest\022?\n\004name\030\001 \001(\tB1\340A\002\372A+\n)bi"
+ + "gquerystorage.googleapis.com/ReadStream\022"
+ + "\020\n\010fraction\030\002 \001(\001\"\247\001\n\027SplitReadStreamRes"
+ + "ponse\022D\n\016primary_stream\030\001 \001(\0132,.google.c"
+ + "loud.bigquery.storage.v1.ReadStream\022F\n\020r"
+ + "emainder_stream\030\002 \001(\0132,.google.cloud.big"
+ + "query.storage.v1.ReadStream\"\233\001\n\030CreateWr"
+ + "iteStreamRequest\0225\n\006parent\030\001 \001(\tB%\340A\002\372A\037"
+ + "\n\035bigquery.googleapis.com/Table\022H\n\014write"
+ + "_stream\030\002 \001(\0132-.google.cloud.bigquery.st"
+ + "orage.v1.WriteStreamB\003\340A\002\"\210\003\n\021AppendRows"
+ + "Request\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A,\n*b"
+ + "igquerystorage.googleapis.com/WriteStrea"
+ + "m\022+\n\006offset\030\002 \001(\0132\033.google.protobuf.Int6"
+ + "4Value\022S\n\nproto_rows\030\004 \001(\0132=.google.clou"
+ + "d.bigquery.storage.v1.AppendRowsRequest."
+ + "ProtoDataH\000\022\020\n\010trace_id\030\006 \001(\t\032\214\001\n\tProtoD"
+ + "ata\022D\n\rwriter_schema\030\001 \001(\0132-.google.clou"
+ + "d.bigquery.storage.v1.ProtoSchema\0229\n\004row"
+ + "s\030\002 \001(\0132+.google.cloud.bigquery.storage."
+ + "v1.ProtoRowsB\006\n\004rows\"\373\002\n\022AppendRowsRespo"
+ + "nse\022Z\n\rappend_result\030\001 \001(\0132A.google.clou"
+ + "d.bigquery.storage.v1.AppendRowsResponse"
+ + ".AppendResultH\000\022#\n\005error\030\002 \001(\0132\022.google."
+ + "rpc.StatusH\000\022E\n\016updated_schema\030\003 \001(\0132-.g"
+ + "oogle.cloud.bigquery.storage.v1.TableSch"
+ + "ema\022>\n\nrow_errors\030\004 \003(\0132*.google.cloud.b"
+ + "igquery.storage.v1.RowError\022\024\n\014write_str"
+ + "eam\030\005 \001(\t\032;\n\014AppendResult\022+\n\006offset\030\001 \001("
+ + "\0132\033.google.protobuf.Int64ValueB\n\n\010respon"
+ + "se\"Y\n\025GetWriteStreamRequest\022@\n\004name\030\001 \001("
+ + "\tB2\340A\002\372A,\n*bigquerystorage.googleapis.co"
+ + "m/WriteStream\"s\n\036BatchCommitWriteStreams"
+ + "Request\0225\n\006parent\030\001 \001(\tB%\340A\002\372A\037\n\035bigquer"
+ + "y.googleapis.com/Table\022\032\n\rwrite_streams\030"
+ + "\002 \003(\tB\003\340A\002\"\231\001\n\037BatchCommitWriteStreamsRe"
+ + "sponse\022/\n\013commit_time\030\001 \001(\0132\032.google.pro"
+ + "tobuf.Timestamp\022E\n\rstream_errors\030\002 \003(\0132."
+ + ".google.cloud.bigquery.storage.v1.Storag"
+ + "eError\"^\n\032FinalizeWriteStreamRequest\022@\n\004"
+ + "name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorage.goog"
+ + "leapis.com/WriteStream\"0\n\033FinalizeWriteS"
+ + "treamResponse\022\021\n\trow_count\030\001 \001(\003\"\211\001\n\020Flu"
+ + "shRowsRequest\022H\n\014write_stream\030\001 \001(\tB2\340A\002"
+ + "\372A,\n*bigquerystorage.googleapis.com/Writ"
+ + "eStream\022+\n\006offset\030\002 \001(\0132\033.google.protobu"
+ + "f.Int64Value\"#\n\021FlushRowsResponse\022\016\n\006off"
+ + "set\030\001 \001(\003\"\245\003\n\014StorageError\022M\n\004code\030\001 \001(\016"
+ + "2?.google.cloud.bigquery.storage.v1.Stor"
+ + "ageError.StorageErrorCode\022\016\n\006entity\030\002 \001("
+ + "\t\022\025\n\rerror_message\030\003 \001(\t\"\236\002\n\020StorageErro"
+ + "rCode\022\"\n\036STORAGE_ERROR_CODE_UNSPECIFIED\020"
+ + "\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREAM_ALREADY"
+ + "_COMMITTED\020\002\022\024\n\020STREAM_NOT_FOUND\020\003\022\027\n\023IN"
+ + "VALID_STREAM_TYPE\020\004\022\030\n\024INVALID_STREAM_ST"
+ + "ATE\020\005\022\024\n\020STREAM_FINALIZED\020\006\022 \n\034SCHEMA_MI"
+ + "SMATCH_EXTRA_FIELDS\020\007\022\031\n\025OFFSET_ALREADY_"
+ + "EXISTS\020\010\022\027\n\023OFFSET_OUT_OF_RANGE\020\t\"\263\001\n\010Ro"
+ + "wError\022\r\n\005index\030\001 \001(\003\022E\n\004code\030\002 \001(\01627.go"
+ + "ogle.cloud.bigquery.storage.v1.RowError."
+ + "RowErrorCode\022\017\n\007message\030\003 \001(\t\"@\n\014RowErro"
+ + "rCode\022\036\n\032ROW_ERROR_CODE_UNSPECIFIED\020\000\022\020\n"
+ + "\014FIELDS_ERROR\020\0012\222\006\n\014BigQueryRead\022\351\001\n\021Cre"
+ + "ateReadSession\022:.google.cloud.bigquery.s"
+ + "torage.v1.CreateReadSessionRequest\032-.goo"
+ + "gle.cloud.bigquery.storage.v1.ReadSessio"
+ + "n\"i\202\323\344\223\002<\"7/v1/{read_session.table=proje"
+ + "cts/*/datasets/*/tables/*}:\001*\332A$parent,r"
+ + "ead_session,max_stream_count\022\317\001\n\010ReadRow"
+ + "s\0221.google.cloud.bigquery.storage.v1.Rea"
+ + "dRowsRequest\0322.google.cloud.bigquery.sto"
+ + "rage.v1.ReadRowsResponse\"Z\202\323\344\223\002?\022=/v1/{r"
+ + "ead_stream=projects/*/locations/*/sessio"
+ + "ns/*/streams/*}\332A\022read_stream,offset0\001\022\306"
+ + "\001\n\017SplitReadStream\0228.google.cloud.bigque"
+ + "ry.storage.v1.SplitReadStreamRequest\0329.g"
+ + "oogle.cloud.bigquery.storage.v1.SplitRea"
+ + "dStreamResponse\">\202\323\344\223\0028\0226/v1/{name=proje"
+ + "cts/*/locations/*/sessions/*/streams/*}\032"
+ + "{\312A\036bigquerystorage.googleapis.com\322AWhtt"
+ + "ps://www.googleapis.com/auth/bigquery,ht"
+ + "tps://www.googleapis.com/auth/cloud-plat"
+ + "form2\274\013\n\rBigQueryWrite\022\327\001\n\021CreateWriteSt"
+ + "ream\022:.google.cloud.bigquery.storage.v1."
+ + "CreateWriteStreamRequest\032-.google.cloud."
+ + "bigquery.storage.v1.WriteStream\"W\202\323\344\223\002;\""
+ + "+/v1/{parent=projects/*/datasets/*/table"
+ + "s/*}:\014write_stream\332A\023parent,write_stream"
+ + "\022\322\001\n\nAppendRows\0223.google.cloud.bigquery."
+ + "storage.v1.AppendRowsRequest\0324.google.cl"
+ + "oud.bigquery.storage.v1.AppendRowsRespon"
+ + "se\"U\202\323\344\223\002@\";/v1/{write_stream=projects/*"
+ + "/datasets/*/tables/*/streams/*}:\001*\332A\014wri"
+ + "te_stream(\0010\001\022\277\001\n\016GetWriteStream\0227.googl"
+ + "e.cloud.bigquery.storage.v1.GetWriteStre"
+ + "amRequest\032-.google.cloud.bigquery.storag"
+ + "e.v1.WriteStream\"E\202\323\344\223\0028\"3/v1/{name=proj"
+ + "ects/*/datasets/*/tables/*/streams/*}:\001*"
+ + "\332A\004name\022\331\001\n\023FinalizeWriteStream\022<.google"
+ + ".cloud.bigquery.storage.v1.FinalizeWrite"
+ + "StreamRequest\032=.google.cloud.bigquery.st"
+ + "orage.v1.FinalizeWriteStreamResponse\"E\202\323"
+ + "\344\223\0028\"3/v1/{name=projects/*/datasets/*/ta"
+ + "bles/*/streams/*}:\001*\332A\004name\022\334\001\n\027BatchCom"
+ + "mitWriteStreams\022@.google.cloud.bigquery."
+ + "storage.v1.BatchCommitWriteStreamsReques"
+ + "t\032A.google.cloud.bigquery.storage.v1.Bat"
+ + "chCommitWriteStreamsResponse\"<\202\323\344\223\002-\022+/v"
+ + "1/{parent=projects/*/datasets/*/tables/*"
+ + "}\332A\006parent\022\313\001\n\tFlushRows\0222.google.cloud."
+ + "bigquery.storage.v1.FlushRowsRequest\0323.g"
+ + "oogle.cloud.bigquery.storage.v1.FlushRow"
+ + "sResponse\"U\202\323\344\223\002@\";/v1/{write_stream=pro"
+ + "jects/*/datasets/*/tables/*/streams/*}:\001"
+ + "*\332A\014write_stream\032\260\001\312A\036bigquerystorage.go"
+ + "ogleapis.com\322A\213\001https://www.googleapis.c"
+ + "om/auth/bigquery,https://www.googleapis."
+ + "com/auth/bigquery.insertdata,https://www"
+ + ".googleapis.com/auth/cloud-platformB\235\002\n$"
+ + "com.google.cloud.bigquery.storage.v1B\014St"
+ + "orageProtoP\001ZGgoogle.golang.org/genproto"
+ + "/googleapis/cloud/bigquery/storage/v1;st"
+ + "orage\252\002 Google.Cloud.BigQuery.Storage.V1"
+ + "\312\002 Google\\Cloud\\BigQuery\\Storage\\V1\352AU\n\035"
+ + "bigquery.googleapis.com/Table\0224projects/"
+ + "{project}/datasets/{dataset}/tables/{tab"
+ + "le}b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
@@ -312,7 +314,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_bigquery_storage_v1_CreateReadSessionRequest_descriptor,
new java.lang.String[] {
- "Parent", "ReadSession", "MaxStreamCount",
+ "Parent", "ReadSession", "MaxStreamCount", "PreferredMinStreamCount",
});
internal_static_google_cloud_bigquery_storage_v1_ReadRowsRequest_descriptor =
getDescriptor().getMessageTypes().get(1);
@@ -412,7 +414,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable(
internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor,
new java.lang.String[] {
- "AppendResult", "Error", "UpdatedSchema", "RowErrors", "Response",
+ "AppendResult", "Error", "UpdatedSchema", "RowErrors", "WriteStream", "Response",
});
internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_AppendResult_descriptor =
internal_static_google_cloud_bigquery_storage_v1_AppendRowsResponse_descriptor
diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
index f3c974c646..e0b25c1afe 100644
--- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
+++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto
@@ -256,6 +256,16 @@ message CreateReadSessionRequest {
// determine an upper bound OR set this a size for the maximum "units of work"
// it can gracefully handle.
int32 max_stream_count = 3;
+
+ // The minimum preferred stream count. This parameter can be used to inform
+ // the service that there is a desired lower bound on the number of streams.
+ // This is typically a target parallelism of the client (e.g. a Spark
+ // cluster with N-workers would set this to a low multiple of N to ensure
+ // good cluster utilization).
+ //
+ // The system will make a best effort to provide at least this number of
+ // streams, but in some cases might provide less.
+ int32 preferred_min_stream_count = 4;
}
// Request message for `ReadRows`.
@@ -395,6 +405,9 @@ message CreateWriteStreamRequest {
// Due to the nature of AppendRows being a bidirectional streaming RPC, certain
// parts of the AppendRowsRequest need only be specified for the first request
// sent each time the gRPC network connection is opened/reopened.
+//
+// The size of a single AppendRowsRequest must be less than 10 MB in size.
+// Requests larger than this return an error, typically `INVALID_ARGUMENT`.
message AppendRowsRequest {
// ProtoData contains the data rows and schema when constructing append
// requests.
@@ -495,6 +508,10 @@ message AppendRowsResponse {
// appended. The API will return row level error info, so that the caller can
// remove the bad rows and retry the request.
repeated RowError row_errors = 4;
+
+ // The target of the append operation. Matches the write_stream in the
+ // corresponding request.
+ string write_stream = 5;
}
// Request message for `GetWriteStreamRequest`.
diff --git a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml
index 11e5555779..83649384c7 100644
--- a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml
+++ b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigquerystorage-v1beta1
- 0.141.0
+ 0.142.0
proto-google-cloud-bigquerystorage-v1beta1
PROTO library for proto-google-cloud-bigquerystorage-v1beta1
com.google.cloud
google-cloud-bigquerystorage-parent
- 2.17.0
+ 2.18.0
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml
index 419d2a3a33..d1a0733cf4 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml
+++ b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml
@@ -4,13 +4,13 @@
4.0.0
com.google.api.grpc
proto-google-cloud-bigquerystorage-v1beta2
- 0.141.0
+ 0.142.0
proto-google-cloud-bigquerystorage-v1beta2
PROTO library for proto-google-cloud-bigquerystorage-v1beta2
com.google.cloud
google-cloud-bigquerystorage-parent
- 2.17.0
+ 2.18.0
diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml
index b85bb355fe..010e480942 100644
--- a/samples/snapshot/pom.xml
+++ b/samples/snapshot/pom.xml
@@ -29,7 +29,7 @@
com.google.cloud
google-cloud-bigquerystorage
- 2.17.0
+ 2.18.0
diff --git a/versions.txt b/versions.txt
index dea44ced06..f60bb0187a 100644
--- a/versions.txt
+++ b/versions.txt
@@ -1,10 +1,10 @@
# Format:
# module:released-version:current-version
-google-cloud-bigquerystorage:2.17.0:2.17.0
-grpc-google-cloud-bigquerystorage-v1beta1:0.141.0:0.141.0
-grpc-google-cloud-bigquerystorage-v1beta2:0.141.0:0.141.0
-grpc-google-cloud-bigquerystorage-v1:2.17.0:2.17.0
-proto-google-cloud-bigquerystorage-v1beta1:0.141.0:0.141.0
-proto-google-cloud-bigquerystorage-v1beta2:0.141.0:0.141.0
-proto-google-cloud-bigquerystorage-v1:2.17.0:2.17.0
+google-cloud-bigquerystorage:2.18.0:2.18.0
+grpc-google-cloud-bigquerystorage-v1beta1:0.142.0:0.142.0
+grpc-google-cloud-bigquerystorage-v1beta2:0.142.0:0.142.0
+grpc-google-cloud-bigquerystorage-v1:2.18.0:2.18.0
+proto-google-cloud-bigquerystorage-v1beta1:0.142.0:0.142.0
+proto-google-cloud-bigquerystorage-v1beta2:0.142.0:0.142.0
+proto-google-cloud-bigquerystorage-v1:2.18.0:2.18.0