diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 53ebea9e6a..01a5ce1602 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -17,7 +17,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: ${{matrix.java}} - run: java -version - run: .kokoro/build.sh @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: 8 - run: java -version - run: .kokoro/build.bat @@ -66,17 +66,29 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: ${{matrix.java}} - run: java -version - run: .kokoro/dependencies.sh + javadoc: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-java@v3 + with: + distribution: temurin + java-version: 17 + - run: java -version + - run: .kokoro/build.sh + env: + JOB_TYPE: javadoc lint: runs-on: ubuntu-latest steps: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: 11 - run: java -version - run: .kokoro/build.sh @@ -88,7 +100,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-java@v2 with: - distribution: zulu + distribution: temurin java-version: 8 - run: java -version - run: .kokoro/build.sh diff --git a/CHANGELOG.md b/CHANGELOG.md index 6119b2db36..167f36f782 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## [2.40.0](https://github.com/googleapis/java-bigquerystorage/compare/v2.39.1...v2.40.0) (2023-07-17) + + +### Features + +* Add ResourceExhausted to retryable error for Write API unary calls ([#2178](https://github.com/googleapis/java-bigquerystorage/issues/2178)) ([d9b526a](https://github.com/googleapis/java-bigquerystorage/commit/d9b526a2e4109ef5ed95fb74373f2f13b06c7c54)) +* Improve json to proto conversion by caching schema ([#2179](https://github.com/googleapis/java-bigquerystorage/issues/2179)) ([afc550a](https://github.com/googleapis/java-bigquerystorage/commit/afc550aeacb0e3f26440eeb70d2cebbf65922c07)) + + +### Bug Fixes + +* Interpret Integer and Float values for TIMESTAMP as microseconds ([#2175](https://github.com/googleapis/java-bigquerystorage/issues/2175)) ([e5bb5d0](https://github.com/googleapis/java-bigquerystorage/commit/e5bb5d099ea0272c4bd447b7f8fef5207c14ffc5)) +* Support DATETIME field that has a space between date and time and has only date ([#2176](https://github.com/googleapis/java-bigquerystorage/issues/2176)) ([494ce85](https://github.com/googleapis/java-bigquerystorage/commit/494ce8513e8925b4330a2bf45641ba38db625c1d)) + + +### Dependencies + +* Update dependency com.google.auto.value:auto-value to v1.10.2 ([#2171](https://github.com/googleapis/java-bigquerystorage/issues/2171)) ([721908d](https://github.com/googleapis/java-bigquerystorage/commit/721908d412f1d82aff9aed8edcf727fc5b1bf950)) +* Update dependency com.google.auto.value:auto-value-annotations to 
v1.10.2 ([#2172](https://github.com/googleapis/java-bigquerystorage/issues/2172)) ([8a51fae](https://github.com/googleapis/java-bigquerystorage/commit/8a51fae180ced3b362acc350999157d3d6e0da6a)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.29.0 ([#2168](https://github.com/googleapis/java-bigquerystorage/issues/2168)) ([50ca432](https://github.com/googleapis/java-bigquerystorage/commit/50ca432854851f7cc89cb50a327d9641000b81ee)) +* Update dependency com.google.cloud:google-cloud-bigquery to v2.30.0 ([#2184](https://github.com/googleapis/java-bigquerystorage/issues/2184)) ([87f93a9](https://github.com/googleapis/java-bigquerystorage/commit/87f93a921c62cd71808cddc35382bbaabb7da54b)) +* Update dependency com.google.cloud:google-cloud-shared-dependencies to v3.13.0 ([#2180](https://github.com/googleapis/java-bigquerystorage/issues/2180)) ([7ce19e7](https://github.com/googleapis/java-bigquerystorage/commit/7ce19e7a4ca47df9590c1023abcc459248b1fec2)) +* Update dependency org.apache.avro:avro to v1.11.2 ([#2177](https://github.com/googleapis/java-bigquerystorage/issues/2177)) ([75ce0b5](https://github.com/googleapis/java-bigquerystorage/commit/75ce0b5d7009bbb47b91c222390cfe864b8bd84e)) + ## [2.39.1](https://github.com/googleapis/java-bigquerystorage/compare/v2.39.0...v2.39.1) (2023-06-22) diff --git a/README.md b/README.md index ac72bff3f6..9bf5e6afb5 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ If you are using Maven with [BOM][libraries-bom], add this to your pom.xml file: com.google.cloud libraries-bom - 26.17.0 + 26.18.0 pom import @@ -42,7 +42,7 @@ If you are using Maven without the BOM, add this to your dependencies: com.google.cloud google-cloud-bigquerystorage - 2.39.0 + 2.39.1 ``` @@ -50,20 +50,20 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.17.0') +implementation platform('com.google.cloud:libraries-bom:26.18.0') implementation 'com.google.cloud:google-cloud-bigquerystorage' ``` If you are using Gradle without BOM, add this to your dependencies: ```Groovy -implementation 'com.google.cloud:google-cloud-bigquerystorage:2.39.0' +implementation 'com.google.cloud:google-cloud-bigquerystorage:2.39.1' ``` If you are using SBT, add this to your dependencies: ```Scala -libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "2.39.0" +libraryDependencies += "com.google.cloud" % "google-cloud-bigquerystorage" % "2.39.1" ``` @@ -220,7 +220,7 @@ Java is a registered trademark of Oracle and/or its affiliates. 
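As a hedged illustration of the JSON append path the release notes and README above describe (not part of this diff; the project, dataset, table, and field names are hypothetical, and writing to the table's default stream is assumed), a minimal JsonStreamWriter sketch:

```java
import com.google.api.core.ApiFuture;
import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1.JsonStreamWriter;
import com.google.cloud.bigquery.storage.v1.TableName;
import org.json.JSONArray;
import org.json.JSONObject;

public class AppendJsonSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical identifiers; replace with a real project, dataset, and table.
    TableName table = TableName.of("my-project", "my_dataset", "my_table");
    try (BigQueryWriteClient client = BigQueryWriteClient.create();
        // Schema is fetched from the service; rows go to the table's default stream.
        JsonStreamWriter writer = JsonStreamWriter.newBuilder(table.toString(), client).build()) {
      JSONObject row = new JSONObject();
      // Space-separated DATETIME string and an integer TIMESTAMP in microseconds,
      // the two input forms whose handling is adjusted in this release.
      row.put("event_datetime", "2023-07-17 10:15:30");
      row.put("event_ts", 1689588930000000L);
      JSONArray rows = new JSONArray().put(row);
      ApiFuture<AppendRowsResponse> future = writer.append(rows);
      AppendRowsResponse response = future.get();
      System.out.println("Rows appended, has error: " + response.hasError());
    }
  }
}
```

The DATETIME value uses the space-separated form and the TIMESTAMP value is supplied as integer microseconds, matching the "Support DATETIME field that has a space" and "Interpret Integer and Float values for TIMESTAMP as microseconds" fixes listed in the changelog.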
[kokoro-badge-link-5]: http://storage.googleapis.com/cloud-devrel-public/java/badges/java-bigquerystorage/java11.html [stability-image]: https://img.shields.io/badge/stability-stable-green [maven-version-image]: https://img.shields.io/maven-central/v/com.google.cloud/google-cloud-bigquerystorage.svg -[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/2.39.0 +[maven-version-link]: https://central.sonatype.com/artifact/com.google.cloud/google-cloud-bigquerystorage/2.39.1 [authentication]: https://github.com/googleapis/google-cloud-java#authentication [auth-scopes]: https://developers.google.com/identity/protocols/oauth2/scopes [predefined-iam-roles]: https://cloud.google.com/iam/docs/understanding-roles#predefined_roles diff --git a/google-cloud-bigquerystorage-bom/pom.xml b/google-cloud-bigquerystorage-bom/pom.xml index 1baf525d43..c497f44625 100644 --- a/google-cloud-bigquerystorage-bom/pom.xml +++ b/google-cloud-bigquerystorage-bom/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage-bom - 2.39.1 + 2.40.0 pom com.google.cloud @@ -52,37 +52,37 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.40.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.40.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.40.0 diff --git a/google-cloud-bigquerystorage/clirr-ignored-differences.xml b/google-cloud-bigquerystorage/clirr-ignored-differences.xml index 96d4b3d595..1ce4f651e5 100644 --- a/google-cloud-bigquerystorage/clirr-ignored-differences.xml +++ b/google-cloud-bigquerystorage/clirr-ignored-differences.xml @@ -157,5 +157,17 @@ com/google/cloud/bigquery/storage/v1/JsonStreamWriter boolean isDone() + + 7006 + com/google/cloud/bigquery/storage/v1/ToProtoConverter + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Object, boolean) + java.util.List + + + 7005 + com/google/cloud/bigquery/storage/v1/ToProtoConverter + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Object, boolean) + com.google.protobuf.DynamicMessage convertToProtoMessage(com.google.protobuf.Descriptors$Descriptor, com.google.cloud.bigquery.storage.v1.TableSchema, java.lang.Iterable, boolean) + diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index 4ed2bae1f8..38aea24aa6 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.40.0 jar BigQuery Storage https://github.com/googleapis/java-bigquerystorage @@ -11,7 +11,7 @@ 
com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 google-cloud-bigquerystorage @@ -75,12 +75,12 @@ com.google.auto.value auto-value - 1.10.1 + 1.10.2 com.google.auto.value auto-value-annotations - 1.10.1 + 1.10.2 com.google.protobuf @@ -179,7 +179,7 @@ org.apache.avro avro - 1.11.1 + 1.11.2 test diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java index be2d1fd929..e52ada64d8 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/CivilTimeEncoder.java @@ -30,6 +30,7 @@ *

The valid range and number of bits required by each date/time field is as the following: * * + * * * * diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java index 7e86da4d81..1f0e1e1989 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ConnectionWorker.java @@ -843,7 +843,8 @@ private void requestCallback(AppendRowsResponse response) { log.fine( String.format( - "Got response with schema updated (omitting updated schema in response here): %s writer id %s", + "Got response with schema updated (omitting updated schema in response here): %s" + + " writer id %s", responseWithUpdatedSchemaRemoved.toString(), writerId)); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java index 2f9083e4e9..8824e43c77 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/Exceptions.java @@ -243,10 +243,10 @@ public String getStreamName() { } /** - * This exception is thrown from {@link SchemaAwareStreamWriter#append()} when the client side - * Proto serialization fails. It can also be thrown by the server in case rows contains invalid - * data. The exception contains a Map of indexes of faulty rows and the corresponding error - * message. + * This exception is thrown from {@link SchemaAwareStreamWriter#append(Iterable)} when the client + * side Proto serialization fails. It can also be thrown by the server in case rows contains + * invalid data. The exception contains a Map of indexes of faulty rows and the corresponding + * error message. */ public static class AppendSerializationError extends AppendSerializtionError { @@ -259,6 +259,29 @@ public AppendSerializationError( } } + /** This exception is thrown from proto converter to wrap the row index to error mapping. */ + static class RowIndexToErrorException extends IllegalArgumentException { + Map rowIndexToErrorMessage; + + boolean hasDataUnknownError; + + public RowIndexToErrorException( + Map rowIndexToErrorMessage, boolean hasDataUnknownError) { + this.rowIndexToErrorMessage = rowIndexToErrorMessage; + this.hasDataUnknownError = hasDataUnknownError; + } + + // This message should not be exposed to the user directly. + // Please examine individual row's error through `rowIndexToErrorMessage`. + public String getMessage() { + return "The map of row index to error message is " + rowIndexToErrorMessage.toString(); + } + + public boolean hasDataUnknownError() { + return hasDataUnknownError; + } + } + /** This exception is used internally to handle field level parsing errors. */ public static class FieldParseError extends IllegalArgumentException { private final String fieldName; @@ -344,7 +367,8 @@ protected InflightRequestsLimitExceededException(String writerId, long currentLi super( Status.fromCode(Status.Code.RESOURCE_EXHAUSTED) .withDescription( - "Exceeds client side inflight buffer, consider add more buffer or open more connections. Current limit: " + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections. 
Current limit: " + currentLimit), writerId, currentLimit); @@ -356,7 +380,8 @@ protected InflightBytesLimitExceededException(String writerId, long currentLimit super( Status.fromCode(Status.Code.RESOURCE_EXHAUSTED) .withDescription( - "Exceeds client side inflight buffer, consider add more buffer or open more connections. Current limit: " + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections. Current limit: " + currentLimit), writerId, currentLimit); diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java index 0d47910e7b..79d2582a89 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriter.java @@ -54,8 +54,8 @@ private JsonStreamWriter(SchemaAwareStreamWriter.Builder builder) * is created with the updated TableSchema. * * @param jsonArr The JSON array that contains JSONObjects to be written - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr) throws IOException, Descriptors.DescriptorValidationException { @@ -70,8 +70,8 @@ public ApiFuture append(JSONArray jsonArr) * * @param jsonArr The JSON array that contains JSONObjects to be written * @param offset Offset for deduplication - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr, long offset) throws IOException, Descriptors.DescriptorValidationException { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java index 3d1e1e0b5d..6cde31081a 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessage.java @@ -16,6 +16,7 @@ package com.google.cloud.bigquery.storage.v1; import com.google.api.pathtemplate.ValidationException; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableMap; import com.google.common.primitives.Doubles; @@ -29,7 +30,10 @@ import java.math.BigDecimal; import java.math.RoundingMode; import java.time.LocalDate; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; @@ -100,6 +104,25 @@ public class JsonToProtoMessage implements ToProtoConverter { .toFormatter() .withZone(ZoneOffset.UTC); + private static final DateTimeFormatter DATETIME_FORMATTER = + new DateTimeFormatterBuilder() + .parseLenient() + .append(DateTimeFormatter.ISO_LOCAL_DATE) + .optionalStart() + .optionalStart() + .parseCaseInsensitive() + .appendLiteral('T') + .optionalEnd() + .optionalStart() + .appendLiteral(' ') + .optionalEnd() + .append(DateTimeFormatter.ISO_LOCAL_TIME) + 
.optionalEnd() + .parseDefaulting(ChronoField.HOUR_OF_DAY, 0) + .parseDefaulting(ChronoField.MINUTE_OF_HOUR, 0) + .parseDefaulting(ChronoField.SECOND_OF_MINUTE, 0) + .toFormatter(); + /** You can use {@link #INSTANCE} instead */ public JsonToProtoMessage() {} @@ -121,7 +144,10 @@ public static DynamicMessage convertJsonToProtoMessage( } /** - * Converts input message to Protobuf + * Converts input message to Protobuf. + * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. * * @param protoSchema the schema of the output Protobuf schems. * @param tableSchema tha underlying table schema for which Protobuf is being built. @@ -130,15 +156,37 @@ public static DynamicMessage convertJsonToProtoMessage( * schema should be accepted. * @return Converted message in Protobuf format. */ - @Override public DynamicMessage convertToProtoMessage( Descriptor protoSchema, TableSchema tableSchema, Object json, boolean ignoreUnknownFields) { return convertToProtoMessage(protoSchema, tableSchema, (JSONObject) json, ignoreUnknownFields); } + /** + * Converts Json array to list of Protobuf + * + * @param protoSchema the schema of the output Protobuf schems. + * @param tableSchema tha underlying table schema for which Protobuf is being built. + * @param jsonArray the input JSON array converted to Protobuf. + * @param ignoreUnknownFields flag indicating that the additional fields not present in the output + * schema should be accepted. + * @return Converted message in Protobuf format. + */ + @Override + public List convertToProtoMessage( + Descriptor protoSchema, + TableSchema tableSchema, + Iterable jsonArray, + boolean ignoreUnknownFields) { + return convertToProtoMessage( + protoSchema, tableSchema, (JSONArray) jsonArray, ignoreUnknownFields); + } + /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * * @param protoSchema * @param json * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. @@ -155,6 +203,9 @@ public DynamicMessage convertToProtoMessage(Descriptor protoSchema, JSONObject j /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * * @param protoSchema * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, * NUMERIC, BIGNUMERIC @@ -175,6 +226,9 @@ public DynamicMessage convertToProtoMessage( /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * + *

WARNING: it's much more efficient to call the other APIs accepting json array if the jsons + * share the same table schema. + * * @param protoSchema * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, * NUMERIC, BIGNUMERIC @@ -189,11 +243,48 @@ public DynamicMessage convertToProtoMessage( Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); Preconditions.checkNotNull(tableSchema, "TableSchema is null."); Preconditions.checkState(json.length() != 0, "JSONObject is empty."); - return convertToProtoMessage( protoSchema, tableSchema.getFieldsList(), json, "root", ignoreUnknownFields); } + /** + * Converts Json array to list of protocol buffer messages given the protocol buffer descriptor. + * + * @param protoSchema + * @param tableSchema bigquery table schema is needed for type conversion of DATETIME, TIME, + * NUMERIC, BIGNUMERIC + * @param jsonArray + * @param ignoreUnknownFields allows unknown fields in JSON input to be ignored. + * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. + */ + public List convertToProtoMessage( + Descriptor protoSchema, + TableSchema tableSchema, + JSONArray jsonArray, + boolean ignoreUnknownFields) + throws IllegalArgumentException { + Preconditions.checkNotNull(jsonArray, "jsonArray is null."); + Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); + Preconditions.checkNotNull(tableSchema, "tableSchema is null."); + Preconditions.checkState(jsonArray.length() != 0, "jsonArray is empty."); + + return convertToProtoMessage( + protoSchema, tableSchema.getFieldsList(), jsonArray, "root", ignoreUnknownFields); + } + + private DynamicMessage convertToProtoMessage( + Descriptor protoSchema, + List tableSchema, + JSONObject jsonObject, + String jsonScope, + boolean ignoreUnknownFields) { + JSONArray jsonArray = new JSONArray(); + jsonArray.put(jsonObject); + return convertToProtoMessage( + protoSchema, tableSchema, jsonArray, jsonScope, ignoreUnknownFields) + .get(0); + } + /** * Converts Json data to protocol buffer messages given the protocol buffer descriptor. * @@ -202,84 +293,162 @@ public DynamicMessage convertToProtoMessage( * @param jsonScope Debugging purposes * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. */ - private DynamicMessage convertToProtoMessage( + private List convertToProtoMessage( Descriptor protoSchema, List tableSchema, - JSONObject json, + JSONArray jsonArray, String jsonScope, boolean ignoreUnknownFields) - throws IllegalArgumentException { - - DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema); - String[] jsonNames = JSONObject.getNames(json); - if (jsonNames == null) { - return protoMsg.build(); - } - for (String jsonName : jsonNames) { - // We want lowercase here to support case-insensitive data writes. - // The protobuf descriptor that is used is assumed to have all lowercased fields - String jsonFieldLocator = jsonName.toLowerCase(); + throws RowIndexToErrorException { + List messageList = new ArrayList<>(); + Map jsonNameToMetadata = new HashMap<>(); + Map rowIndexToErrorMessage = new HashMap<>(); - // If jsonName is not compatible with proto naming convention, we should look by its - // placeholder name. - if (!BigQuerySchemaUtil.isProtoCompatible(jsonFieldLocator)) { - jsonFieldLocator = BigQuerySchemaUtil.generatePlaceholderFieldName(jsonFieldLocator); - } - String currentScope = jsonScope + "." 
+ jsonName; - FieldDescriptor field = protoSchema.findFieldByName(jsonFieldLocator); - if (field == null && !ignoreUnknownFields) { - throw new Exceptions.DataHasUnknownFieldException(currentScope); - } else if (field == null) { - continue; - } - TableFieldSchema fieldSchema = null; - if (tableSchema != null) { - // protoSchema is generated from tableSchema so their field ordering should match. - fieldSchema = tableSchema.get(field.getIndex()); - if (!fieldSchema.getName().toLowerCase().equals(BigQuerySchemaUtil.getFieldName(field))) { - throw new ValidationException( - "Field at index " - + field.getIndex() - + " has mismatch names (" - + fieldSchema.getName() - + ") (" - + field.getName() - + ")"); - } - } + boolean hasDataUnknownError = false; + for (int i = 0; i < jsonArray.length(); i++) { try { - if (!field.isRepeated()) { - fillField( - protoMsg, field, fieldSchema, json, jsonName, currentScope, ignoreUnknownFields); + DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema); + JSONObject jsonObject = jsonArray.getJSONObject(i); + String[] jsonNames = JSONObject.getNames(jsonObject); + if (jsonNames == null) { + messageList.add(protoMsg.build()); + continue; + } + for (String jsonName : jsonNames) { + String currentScope = jsonScope + "." + jsonName; + FieldDescriptorAndFieldTableSchema fieldDescriptorAndFieldTableSchema = + jsonNameToMetadata.computeIfAbsent( + currentScope, + k -> { + return computeDescriptorAndSchema( + currentScope, ignoreUnknownFields, jsonName, protoSchema, tableSchema); + }); + if (fieldDescriptorAndFieldTableSchema == null) { + continue; + } + FieldDescriptor field = fieldDescriptorAndFieldTableSchema.fieldDescriptor; + TableFieldSchema tableFieldSchema = fieldDescriptorAndFieldTableSchema.tableFieldSchema; + try { + if (!field.isRepeated()) { + fillField( + protoMsg, + field, + tableFieldSchema, + jsonObject, + jsonName, + currentScope, + ignoreUnknownFields); + } else { + fillRepeatedField( + protoMsg, + field, + tableFieldSchema, + jsonObject, + jsonName, + currentScope, + ignoreUnknownFields); + } + } catch (Exceptions.FieldParseError ex) { + throw ex; + } catch (Exception ex) { + // This function is recursively called, so this throw will be caught and throw directly + // out by the catch above. + throw new Exceptions.FieldParseError( + currentScope, + tableFieldSchema != null + ? tableFieldSchema.getType().name() + : field.getType().name(), + ex); + } + } + DynamicMessage msg; + try { + msg = protoMsg.build(); + } catch (UninitializedMessageException e) { + String errorMsg = e.getMessage(); + int idxOfColon = errorMsg.indexOf(":"); + String missingFieldName = errorMsg.substring(idxOfColon + 2); + throw new IllegalArgumentException( + String.format( + "JSONObject does not have the required field %s.%s.", + jsonScope, missingFieldName)); + } + messageList.add(msg); + } catch (IllegalArgumentException exception) { + if (exception instanceof Exceptions.DataHasUnknownFieldException) { + hasDataUnknownError = true; + } + if (exception instanceof Exceptions.FieldParseError) { + Exceptions.FieldParseError ex = (Exceptions.FieldParseError) exception; + rowIndexToErrorMessage.put( + i, + "Field " + + ex.getFieldName() + + " failed to convert to " + + ex.getBqType() + + ". 
Error: " + + ex.getCause().getMessage()); } else { - fillRepeatedField( - protoMsg, field, fieldSchema, json, jsonName, currentScope, ignoreUnknownFields); + rowIndexToErrorMessage.put(i, exception.getMessage()); } - } catch (Exceptions.FieldParseError ex) { - throw ex; - } catch (Exception ex) { - // This function is recursively called, so this throw will be caught and throw directly out - // by the catch - // above. - throw new Exceptions.FieldParseError( - currentScope, - fieldSchema != null ? fieldSchema.getType().name() : field.getType().name(), - ex); } } + if (!rowIndexToErrorMessage.isEmpty()) { + throw new RowIndexToErrorException(rowIndexToErrorMessage, hasDataUnknownError); + } + return messageList; + } - DynamicMessage msg; - try { - msg = protoMsg.build(); - } catch (UninitializedMessageException e) { - String errorMsg = e.getMessage(); - int idxOfColon = errorMsg.indexOf(":"); - String missingFieldName = errorMsg.substring(idxOfColon + 2); - throw new IllegalArgumentException( - String.format( - "JSONObject does not have the required field %s.%s.", jsonScope, missingFieldName)); + private static final class FieldDescriptorAndFieldTableSchema { + TableFieldSchema tableFieldSchema; + + // Field descriptor + FieldDescriptor fieldDescriptor; + } + + private FieldDescriptorAndFieldTableSchema computeDescriptorAndSchema( + String currentScope, + boolean ignoreUnknownFields, + String jsonName, + Descriptor protoSchema, + List tableFieldSchemaList) { + + // We want lowercase here to support case-insensitive data writes. + // The protobuf descriptor that is used is assumed to have all lowercased fields + String jsonFieldLocator = jsonName.toLowerCase(); + + // If jsonName is not compatible with proto naming convention, we should look by its + // placeholder name. + if (!BigQuerySchemaUtil.isProtoCompatible(jsonFieldLocator)) { + jsonFieldLocator = BigQuerySchemaUtil.generatePlaceholderFieldName(jsonFieldLocator); } - return msg; + + FieldDescriptor field = protoSchema.findFieldByName(jsonFieldLocator); + if (field == null && !ignoreUnknownFields) { + throw new Exceptions.DataHasUnknownFieldException(currentScope); + } else if (field == null) { + return null; + } + TableFieldSchema fieldSchema = null; + if (tableFieldSchemaList != null) { + // protoSchema is generated from tableSchema so their field ordering should match. 
+ fieldSchema = tableFieldSchemaList.get(field.getIndex()); + if (!fieldSchema.getName().toLowerCase().equals(BigQuerySchemaUtil.getFieldName(field))) { + throw new ValidationException( + "Field at index " + + field.getIndex() + + " has mismatch names (" + + fieldSchema.getName() + + ") (" + + field.getName() + + ")"); + } + } + FieldDescriptorAndFieldTableSchema fieldDescriptorAndFieldTableSchema = + new FieldDescriptorAndFieldTableSchema(); + fieldDescriptorAndFieldTableSchema.fieldDescriptor = field; + fieldDescriptorAndFieldTableSchema.tableFieldSchema = fieldSchema; + return fieldDescriptorAndFieldTableSchema; } /** @@ -302,7 +471,6 @@ private void fillField( String currentScope, boolean ignoreUnknownFields) throws IllegalArgumentException { - java.lang.Object val = json.get(exactJsonKeyName); if (val == JSONObject.NULL) { return; @@ -402,7 +570,8 @@ private void fillField( if (val instanceof String) { protoMsg.setField( fieldDescriptor, - CivilTimeEncoder.encodePacked64DatetimeMicros(LocalDateTime.parse((String) val))); + CivilTimeEncoder.encodePacked64DatetimeMicros( + LocalDateTime.parse((String) val, DATETIME_FORMATTER))); return; } else if (val instanceof Long) { protoMsg.setField(fieldDescriptor, val); @@ -422,7 +591,7 @@ private void fillField( if (val instanceof String) { Double parsed = Doubles.tryParse((String) val); if (parsed != null) { - protoMsg.setField(fieldDescriptor, parsed.longValue() * 10000000); + protoMsg.setField(fieldDescriptor, parsed.longValue()); return; } TemporalAccessor parsedTime = TIMESTAMP_FORMATTER.parse((String) val); @@ -435,7 +604,7 @@ private void fillField( protoMsg.setField(fieldDescriptor, val); return; } else if (val instanceof Integer) { - protoMsg.setField(fieldDescriptor, Long.valueOf((Integer) val) * 10000000); + protoMsg.setField(fieldDescriptor, Long.valueOf((Integer) val)); return; } } @@ -663,7 +832,8 @@ private void fillRepeatedField( if (val instanceof String) { protoMsg.addRepeatedField( fieldDescriptor, - CivilTimeEncoder.encodePacked64DatetimeMicros(LocalDateTime.parse((String) val))); + CivilTimeEncoder.encodePacked64DatetimeMicros( + LocalDateTime.parse((String) val, DATETIME_FORMATTER))); } else if (val instanceof Long) { protoMsg.addRepeatedField(fieldDescriptor, val); } else { @@ -684,7 +854,7 @@ private void fillRepeatedField( if (val instanceof String) { Double parsed = Doubles.tryParse((String) val); if (parsed != null) { - protoMsg.addRepeatedField(fieldDescriptor, parsed.longValue() * 10000000); + protoMsg.addRepeatedField(fieldDescriptor, parsed.longValue()); } else { TemporalAccessor parsedTime = TIMESTAMP_FORMATTER.parse((String) val); protoMsg.addRepeatedField( @@ -695,7 +865,7 @@ private void fillRepeatedField( } else if (val instanceof Long) { protoMsg.addRepeatedField(fieldDescriptor, val); } else if (val instanceof Integer) { - protoMsg.addRepeatedField(fieldDescriptor, ((Integer) val) * 10000000); + protoMsg.addRepeatedField(fieldDescriptor, Long.valueOf((Integer) val)); } else { throwWrongFieldType(fieldDescriptor, currentScope, index); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java index 10fceeee68..32f8e0f7e3 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java +++ 
b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/SchemaAwareStreamWriter.java @@ -21,13 +21,15 @@ import com.google.api.gax.core.ExecutorProvider; import com.google.api.gax.rpc.TransportChannelProvider; import com.google.cloud.bigquery.storage.v1.Exceptions.AppendSerializationError; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; import com.google.common.base.Preconditions; import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.Descriptors.DescriptorValidationException; -import com.google.protobuf.Message; +import com.google.protobuf.DynamicMessage; import com.google.rpc.Code; import java.io.IOException; import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.logging.Logger; import java.util.regex.Matcher; @@ -101,8 +103,8 @@ private SchemaAwareStreamWriter(Builder builder) * created with the updated TableSchema. * * @param items The array that contains objects to be written - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(Iterable items) throws IOException, DescriptorValidationException { @@ -124,19 +126,23 @@ private void refreshWriter(TableSchema updatedSchema) this.streamWriter = streamWriterBuilder.setWriterSchema(this.protoSchema).build(); } - private Message buildMessage(T item) + private List buildMessage(Iterable items) throws InterruptedException, DescriptorValidationException, IOException { try { return this.toProtoConverter.convertToProtoMessage( - this.descriptor, this.tableSchema, item, ignoreUnknownFields); - } catch (Exceptions.DataHasUnknownFieldException ex) { + this.descriptor, this.tableSchema, items, ignoreUnknownFields); + } catch (RowIndexToErrorException ex) { + // We only retry for data unknown error. + if (!ex.hasDataUnknownError) { + throw ex; + } // Directly return error when stream writer refresh is disabled. 
if (this.skipRefreshStreamWriter) { throw ex; } LOG.warning( - "Saw unknown field " - + ex.getFieldName() + "Saw unknown field error during proto message conversin within error messages" + + ex.rowIndexToErrorMessage + ", try to refresh the writer with updated schema, stream: " + streamName); GetWriteStreamRequest writeStreamRequest = @@ -147,7 +153,7 @@ private Message buildMessage(T item) WriteStream writeStream = client.getWriteStream(writeStreamRequest); refreshWriter(writeStream.getTableSchema()); return this.toProtoConverter.convertToProtoMessage( - this.descriptor, this.tableSchema, item, ignoreUnknownFields); + this.descriptor, this.tableSchema, items, ignoreUnknownFields); } } /** @@ -158,8 +164,8 @@ private Message buildMessage(T item) * * @param items The collection that contains objects to be written * @param offset Offset for deduplication - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(Iterable items, long offset) throws IOException, DescriptorValidationException { @@ -169,7 +175,6 @@ public ApiFuture append(Iterable items, long offset) if (!this.skipRefreshStreamWriter && this.streamWriter.getUpdatedSchema() != null) { refreshWriter(this.streamWriter.getUpdatedSchema()); } - ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); // Any error in convertToProtoMessage will throw an // IllegalArgumentException/IllegalStateException/NullPointerException. @@ -177,29 +182,15 @@ public ApiFuture append(Iterable items, long offset) // After the conversion is finished an AppendSerializtionError exception that contains all the // conversion errors will be thrown. Map rowIndexToErrorMessage = new HashMap<>(); - int i = -1; - for (T item : items) { - i += 1; - try { - Message protoMessage = buildMessage(item); - rowsBuilder.addSerializedRows(protoMessage.toByteString()); - } catch (IllegalArgumentException exception) { - if (exception instanceof Exceptions.FieldParseError) { - Exceptions.FieldParseError ex = (Exceptions.FieldParseError) exception; - rowIndexToErrorMessage.put( - i, - "Field " - + ex.getFieldName() - + " failed to convert to " - + ex.getBqType() - + ". 
Error: " - + ex.getCause().getMessage()); - } else { - rowIndexToErrorMessage.put(i, exception.getMessage()); - } - } catch (InterruptedException ex) { - throw new RuntimeException(ex); + try { + List protoMessages = buildMessage(items); + for (DynamicMessage dynamicMessage : protoMessages) { + rowsBuilder.addSerializedRows(dynamicMessage.toByteString()); } + } catch (RowIndexToErrorException exception) { + rowIndexToErrorMessage = exception.rowIndexToErrorMessage; + } catch (InterruptedException ex) { + throw new RuntimeException(ex); } if (!rowIndexToErrorMessage.isEmpty()) { diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java index 6ab9346da5..d6f0f99ca9 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/StreamWriter.java @@ -247,7 +247,8 @@ private StreamWriter(Builder builder) throws IOException { String fetchedLocation = writeStream.getLocation(); log.info( String.format( - "Fethed location %s for stream name %s, extracted project and dataset name: %s\"", + "Fethed location %s for stream name %s, extracted project and dataset" + + " name: %s\"", fetchedLocation, streamName, datasetAndProjectName)); return fetchedLocation; }); @@ -494,12 +495,12 @@ public void close() { singleConnectionOrConnectionPool.close(this); } - /** Constructs a new {@link StreamWriterV2.Builder} using the given stream and client. */ + /** Constructs a new {@link StreamWriter.Builder} using the given stream and client. */ public static StreamWriter.Builder newBuilder(String streamName, BigQueryWriteClient client) { return new StreamWriter.Builder(streamName, client); } - /** Constructs a new {@link StreamWriterV2.Builder} using the given stream. */ + /** Constructs a new {@link StreamWriter.Builder} using the given stream. 
*/ public static StreamWriter.Builder newBuilder(String streamName) { return new StreamWriter.Builder(streamName); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java index ca17ed11e7..76ef223e24 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/ToProtoConverter.java @@ -17,11 +17,12 @@ import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; +import java.util.List; public interface ToProtoConverter { - DynamicMessage convertToProtoMessage( + List convertToProtoMessage( Descriptors.Descriptor protoSchema, TableSchema tableSchema, - T inputObject, + Iterable inputObject, boolean ignoreUnknownFields); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java index 6d0c86e18a..9331a9dc48 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryWriteStubSettings.java @@ -259,7 +259,9 @@ public static class Builder extends StubSettings.BuildernewArrayList( - StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); + StatusCode.Code.DEADLINE_EXCEEDED, + StatusCode.Code.UNAVAILABLE, + StatusCode.Code.RESOURCE_EXHAUSTED))); RETRYABLE_CODE_DEFINITIONS = definitions.build(); } diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java index 1ab8d9eb17..1ff98117b0 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/CivilTimeEncoder.java @@ -30,6 +30,7 @@ *

The valid range and number of bits required by each date/time field is as the following:
 *
 * <table>
+ *   <caption>Range and bits for date/time fields</caption>
 *   <tr> <th>Field</th> <th>Range</th> <th>#Bits</th> </tr>
 *   <tr> <td>Year</td> <td>[1, 9999]</td> <td>14</td> </tr>
 *   <tr> <td>Month</td> <td>[1, 12]</td> <td>4</td> </tr>
+ * * * * diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java index 5837e5c4a5..a86a35b7ab 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta2/JsonStreamWriter.java @@ -83,8 +83,8 @@ private JsonStreamWriter(Builder builder) * data to protobuf messages, then using StreamWriter's append() to write the data. * * @param jsonArr The JSON array that contains JSONObjects to be written - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr) { return append(jsonArr, -1); @@ -96,8 +96,8 @@ public ApiFuture append(JSONArray jsonArr) { * * @param jsonArr The JSON array that contains JSONObjects to be written * @param offset Offset for deduplication - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture + * @return {@code ApiFuture} returns an AppendRowsResponse message wrapped in + * an ApiFuture */ public ApiFuture append(JSONArray jsonArr, long offset) { ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java index eed96886a4..a6ad2df000 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonStreamWriterTest.java @@ -1280,7 +1280,8 @@ public void run() throws Throwable { ex.getStatus() .getDescription() .contains( - "Exceeds client side inflight buffer, consider add more buffer or open more connections")); + "Exceeds client side inflight buffer, consider add more buffer or open more" + + " connections")); } } @@ -1342,7 +1343,8 @@ public void testMultipleAppendSerializationErrors() "The source object has fields unknown to BigQuery: root.not_foo.", rowIndexToErrorMessage.get(0)); assertEquals( - "Field root.foo failed to convert to STRING. Error: JSONObject does not have a string field at root.foo.", + "Field root.foo failed to convert to STRING. 
Error: JSONObject does not have a string" + + " field at root.foo.", rowIndexToErrorMessage.get(2)); } } diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java index 5c44d014d4..dd3a6dcfa1 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/JsonToProtoMessageTest.java @@ -20,6 +20,7 @@ import com.google.cloud.bigquery.storage.test.JsonTest.*; import com.google.cloud.bigquery.storage.test.SchemaTest.*; +import com.google.cloud.bigquery.storage.v1.Exceptions.RowIndexToErrorException; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.protobuf.ByteString; @@ -29,6 +30,7 @@ import java.math.BigDecimal; import java.util.ArrayList; import java.util.Collections; +import java.util.List; import java.util.Map; import java.util.logging.Logger; import org.json.JSONArray; @@ -393,6 +395,12 @@ public class JsonToProtoMessageTest { .setMode(TableFieldSchema.Mode.NULLABLE) .setName("test_timestamp") .build(); + private final TableFieldSchema TEST_TIMESTAMP_REPEATED = + TableFieldSchema.newBuilder() + .setType(TableFieldSchema.Type.TIMESTAMP) + .setMode(TableFieldSchema.Mode.REPEATED) + .setName("test_timestamp_repeated") + .build(); private final TableFieldSchema TEST_TIME = TableFieldSchema.newBuilder() .setType(TableFieldSchema.Type.TIME) @@ -598,7 +606,7 @@ public void testInt32NotMatchInt64() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestInt32.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int32 field at root.int.", e.getMessage()); + assertTrue(e.getMessage().contains("JSONObject does not have a int32 field at root.int.")); } } @@ -608,7 +616,7 @@ public void testDateTimeMismatch() throws Exception { TableFieldSchema.newBuilder() .setName("datetime") .setType(TableFieldSchema.Type.DATETIME) - .setMode(TableFieldSchema.Mode.REPEATED) + .setMode(TableFieldSchema.Mode.NULLABLE) .build(); TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); JSONObject json = new JSONObject(); @@ -619,10 +627,39 @@ public void testDateTimeMismatch() throws Exception { TestDatetime.getDescriptor(), tableSchema, json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int64 field at root.datetime.", e.getMessage()); + assertTrue( + e.getMessage().contains("JSONObject does not have a int64 field at root.datetime.")); } } + private void dateTimeMatch_Internal(String jsonVal, Long expectedVal) throws Exception { + TableFieldSchema field = + TableFieldSchema.newBuilder() + .setName("datetime") + .setType(TableFieldSchema.Type.DATETIME) + .setMode(TableFieldSchema.Mode.NULLABLE) + .build(); + TableSchema tableSchema = TableSchema.newBuilder().addFields(field).build(); + TestDatetime expectedProto = TestDatetime.newBuilder().setDatetime(expectedVal).build(); + JSONObject json = new JSONObject(); + json.put("datetime", jsonVal); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestDatetime.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + + @Test + public void testDateTimeMatch() 
throws Exception { + dateTimeMatch_Internal("2021-09-27T20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27t20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27 20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-9-27T20:51:10.752", 142258614586538368L); + dateTimeMatch_Internal("2021-09-27T00:00:00", 142258525253402624L); + dateTimeMatch_Internal("2021-09-27T00:0:00", 142258525253402624L); + dateTimeMatch_Internal("2021-09-27", 142258525253402624L); + } + @Test public void testTimeMismatch() throws Exception { TableFieldSchema field = @@ -640,7 +677,8 @@ public void testTimeMismatch() throws Exception { TestTime.getDescriptor(), tableSchema, json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals("JSONObject does not have a int64 field at root.time[0].", e.getMessage()); + assertTrue( + e.getMessage().contains("JSONObject does not have a int64 field at root.time[0].")); } } @@ -759,9 +797,9 @@ public void testTimestamp() throws Exception { TestTimestamp.newBuilder() .setTestString(10L) .setTestStringTZ(1648493279010000L) - .setTestLong(0L) - .setTestInt(1534806950000000L) - .setTestFloat(1534680695000000000L) + .setTestLong(1687984085000000L) + .setTestInt(153480695L) + .setTestFloat(153468069500L) .setTestOffset(1649135171000000L) .setTestTimezone(1649174771000000L) .setTestSaformat(1534680660000000L) @@ -769,7 +807,7 @@ public void testTimestamp() throws Exception { JSONObject json = new JSONObject(); json.put("test_string", "1970-01-01 00:00:00.000010"); json.put("test_string_T_Z", "2022-03-28T18:47:59.01Z"); - json.put("test_long", 0L); + json.put("test_long", 1687984085000000L); json.put("test_int", 153480695); json.put("test_float", "1.534680695e11"); json.put("test_offset", "2022-04-05T09:06:11+04:00"); @@ -781,6 +819,69 @@ public void testTimestamp() throws Exception { assertEquals(expectedProto, protoMsg); } + @Test + public void testTimestampRepeated() throws Exception { + TableSchema tableSchema = + TableSchema.newBuilder() + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_string_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_string_T_Z_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_long_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_int_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_float_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_offset_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_timezone_repeated") + .build()) + .addFields( + TableFieldSchema.newBuilder(TEST_TIMESTAMP_REPEATED) + .setName("test_saformat_repeated") + .build()) + .build(); + TestRepeatedTimestamp expectedProto = + TestRepeatedTimestamp.newBuilder() + .addTestStringRepeated(10L) + .addTestStringTZRepeated(1648493279010000L) + .addTestLongRepeated(1687984085000000L) + .addTestIntRepeated(153480695L) + .addTestFloatRepeated(153468069500L) + .addTestOffsetRepeated(1649135171000000L) + .addTestTimezoneRepeated(1649174771000000L) + .addTestSaformatRepeated(1534680660000000L) + .build(); + JSONObject json = new JSONObject(); + json.put("test_string_repeated", new JSONArray(new String[] {"1970-01-01 00:00:00.000010"})); 
+ json.put("test_string_T_Z_repeated", new JSONArray(new String[] {"2022-03-28T18:47:59.01Z"})); + json.put("test_long_repeated", new JSONArray(new Long[] {1687984085000000L})); + json.put("test_int_repeated", new JSONArray(new Integer[] {153480695})); + json.put("test_float_repeated", new JSONArray(new String[] {"1.534680695e11"})); + json.put("test_offset_repeated", new JSONArray(new String[] {"2022-04-05T09:06:11+04:00"})); + json.put("test_timezone_repeated", new JSONArray(new String[] {"2022-04-05 09:06:11 PST"})); + json.put("test_saformat_repeated", new JSONArray(new String[] {"2018/08/19 12:11"})); + DynamicMessage protoMsg = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestRepeatedTimestamp.getDescriptor(), tableSchema, json); + assertEquals(expectedProto, protoMsg); + } + @Test public void testDate() throws Exception { TableSchema tableSchema = @@ -811,9 +912,12 @@ public void testAllTypes() throws Exception { assertEquals(protoMsg, AllTypesToCorrectProto.get(entry.getKey())[success]); success += 1; } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a " + entry.getValue() + " field at root.test_field_type.", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a " + + entry.getValue() + + " field at root.test_field_type.")); } } if (entry.getKey() == DoubleType.getDescriptor()) { @@ -846,12 +950,12 @@ public void testAllRepeatedTypesWithLimits() throws Exception { LOG.info(e.getMessage()); assertTrue( e.getMessage() - .equals( + .contains( "JSONObject does not have a " + entry.getValue() + " field at root.test_repeated[0].") || e.getMessage() - .equals("Error: root.test_repeated[0] could not be converted to byte[].")); + .contains("Error: root.test_repeated[0] could not be converted to byte[].")); } } if (entry.getKey() == RepeatedDouble.getDescriptor()) { @@ -897,8 +1001,9 @@ public void testRequired() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(TestRequired.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have the required field root.required_double.", e.getMessage()); + assertTrue( + e.getMessage() + .contains("JSONObject does not have the required field root.required_double.")); } } @@ -929,9 +1034,10 @@ public void testStructSimpleFail() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(MessageType.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a string field at root.test_field_type.test_field_type.", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at root.test_field_type.test_field_type.")); } } @@ -952,6 +1058,9 @@ public void testStructComplex() throws Exception { .setTestDate(1) .setTestDatetime(1) .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258614586538368L) + .addTestDatetimeStr(142258525253402624L) .addTestDatetimeStr(142258525253402624L) .setComplexLvl1( ComplexLvl1.newBuilder() @@ -962,7 +1071,7 @@ public void testStructComplex() throws Exception { .setTestNumeric( BigDecimalByteStringEncoder.encodeToNumericByteString(new BigDecimal("1.23456"))) .setTestGeo("POINT(1,1)") - .setTestTimestamp(123456780000000L) + .setTestTimestamp(12345678L) .setTestTime(CivilTimeEncoder.encodePacked64TimeMicros(LocalTime.of(1, 0, 1))) .setTestTimeStr(89332507144L) 
.addTestNumericRepeated( @@ -1020,7 +1129,14 @@ public void testStructComplex() throws Exception { json.put("test_datetime", 1); json.put( "test_datetime_str", - new JSONArray(new String[] {"2021-09-27T20:51:10.752", "2021-09-27T00:00:00"})); + new JSONArray( + new String[] { + "2021-09-27T20:51:10.752", + "2021-09-27t20:51:10.752", + "2021-09-27 20:51:10.752", + "2021-09-27T00:00:00", + "2021-09-27" + })); json.put("complex_lvl1", complex_lvl1); json.put("complex_lvl2", complex_lvl2); json.put( @@ -1089,8 +1205,9 @@ public void testStructComplexFail() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexRoot.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a int64 field at root.complex_lvl1.test_int.", e.getMessage()); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a int64 field at root.complex_lvl1.test_int.")); } } @@ -1103,8 +1220,9 @@ public void testRepeatedWithMixedTypes() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedDouble.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a double field at root.test_repeated[2].", e.getMessage()); + assertTrue( + e.getMessage() + .contains("JSONObject does not have a double field at root.test_repeated[2].")); } } @@ -1165,9 +1283,10 @@ public void testNestedRepeatedComplexFail() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(NestedRepeated.getDescriptor(), json); Assert.fail("should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "JSONObject does not have a string field at root.repeated_string.test_repeated[0].", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "JSONObject does not have a string field at root.repeated_string.test_repeated[0].")); } } @@ -1198,10 +1317,10 @@ public void testAllowUnknownFieldsError() throws Exception { DynamicMessage protoMsg = JsonToProtoMessage.INSTANCE.convertToProtoMessage(RepeatedInt64.getDescriptor(), json); Assert.fail("Should fail"); - } catch (Exceptions.DataHasUnknownFieldException e) { - assertEquals( - "The source object has fields unknown to BigQuery: root.string.", e.getMessage()); - assertEquals("root.string", e.getFieldName()); + } catch (IllegalArgumentException e) { + assertTrue( + e.getMessage() + .contains("The source object has fields unknown to BigQuery: " + "root.string.")); } } @@ -1262,9 +1381,10 @@ public void testAllowUnknownFieldsSecondLevel() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage(ComplexLvl1.getDescriptor(), json); Assert.fail("Should fail"); } catch (IllegalArgumentException e) { - assertEquals( - "The source object has fields unknown to BigQuery: root.complex_lvl2.no_match.", - e.getMessage()); + assertTrue( + e.getMessage() + .contains( + "The source object has fields unknown to BigQuery: root.complex_lvl2.no_match.")); } } @@ -1327,9 +1447,9 @@ public void testBadJsonFieldRepeated() throws Exception { JsonToProtoMessage.INSTANCE.convertToProtoMessage( RepeatedBytes.getDescriptor(), ts, json); Assert.fail("Should fail"); - } catch (Exceptions.FieldParseError ex) { - assertEquals(ex.getBqType(), "NUMERIC"); - assertEquals(ex.getFieldName(), "root.test_repeated"); + } catch (RowIndexToErrorException ex) { + assertTrue(ex.rowIndexToErrorMessage.size() == 1); + assertTrue(ex.getMessage().contains("root.test_repeated failed to convert to NUMERIC.")); } } @@ 
-1354,7 +1474,7 @@ public void testBadJsonFieldIntRepeated() throws Exception { RepeatedInt32.getDescriptor(), ts, json); Assert.fail("Should fail"); } catch (IllegalArgumentException ex) { - assertEquals(ex.getMessage(), "Text 'blah' could not be parsed at index 0"); + assertTrue(ex.getMessage().contains("Text 'blah' could not be parsed at index 0")); } } @@ -1421,6 +1541,51 @@ public void testDoubleAndFloatToNumericConversion() { assertEquals(expectedProto, protoMsg); } + @Test + public void testDoubleAndFloatToNumericConversionWithJsonArray() { + TableSchema ts = + TableSchema.newBuilder() + .addFields( + 0, + TableFieldSchema.newBuilder() + .setName("numeric") + .setType(TableFieldSchema.Type.NUMERIC) + .build()) + .build(); + List protoList = new ArrayList<>(); + int protoNum = 10; + for (int i = 0; i < protoNum; i++) { + protoList.add( + TestNumeric.newBuilder() + .setNumeric( + BigDecimalByteStringEncoder.encodeToNumericByteString( + new BigDecimal("24.678" + i))) + .build()); + } + + JSONArray doubleJsonArray = new JSONArray(); + JSONArray floatJsonArray = new JSONArray(); + for (int i = 0; i < protoNum; i++) { + JSONObject doubleJson = new JSONObject(); + doubleJson.put("numeric", new Double(24.678 + (i * 0.0001))); + doubleJsonArray.put(doubleJson); + + JSONObject floatJson = new JSONObject(); + floatJson.put("numeric", new Float(24.678 + (i * 0.0001))); + floatJsonArray.put(floatJson); + } + + List protoMsgList = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), ts, doubleJsonArray, false); + assertEquals(protoList, protoMsgList); + + protoMsgList = + JsonToProtoMessage.INSTANCE.convertToProtoMessage( + TestNumeric.getDescriptor(), ts, floatJsonArray, false); + assertEquals(protoList, protoMsgList); + } + @Test public void testBigDecimalToBigNumericConversion() { TableSchema ts = diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java index 1e73643eb8..c29c03d178 100644 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java +++ b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/it/ITBigQueryWriteManualClientTest.java @@ -376,13 +376,15 @@ public void testRowErrors() AppendSerializationError e = (AppendSerializationError) t; LOG.info("Found row errors on stream: " + e.getStreamName()); assertEquals( - "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field foo.", + "Field foo: STRING(10) has maximum length 10 but got a value with length 12 on field" + + " foo.", e.getRowIndexToErrorMessage().get(0)); assertEquals( "Timestamp field value is out of range: -9223372036854775808 on field bar.", e.getRowIndexToErrorMessage().get(1)); assertEquals( - "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field foo.", + "Field foo: STRING(10) has maximum length 10 but got a value with length 15 on field" + + " foo.", e.getRowIndexToErrorMessage().get(2)); for (Map.Entry entry : e.getRowIndexToErrorMessage().entrySet()) { LOG.info("Bad row index: " + entry.getKey() + ", has problem: " + entry.getValue()); @@ -1035,10 +1037,12 @@ public void testComplicateSchemaWithPendingStream() Iterator queryIter = queryResult.getValues().iterator(); assertTrue(queryIter.hasNext()); assertEquals( - 
"[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=aaa}, FieldValue{attribute=PRIMITIVE, value=aaa}]}]", + "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=aaa}," + + " FieldValue{attribute=PRIMITIVE, value=aaa}]}]", queryIter.next().get(1).getRepeatedValue().toString()); assertEquals( - "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=bbb}, FieldValue{attribute=PRIMITIVE, value=bbb}]}]", + "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=bbb}," + + " FieldValue{attribute=PRIMITIVE, value=bbb}]}]", queryIter.next().get(1).getRepeatedValue().toString()); assertFalse(queryIter.hasNext()); } diff --git a/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto b/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto index 03209d9c81..d70d214be2 100644 --- a/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto +++ b/google-cloud-bigquerystorage/src/test/proto/jsonTest.proto @@ -156,6 +156,17 @@ message TestTimestamp { optional int64 test_saformat = 8; } +message TestRepeatedTimestamp { + repeated int64 test_string_repeated = 1; + repeated int64 test_string_t_z_repeated = 2; + repeated int64 test_long_repeated = 3; + repeated int64 test_int_repeated = 4; + repeated int64 test_float_repeated = 5; + repeated int64 test_offset_repeated = 6; + repeated int64 test_timezone_repeated = 7; + repeated int64 test_saformat_repeated = 8; +} + message TestDate { optional int32 test_string = 1; optional int32 test_long = 2; diff --git a/grpc-google-cloud-bigquerystorage-v1/pom.xml b/grpc-google-cloud-bigquerystorage-v1/pom.xml index cf0432913e..dd440be295 100644 --- a/grpc-google-cloud-bigquerystorage-v1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.40.0 grpc-google-cloud-bigquerystorage-v1 GRPC library for grpc-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 diff --git a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml index 2edb61c942..4a6ab39d59 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.164.0 grpc-google-cloud-bigquerystorage-v1beta1 GRPC library for grpc-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 diff --git a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml index a1ce5951dc..8cf1f37f05 100644 --- a/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/grpc-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.164.0 grpc-google-cloud-bigquerystorage-v1beta2 GRPC library for grpc-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 diff --git a/pom.xml b/pom.xml index 6b2987a9e0..25b163d72d 100644 --- a/pom.xml +++ b/pom.xml @@ -4,7 +4,7 @@ com.google.cloud google-cloud-bigquerystorage-parent pom - 2.39.1 + 2.40.0 BigQuery Storage Parent https://github.com/googleapis/java-bigquerystorage @@ -76,44 +76,44 @@ com.google.cloud google-cloud-shared-dependencies - 3.12.0 + 3.13.0 pom import 
com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.164.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.40.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.164.0 com.google.api.grpc grpc-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.40.0 com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.40.0 org.json @@ -132,7 +132,7 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.30.0 test diff --git a/proto-google-cloud-bigquerystorage-v1/pom.xml b/proto-google-cloud-bigquerystorage-v1/pom.xml index f2e699a552..94f629692f 100644 --- a/proto-google-cloud-bigquerystorage-v1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1 - 2.39.1 + 2.40.0 proto-google-cloud-bigquerystorage-v1 PROTO library for proto-google-cloud-bigquerystorage-v1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java index 55258107b7..bf64f57698 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequest.java @@ -24,9 +24,10 @@ *
  * Request message for `AppendRows`.
  *
- * Due to the nature of AppendRows being a bidirectional streaming RPC, certain
- * parts of the AppendRowsRequest need only be specified for the first request
- * sent each time the gRPC network connection is opened/reopened.
+ * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+ * AppendRowsRequest need only be specified for the first request before
+ * switching table destinations. You can also switch table destinations within
+ * the same connection for the default stream.
  *
  * The size of a single AppendRowsRequest must be less than 10 MB in size.
  * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
@@ -85,10 +86,9 @@ protected com.google.protobuf.MapField internalGetMapField(int number) {
    *
    *
    * 
-   * An enum to indicate how to interpret missing values. Missing values are
-   * fields present in user schema but missing in rows. A missing value can
-   * represent a NULL or a column default value defined in BigQuery table
-   * schema.
+   * An enum to indicate how to interpret missing values of fields that are
+   * present in user schema but missing in rows. A missing value can represent a
+   * NULL or a column default value defined in BigQuery table schema.
    * 
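The enum above governs how fields that are present in the user schema but absent from a given row get filled in. A minimal sketch of wiring this up through the request's per-column map, assuming the generated `putMissingValueInterpretations` setter on `AppendRowsRequest.Builder`; the column names are hypothetical:

```java
import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;

public class MissingValueSketch {
  // Hypothetical column names: DEFAULT_VALUE falls back to the column default
  // defined in the BigQuery table schema, NULL_VALUE writes a NULL.
  static AppendRowsRequest withMissingValueHints(AppendRowsRequest base) {
    return base.toBuilder()
        .putMissingValueInterpretations(
            "created_at", AppendRowsRequest.MissingValueInterpretation.DEFAULT_VALUE)
        .putMissingValueInterpretations(
            "note", AppendRowsRequest.MissingValueInterpretation.NULL_VALUE)
        .build();
  }
}
```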
* * Protobuf enum {@code @@ -260,9 +260,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
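The three cases above boil down to: send `writer_schema` whenever the schema the rows were serialized with could differ from what the server last saw on this connection. A minimal sketch of assembling the `ProtoData` for such a request; note that `toProto()` is only sufficient for flat, self-contained messages, and real callers typically flatten nested or imported types into a single `DescriptorProto`:

```java
import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;
import com.google.cloud.bigquery.storage.v1.ProtoRows;
import com.google.cloud.bigquery.storage.v1.ProtoSchema;
import com.google.protobuf.Descriptors.Descriptor;
import com.google.protobuf.Message;

public class FirstRequestSketch {
  // Builds the ProtoData for the first request on a connection (or after a
  // schema or destination change): writer_schema plus the serialized rows.
  static AppendRowsRequest.ProtoData firstProtoData(
      Descriptor descriptor, Iterable<? extends Message> rows) {
    ProtoSchema schema =
        ProtoSchema.newBuilder().setProtoDescriptor(descriptor.toProto()).build();
    ProtoRows.Builder serialized = ProtoRows.newBuilder();
    for (Message row : rows) {
      serialized.addSerializedRows(row.toByteString());
    }
    return AppendRowsRequest.ProtoData.newBuilder()
        .setWriterSchema(schema)
        .setRows(serialized.build())
        .build();
  }
}
```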
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -274,9 +279,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -288,9 +298,14 @@ public interface ProtoDataOrBuilder * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -390,9 +405,14 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -407,9 +427,14 @@ public boolean hasWriterSchema() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -426,9 +451,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { * * *
-     * Proto schema used to serialize the data.  This value only needs to be
-     * provided as part of the first request on a gRPC network connection,
-     * and will be ignored for subsequent requests on the connection.
+     * The protocol buffer schema used to serialize the data. Provide this value
+     * whenever:
+     *
+     * * You send the first request of an RPC connection.
+     *
+     * * You change the input schema.
+     *
+     * * You specify a new destination table.
      * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -915,9 +945,14 @@ public Builder mergeFrom( * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -931,9 +966,14 @@ public boolean hasWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -953,9 +993,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema getWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -977,9 +1022,14 @@ public Builder setWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchema * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -999,9 +1049,14 @@ public Builder setWriterSchema( * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1027,9 +1082,14 @@ public Builder mergeWriterSchema(com.google.cloud.bigquery.storage.v1.ProtoSchem * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1048,9 +1108,14 @@ public Builder clearWriterSchema() { * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1064,9 +1129,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchema.Builder getWriterSchemaB * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1084,9 +1154,14 @@ public com.google.cloud.bigquery.storage.v1.ProtoSchemaOrBuilder getWriterSchema * * *
-       * Proto schema used to serialize the data.  This value only needs to be
-       * provided as part of the first request on a gRPC network connection,
-       * and will be ignored for subsequent requests on the connection.
+       * The protocol buffer schema used to serialize the data. Provide this value
+       * whenever:
+       *
+       * * You send the first request of an RPC connection.
+       *
+       * * You change the input schema.
+       *
+       * * You specify a new destination table.
        * 
* * .google.cloud.bigquery.storage.v1.ProtoSchema writer_schema = 1; @@ -1440,10 +1515,14 @@ public RowsCase getRowsCase() { * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -1452,6 +1531,22 @@ public RowsCase getRowsCase() {
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
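The r1–r5 sequence above can be mirrored in caller code. In this sketch the stream names are placeholders, and `data1`/`data2` stand for rows already serialized against each destination's schema:

```java
import com.google.cloud.bigquery.storage.v1.AppendRowsRequest;

public class WriteStreamSequenceSketch {
  // Mirrors the r1..r5 example above; stream names are hypothetical.
  static AppendRowsRequest[] exampleSequence(
      AppendRowsRequest.ProtoData data1, AppendRowsRequest.ProtoData data2) {
    String streamA = "projects/p/datasets/d/tables/t1/streams/_default"; // placeholder
    String streamB = "projects/p/datasets/d/tables/t2/streams/_default"; // placeholder
    return new AppendRowsRequest[] {
      // r1: the first request names the destination.
      AppendRowsRequest.newBuilder().setWriteStream(streamA).setProtoRows(data1).build(),
      // r2, r3: same destination, so write_stream may be omitted.
      AppendRowsRequest.newBuilder().setProtoRows(data1).build(),
      AppendRowsRequest.newBuilder().setProtoRows(data1).build(),
      // r4, r5: the destination changed, so write_stream is populated from here on.
      AppendRowsRequest.newBuilder().setWriteStream(streamB).setProtoRows(data2).build(),
      AppendRowsRequest.newBuilder().setWriteStream(streamB).setProtoRows(data2).build()
    };
  }
}
```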
* * @@ -1476,10 +1571,14 @@ public java.lang.String getWriteStream() { * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -1488,6 +1587,22 @@ public java.lang.String getWriteStream() {
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * @@ -2248,9 +2363,10 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * Request message for `AppendRows`.
    *
-   * Due to the nature of AppendRows being a bidirectional streaming RPC, certain
-   * parts of the AppendRowsRequest need only be specified for the first request
-   * sent each time the gRPC network connection is opened/reopened.
+   * Because AppendRows is a bidirectional streaming RPC, certain parts of the
+   * AppendRowsRequest need only be specified for the first request before
+   * switching table destinations. You can also switch table destinations within
+   * the same connection for the default stream.
    *
    * The size of a single AppendRowsRequest must be less than 10 MB in size.
    * Requests larger than this return an error, typically `INVALID_ARGUMENT`.
@@ -2563,10 +2679,14 @@ public Builder clearRows() {
      *
      *
      * 
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2575,6 +2695,22 @@ public Builder clearRows() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2598,10 +2734,14 @@ public java.lang.String getWriteStream() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2610,6 +2750,22 @@ public java.lang.String getWriteStream() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2633,10 +2789,14 @@ public com.google.protobuf.ByteString getWriteStreamBytes() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2645,6 +2805,22 @@ public com.google.protobuf.ByteString getWriteStreamBytes() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2667,10 +2843,14 @@ public Builder setWriteStream(java.lang.String value) { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2679,6 +2859,22 @@ public Builder setWriteStream(java.lang.String value) {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * @@ -2697,10 +2893,14 @@ public Builder clearWriteStream() { * * *
-     * Required. The write_stream identifies the target of the append operation,
-     * and only needs to be specified as part of the first request on the gRPC
-     * connection. If provided for subsequent requests, it must match the value of
-     * the first request.
+     * Required. The write_stream identifies the append operation. It must be
+     * provided in the following scenarios:
+     *
+     * * In the first request to an AppendRows connection.
+     *
+     * * In all subsequent requests to an AppendRows connection, if you use the
+     * same connection to write to multiple tables or change the input schema for
+     * default streams.
      *
      * For explicitly created write streams, the format is:
      *
@@ -2709,6 +2909,22 @@ public Builder clearWriteStream() {
      * For the special default stream, the format is:
      *
      * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+     *
+     * An example of a possible sequence of requests with write_stream fields
+     * within a single connection:
+     *
+     * * r1: {write_stream: stream_name_1}
+     *
+     * * r2: {write_stream: /*omit*/}
+     *
+     * * r3: {write_stream: /*omit*/}
+     *
+     * * r4: {write_stream: stream_name_2}
+     *
+     * * r5: {write_stream: stream_name_2}
+     *
+     * The destination changed in request_4, so the write_stream field must be
+     * populated in all subsequent requests in this stream.
      * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java index a9d1f10fe1..1f40b2ec71 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/AppendRowsRequestOrBuilder.java @@ -27,10 +27,14 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -39,6 +43,22 @@ public interface AppendRowsRequestOrBuilder
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * @@ -52,10 +72,14 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The write_stream identifies the target of the append operation,
-   * and only needs to be specified as part of the first request on the gRPC
-   * connection. If provided for subsequent requests, it must match the value of
-   * the first request.
+   * Required. The write_stream identifies the append operation. It must be
+   * provided in the following scenarios:
+   *
+   * * In the first request to an AppendRows connection.
+   *
+   * * In all subsequent requests to an AppendRows connection, if you use the
+   * same connection to write to multiple tables or change the input schema for
+   * default streams.
    *
    * For explicitly created write streams, the format is:
    *
@@ -64,6 +88,22 @@ public interface AppendRowsRequestOrBuilder
    * For the special default stream, the format is:
    *
    * * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+   *
+   * An example of a possible sequence of requests with write_stream fields
+   * within a single connection:
+   *
+   * * r1: {write_stream: stream_name_1}
+   *
+   * * r2: {write_stream: /*omit*/}
+   *
+   * * r3: {write_stream: /*omit*/}
+   *
+   * * r4: {write_stream: stream_name_2}
+   *
+   * * r5: {write_stream: stream_name_2}
+   *
+   * The destination changed in request_4, so the write_stream field must be
+   * populated in all subsequent requests in this stream.
    * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java index 54711847d8..28eb0fd9f0 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSession.java @@ -4342,10 +4342,10 @@ public long getEstimatedTotalBytesScanned() { * * *
-   * Output only. A pre-projected estimate of the total physical size (in bytes)
-   * of files this session will scan when all streams are completely consumed.
-   * This estimate does not depend on the selected columns and can be based on
-   * metadata from the table which might be incomplete or stale. Only set for
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table.  This field is only set for
    * BigLake tables.
    * 
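Because the field is output only, a caller simply reads it off the created session. A small sketch, with the request construction elided and assumed to target a BigLake table (the field stays 0 otherwise):

```java
import com.google.cloud.bigquery.storage.v1.BigQueryReadClient;
import com.google.cloud.bigquery.storage.v1.CreateReadSessionRequest;
import com.google.cloud.bigquery.storage.v1.ReadSession;

public class PhysicalSizeEstimateSketch {
  // Returns the pre-projected physical-size estimate for the session's files.
  static long estimatedPhysicalBytes(BigQueryReadClient client, CreateReadSessionRequest request) {
    ReadSession session = client.createReadSession(request);
    return session.getEstimatedTotalPhysicalFileSize();
  }
}
```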
* @@ -7182,10 +7182,10 @@ public Builder clearEstimatedTotalBytesScanned() { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
      * BigLake tables.
      * 
* @@ -7203,10 +7203,10 @@ public long getEstimatedTotalPhysicalFileSize() { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
      * BigLake tables.
      * 
* @@ -7228,10 +7228,10 @@ public Builder setEstimatedTotalPhysicalFileSize(long value) { * * *
-     * Output only. A pre-projected estimate of the total physical size (in bytes)
-     * of files this session will scan when all streams are completely consumed.
-     * This estimate does not depend on the selected columns and can be based on
-     * metadata from the table which might be incomplete or stale. Only set for
+     * Output only. A pre-projected estimate of the total physical size of files
+     * (in bytes) that this session will scan when all streams are consumed. This
+     * estimate is independent of the selected columns and can be based on
+     * incomplete or stale metadata from the table.  This field is only set for
      * BigLake tables.
      * 
* diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java index 85a8b45aa1..1fd051672d 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/ReadSessionOrBuilder.java @@ -435,10 +435,10 @@ public interface ReadSessionOrBuilder * * *
-   * Output only. A pre-projected estimate of the total physical size (in bytes)
-   * of files this session will scan when all streams are completely consumed.
-   * This estimate does not depend on the selected columns and can be based on
-   * metadata from the table which might be incomplete or stale. Only set for
+   * Output only. A pre-projected estimate of the total physical size of files
+   * (in bytes) that this session will scan when all streams are consumed. This
+   * estimate is independent of the selected columns and can be based on
+   * incomplete or stale metadata from the table.  This field is only set for
    * BigLake tables.
    * 
* diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto index d28c36f43f..2959faaf0b 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto @@ -397,9 +397,10 @@ message CreateWriteStreamRequest { // Request message for `AppendRows`. // -// Due to the nature of AppendRows being a bidirectional streaming RPC, certain -// parts of the AppendRowsRequest need only be specified for the first request -// sent each time the gRPC network connection is opened/reopened. +// Because AppendRows is a bidirectional streaming RPC, certain parts of the +// AppendRowsRequest need only be specified for the first request before +// switching table destinations. You can also switch table destinations within +// the same connection for the default stream. // // The size of a single AppendRowsRequest must be less than 10 MB in size. // Requests larger than this return an error, typically `INVALID_ARGUMENT`. @@ -407,9 +408,14 @@ message AppendRowsRequest { // ProtoData contains the data rows and schema when constructing append // requests. message ProtoData { - // Proto schema used to serialize the data. This value only needs to be - // provided as part of the first request on a gRPC network connection, - // and will be ignored for subsequent requests on the connection. + // The protocol buffer schema used to serialize the data. Provide this value + // whenever: + // + // * You send the first request of an RPC connection. + // + // * You change the input schema. + // + // * You specify a new destination table. ProtoSchema writer_schema = 1; // Serialized row data in protobuf message format. @@ -419,10 +425,9 @@ message AppendRowsRequest { ProtoRows rows = 2; } - // An enum to indicate how to interpret missing values. Missing values are - // fields present in user schema but missing in rows. A missing value can - // represent a NULL or a column default value defined in BigQuery table - // schema. + // An enum to indicate how to interpret missing values of fields that are + // present in user schema but missing in rows. A missing value can represent a + // NULL or a column default value defined in BigQuery table schema. enum MissingValueInterpretation { // Invalid missing value interpretation. Requests with this value will be // rejected. @@ -436,10 +441,14 @@ message AppendRowsRequest { DEFAULT_VALUE = 2; } - // Required. The write_stream identifies the target of the append operation, - // and only needs to be specified as part of the first request on the gRPC - // connection. If provided for subsequent requests, it must match the value of - // the first request. + // Required. The write_stream identifies the append operation. It must be + // provided in the following scenarios: + // + // * In the first request to an AppendRows connection. + // + // * In all subsequent requests to an AppendRows connection, if you use the + // same connection to write to multiple tables or change the input schema for + // default streams. // // For explicitly created write streams, the format is: // @@ -448,6 +457,22 @@ message AppendRowsRequest { // For the special default stream, the format is: // // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`. 
+ // + // An example of a possible sequence of requests with write_stream fields + // within a single connection: + // + // * r1: {write_stream: stream_name_1} + // + // * r2: {write_stream: /*omit*/} + // + // * r3: {write_stream: /*omit*/} + // + // * r4: {write_stream: stream_name_2} + // + // * r5: {write_stream: stream_name_2} + // + // The destination changed in request_4, so the write_stream field must be + // populated in all subsequent requests in this stream. string write_stream = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { diff --git a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto index 0a7c7c79c0..785c74f788 100644 --- a/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto +++ b/proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto @@ -194,10 +194,10 @@ message ReadSession { int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; - // Output only. A pre-projected estimate of the total physical size (in bytes) - // of files this session will scan when all streams are completely consumed. - // This estimate does not depend on the selected columns and can be based on - // metadata from the table which might be incomplete or stale. Only set for + // Output only. A pre-projected estimate of the total physical size of files + // (in bytes) that this session will scan when all streams are consumed. This + // estimate is independent of the selected columns and can be based on + // incomplete or stale metadata from the table. This field is only set for // BigLake tables. 
int64 estimated_total_physical_file_size = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; diff --git a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml index 8c2c37913d..d08b09cdf3 100644 --- a/proto-google-cloud-bigquerystorage-v1beta1/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta1/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 - 0.163.1 + 0.164.0 proto-google-cloud-bigquerystorage-v1beta1 PROTO library for proto-google-cloud-bigquerystorage-v1beta1 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 diff --git a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml index 292fe36092..128077340e 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/pom.xml +++ b/proto-google-cloud-bigquerystorage-v1beta2/pom.xml @@ -4,13 +4,13 @@ 4.0.0 com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta2 - 0.163.1 + 0.164.0 proto-google-cloud-bigquerystorage-v1beta2 PROTO library for proto-google-cloud-bigquerystorage-v1beta2 com.google.cloud google-cloud-bigquerystorage-parent - 2.39.1 + 2.40.0 diff --git a/samples/install-without-bom/pom.xml b/samples/install-without-bom/pom.xml index b5a3df7383..ba2c5cc3b7 100644 --- a/samples/install-without-bom/pom.xml +++ b/samples/install-without-bom/pom.xml @@ -30,19 +30,19 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.0 + 2.39.1 com.google.cloud google-cloud-bigquery - 2.28.0 + 2.30.0 org.apache.avro avro - 1.11.1 + 1.11.2 org.apache.arrow diff --git a/samples/snapshot/pom.xml b/samples/snapshot/pom.xml index 189dc7a992..455674f0aa 100644 --- a/samples/snapshot/pom.xml +++ b/samples/snapshot/pom.xml @@ -29,19 +29,19 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.1 + 2.40.0 com.google.cloud google-cloud-bigquery - 2.28.0 + 2.30.0 org.apache.avro avro - 1.11.1 + 1.11.2 diff --git a/samples/snippets/pom.xml b/samples/snippets/pom.xml index b9229ebb88..4f94b1fd8b 100644 --- a/samples/snippets/pom.xml +++ b/samples/snippets/pom.xml @@ -31,7 +31,7 @@ com.google.cloud libraries-bom - 26.17.0 + 26.18.0 pom import @@ -48,12 +48,12 @@ com.google.cloud google-cloud-bigquery - 2.28.0 + 2.30.0 org.apache.avro avro - 1.11.1 + 1.11.2 org.apache.arrow diff --git a/tutorials/JsonWriterDefaultStream/pom.xml b/tutorials/JsonWriterDefaultStream/pom.xml index 7be2863855..256f894bef 100644 --- a/tutorials/JsonWriterDefaultStream/pom.xml +++ b/tutorials/JsonWriterDefaultStream/pom.xml @@ -19,17 +19,17 @@ com.google.cloud google-cloud-bigquerystorage - 2.39.0 + 2.39.1 com.google.cloud google-cloud-bigquery - 2.28.0 + 2.30.0 org.apache.avro avro - 1.11.1 + 1.11.2 org.apache.arrow diff --git a/versions.txt b/versions.txt index e13d3db2e6..b63f957686 100644 --- a/versions.txt +++ b/versions.txt @@ -1,10 +1,10 @@ # Format: # module:released-version:current-version -google-cloud-bigquerystorage:2.39.1:2.39.1 -grpc-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.1 -grpc-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.1 -grpc-google-cloud-bigquerystorage-v1:2.39.1:2.39.1 -proto-google-cloud-bigquerystorage-v1beta1:0.163.1:0.163.1 -proto-google-cloud-bigquerystorage-v1beta2:0.163.1:0.163.1 -proto-google-cloud-bigquerystorage-v1:2.39.1:2.39.1 +google-cloud-bigquerystorage:2.40.0:2.40.0 +grpc-google-cloud-bigquerystorage-v1beta1:0.164.0:0.164.0 +grpc-google-cloud-bigquerystorage-v1beta2:0.164.0:0.164.0 +grpc-google-cloud-bigquerystorage-v1:2.40.0:2.40.0 
+proto-google-cloud-bigquerystorage-v1beta1:0.164.0:0.164.0 +proto-google-cloud-bigquerystorage-v1beta2:0.164.0:0.164.0 +proto-google-cloud-bigquerystorage-v1:2.40.0:2.40.0
Range and bits for date/time fields

| Field | Range     | # Bits |
|-------|-----------|--------|
| Year  | [1, 9999] | 14     |
| Month | [1, 12]   | 4      |
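As an illustration of how such ranges map onto bit widths, here is a toy packing of just the two fields listed above; this is not the library's `CivilTimeEncoder` layout, only a sketch of the technique:

```java
public class PackedDateSketch {
  // Packs month into the low 4 bits and year into the next 14 bits of a long.
  static long packYearMonth(int year, int month) {
    if (year < 1 || year > 9999) {
      throw new IllegalArgumentException("year out of range");
    }
    if (month < 1 || month > 12) {
      throw new IllegalArgumentException("month out of range");
    }
    return ((long) year << 4) | month;
  }

  static int unpackYear(long packed) {
    return (int) (packed >>> 4) & 0x3FFF; // 14 bits
  }

  static int unpackMonth(long packed) {
    return (int) (packed & 0xF); // 4 bits
  }
}
```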