// Copyright 2022 Luca Casonato. All rights reserved. MIT license. /** * Cloud Natural Language API Client for Deno * ========================================== * * Provides natural language understanding technologies, such as sentiment analysis, entity recognition, entity sentiment analysis, and other text annotations, to developers. * * Docs: https://cloud.google.com/natural-language/ * Source: https://googleapis.deno.dev/v1/language:v2.ts */ import { auth, CredentialsClient, GoogleAuth, request } from "/_/base@v1/mod.ts"; export { auth, GoogleAuth }; export type { CredentialsClient }; /** * Provides natural language understanding technologies, such as sentiment * analysis, entity recognition, entity sentiment analysis, and other text * annotations, to developers. */ export class Language { #client: CredentialsClient | undefined; #baseUrl: string; constructor(client?: CredentialsClient, baseUrl: string = "https://language.googleapis.com/") { this.#client = client; this.#baseUrl = baseUrl; } /** * Finds named entities (currently proper names and common nouns) in the text * along with entity types, probability, mentions for each entity, and other * properties. * */ async documentsAnalyzeEntities(req: AnalyzeEntitiesRequest): Promise { const url = new URL(`${this.#baseUrl}v2/documents:analyzeEntities`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as AnalyzeEntitiesResponse; } /** * Analyzes the sentiment of the provided text. * */ async documentsAnalyzeSentiment(req: AnalyzeSentimentRequest): Promise { const url = new URL(`${this.#baseUrl}v2/documents:analyzeSentiment`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as AnalyzeSentimentResponse; } /** * A convenience method that provides all features in one call. 
* */ async documentsAnnotateText(req: AnnotateTextRequest): Promise { const url = new URL(`${this.#baseUrl}v2/documents:annotateText`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as AnnotateTextResponse; } /** * Classifies a document into categories. * */ async documentsClassifyText(req: ClassifyTextRequest): Promise { const url = new URL(`${this.#baseUrl}v2/documents:classifyText`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as ClassifyTextResponse; } /** * Moderates a document for harmful and sensitive categories. * */ async documentsModerateText(req: ModerateTextRequest): Promise { const url = new URL(`${this.#baseUrl}v2/documents:moderateText`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as ModerateTextResponse; } } /** * The entity analysis request message. */ export interface AnalyzeEntitiesRequest { /** * Required. Input document. */ document?: Document; /** * The encoding type used by the API to calculate offsets. */ encodingType?: | "NONE" | "UTF8" | "UTF16" | "UTF32"; } /** * The entity analysis response message. */ export interface AnalyzeEntitiesResponse { /** * The recognized entities in the input document. */ entities?: Entity[]; /** * The language of the text, which will be the same as the language specified * in the request or, if not specified, the automatically-detected language. * See Document.language_code field for more details. */ languageCode?: string; /** * Whether the language is officially supported. The API may still return a * response when the language is not supported, but it is on a best effort * basis. */ languageSupported?: boolean; } /** * The sentiment analysis request message. */ export interface AnalyzeSentimentRequest { /** * Required. Input document. 
*/ document?: Document; /** * The encoding type used by the API to calculate sentence offsets. */ encodingType?: | "NONE" | "UTF8" | "UTF16" | "UTF32"; } /** * The sentiment analysis response message. */ export interface AnalyzeSentimentResponse { /** * The overall sentiment of the input document. */ documentSentiment?: Sentiment; /** * The language of the text, which will be the same as the language specified * in the request or, if not specified, the automatically-detected language. * See Document.language_code field for more details. */ languageCode?: string; /** * Whether the language is officially supported. The API may still return a * response when the language is not supported, but it is on a best effort * basis. */ languageSupported?: boolean; /** * The sentiment for all the sentences in the document. */ sentences?: Sentence[]; } /** * The request message for the text annotation API, which can perform multiple * analysis types in one call. */ export interface AnnotateTextRequest { /** * Required. Input document. */ document?: Document; /** * The encoding type used by the API to calculate offsets. */ encodingType?: | "NONE" | "UTF8" | "UTF16" | "UTF32"; /** * Required. The enabled features. */ features?: AnnotateTextRequestFeatures; } /** * All available features. Setting each one to true will enable that specific * analysis for the input. */ export interface AnnotateTextRequestFeatures { /** * Optional. Classify the full document into categories. */ classifyText?: boolean; /** * Optional. Extract document-level sentiment. */ extractDocumentSentiment?: boolean; /** * Optional. Extract entities. */ extractEntities?: boolean; /** * Optional. Moderate the document for harmful and sensitive categories. */ moderateText?: boolean; } /** * The text annotations response message. */ export interface AnnotateTextResponse { /** * Categories identified in the input document. */ categories?: ClassificationCategory[]; /** * The overall sentiment for the document. 
Populated if the user enables * AnnotateTextRequest.Features.extract_document_sentiment. */ documentSentiment?: Sentiment; /** * Entities, along with their semantic information, in the input document. * Populated if the user enables AnnotateTextRequest.Features.extract_entities * . */ entities?: Entity[]; /** * The language of the text, which will be the same as the language specified * in the request or, if not specified, the automatically-detected language. * See Document.language_code field for more details. */ languageCode?: string; /** * Whether the language is officially supported by all requested features. * The API may still return a response when the language is not supported, but * it is on a best effort basis. */ languageSupported?: boolean; /** * Harmful and sensitive categories identified in the input document. */ moderationCategories?: ClassificationCategory[]; /** * Sentences in the input document. Populated if the user enables * AnnotateTextRequest.Features.extract_document_sentiment. */ sentences?: Sentence[]; } /** * Represents a category returned from the text classifier. */ export interface ClassificationCategory { /** * The classifier's confidence of the category. Number represents how certain * the classifier is that this category represents the given text. */ confidence?: number; /** * The name of the category representing the document. */ name?: string; /** * Optional. The classifier's severity of the category. This is only present * when the ModerateTextRequest.ModelVersion is set to MODEL_VERSION_2, and * the corresponding category has a severity score. */ severity?: number; } /** * The document classification request message. */ export interface ClassifyTextRequest { /** * Required. Input document. */ document?: Document; } /** * The document classification response message. */ export interface ClassifyTextResponse { /** * Categories representing the input document. 
*/ categories?: ClassificationCategory[]; /** * The language of the text, which will be the same as the language specified * in the request or, if not specified, the automatically-detected language. * See Document.language_code field for more details. */ languageCode?: string; /** * Whether the language is officially supported. The API may still return a * response when the language is not supported, but it is on a best effort * basis. */ languageSupported?: boolean; } /** * Represents a color in the RGBA color space. This representation is designed * for simplicity of conversion to and from color representations in various * languages over compactness. For example, the fields of this representation * can be trivially provided to the constructor of `java.awt.Color` in Java; it * can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` * method in iOS; and, with just a little work, it can be easily formatted into * a CSS `rgba()` string in JavaScript. This reference page doesn't have * information about the absolute color space that should be used to interpret * the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, * applications should assume the sRGB color space. When color equality needs to * be decided, implementations, unless documented otherwise, treat two colors as * equal if all their red, green, blue, and alpha values each differ by at most * `1e-5`. Example (Java): import com.google.type.Color; // ... public static * java.awt.Color fromProto(Color protocolor) { float alpha = * protocolor.hasAlpha() ? 
protocolor.getAlpha().getValue() : 1.0; return new * java.awt.Color( protocolor.getRed(), protocolor.getGreen(), * protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color * color) { float red = (float) color.getRed(); float green = (float) * color.getGreen(); float blue = (float) color.getBlue(); float denominator = * 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / * denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int * alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue * .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return * resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* * fromProto(Color* protocolor) { float red = [protocolor red]; float green = * [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper * = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = * [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green * blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat * red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue * alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result * setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= * 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result * autorelease]; return result; } // ... Example (JavaScript): // ... 
var * protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; * var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; * var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); * var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return * rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || * 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', * rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = * function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green * << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - * hexString.length; var resultBuilder = ['#']; for (var i = 0; i < * missingZeros; i++) { resultBuilder.push('0'); } * resultBuilder.push(hexString); return resultBuilder.join(''); }; // ... */ export interface Color { /** * The fraction of this color that should be applied to the pixel. That is, * the final pixel color is defined by the equation: `pixel color = alpha * * (this color) + (1.0 - alpha) * (background color)` This means that a value * of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to * a completely transparent color. This uses a wrapper message rather than a * simple float scalar so that it is possible to distinguish between a default * value and the value being unset. If omitted, this color object is rendered * as a solid color (as if the alpha value had been explicitly given a value * of 1.0). */ alpha?: number; /** * The amount of blue in the color as a value in the interval [0, 1]. */ blue?: number; /** * The amount of green in the color as a value in the interval [0, 1]. */ green?: number; /** * The amount of red in the color as a value in the interval [0, 1]. */ red?: number; } /** * Metric for billing reports. */ export interface CpuMetric { /** * Required. Number of CPU cores. */ coreNumber?: bigint; /** * Required. 
Total seconds of core usage, e.g. 4.
   */
  coreSec?: bigint;
  /**
   * Required. Type of cpu, e.g. N2.
   */
  cpuType?:
    | "UNKNOWN_CPU_TYPE" | "A2" | "A3" | "C2" | "C2D" | "CUSTOM" | "E2" | "G2"
    | "C3" | "M2" | "M1" | "N1" | "N2_CUSTOM" | "N2" | "N2D";
  /**
   * Required. Machine spec, e.g. N1_STANDARD_4.
   */
  machineSpec?:
    | "UNKNOWN_MACHINE_SPEC"
    | "N1_STANDARD_2" | "N1_STANDARD_4" | "N1_STANDARD_8" | "N1_STANDARD_16"
    | "N1_STANDARD_32" | "N1_STANDARD_64" | "N1_STANDARD_96"
    | "N1_HIGHMEM_2" | "N1_HIGHMEM_4" | "N1_HIGHMEM_8" | "N1_HIGHMEM_16"
    | "N1_HIGHMEM_32" | "N1_HIGHMEM_64" | "N1_HIGHMEM_96"
    | "N1_HIGHCPU_2" | "N1_HIGHCPU_4" | "N1_HIGHCPU_8" | "N1_HIGHCPU_16"
    | "N1_HIGHCPU_32" | "N1_HIGHCPU_64" | "N1_HIGHCPU_96"
    | "A2_HIGHGPU_1G" | "A2_HIGHGPU_2G" | "A2_HIGHGPU_4G" | "A2_HIGHGPU_8G"
    | "A2_MEGAGPU_16G"
    | "A2_ULTRAGPU_1G" | "A2_ULTRAGPU_2G" | "A2_ULTRAGPU_4G" | "A2_ULTRAGPU_8G"
    | "A3_HIGHGPU_1G" | "A3_HIGHGPU_2G" | "A3_HIGHGPU_4G" | "A3_HIGHGPU_8G"
    | "A3_MEGAGPU_8G"
    | "E2_STANDARD_2" | "E2_STANDARD_4" | "E2_STANDARD_8" | "E2_STANDARD_16"
    | "E2_STANDARD_32"
    | "E2_HIGHMEM_2" | "E2_HIGHMEM_4" | "E2_HIGHMEM_8" | "E2_HIGHMEM_16"
    | "E2_HIGHCPU_2" | "E2_HIGHCPU_4" | "E2_HIGHCPU_8" | "E2_HIGHCPU_16"
    | "E2_HIGHCPU_32"
    | "N2_STANDARD_2" | "N2_STANDARD_4" | "N2_STANDARD_8" | "N2_STANDARD_16"
    | "N2_STANDARD_32" | "N2_STANDARD_48" | "N2_STANDARD_64" | "N2_STANDARD_80"
    | "N2_STANDARD_96" | "N2_STANDARD_128"
    | "N2_HIGHMEM_2" | "N2_HIGHMEM_4" | "N2_HIGHMEM_8" | "N2_HIGHMEM_16"
    | "N2_HIGHMEM_32" | "N2_HIGHMEM_48" | "N2_HIGHMEM_64" | "N2_HIGHMEM_80"
    | "N2_HIGHMEM_96" | "N2_HIGHMEM_128"
    | "N2_HIGHCPU_2" | "N2_HIGHCPU_4" | "N2_HIGHCPU_8" | "N2_HIGHCPU_16"
    | "N2_HIGHCPU_32" | "N2_HIGHCPU_48" | "N2_HIGHCPU_64" | "N2_HIGHCPU_80"
    | "N2_HIGHCPU_96"
    | "N2D_STANDARD_2" | "N2D_STANDARD_4" | "N2D_STANDARD_8"
    | "N2D_STANDARD_16" | "N2D_STANDARD_32" | "N2D_STANDARD_48"
    | "N2D_STANDARD_64" | "N2D_STANDARD_80" | "N2D_STANDARD_96"
    | "N2D_STANDARD_128" | "N2D_STANDARD_224"
    | "N2D_HIGHMEM_2" | "N2D_HIGHMEM_4" | "N2D_HIGHMEM_8" | "N2D_HIGHMEM_16"
    | "N2D_HIGHMEM_32" | "N2D_HIGHMEM_48" | "N2D_HIGHMEM_64"
    | "N2D_HIGHMEM_80" | "N2D_HIGHMEM_96"
    | "N2D_HIGHCPU_2" | "N2D_HIGHCPU_4" | "N2D_HIGHCPU_8" | "N2D_HIGHCPU_16"
    | "N2D_HIGHCPU_32" | "N2D_HIGHCPU_48" | "N2D_HIGHCPU_64"
    | "N2D_HIGHCPU_80" | "N2D_HIGHCPU_96" | "N2D_HIGHCPU_128"
    | "N2D_HIGHCPU_224"
    | "C2_STANDARD_4" | "C2_STANDARD_8" | "C2_STANDARD_16" | "C2_STANDARD_30"
    | "C2_STANDARD_60"
    | "C2D_STANDARD_2" | "C2D_STANDARD_4" | "C2D_STANDARD_8"
    | "C2D_STANDARD_16" | "C2D_STANDARD_32" | "C2D_STANDARD_56"
    | "C2D_STANDARD_112"
    | "C2D_HIGHCPU_2" | "C2D_HIGHCPU_4" | "C2D_HIGHCPU_8" | "C2D_HIGHCPU_16"
    | "C2D_HIGHCPU_32" | "C2D_HIGHCPU_56" | "C2D_HIGHCPU_112"
    | "C2D_HIGHMEM_2" | "C2D_HIGHMEM_4" | "C2D_HIGHMEM_8" | "C2D_HIGHMEM_16"
    | "C2D_HIGHMEM_32" | "C2D_HIGHMEM_56" | "C2D_HIGHMEM_112"
    | "G2_STANDARD_4" | "G2_STANDARD_8" | "G2_STANDARD_12" | "G2_STANDARD_16"
    | "G2_STANDARD_24" | "G2_STANDARD_32" | "G2_STANDARD_48" | "G2_STANDARD_96"
    | "C3_STANDARD_4" | "C3_STANDARD_8" | "C3_STANDARD_22" | "C3_STANDARD_44"
    | "C3_STANDARD_88" | "C3_STANDARD_176"
    | "C3_HIGHCPU_4" | "C3_HIGHCPU_8" | "C3_HIGHCPU_22" | "C3_HIGHCPU_44"
    | "C3_HIGHCPU_88" | "C3_HIGHCPU_176"
    | "C3_HIGHMEM_4" | "C3_HIGHMEM_8" | "C3_HIGHMEM_22" | "C3_HIGHMEM_44"
    | "C3_HIGHMEM_88" | "C3_HIGHMEM_176";
  /**
   * Billing tracking labels. They do not contain any user data but only the
   * labels set by Vertex Core Infra itself. Tracking labels' keys are defined
   * with special format: goog-[\p{Ll}\p{N}]+ E.g. "key":
   * "goog-k8s-cluster-name","value": "us-east1-b4rk"
   */
  trackingLabels?: { [key: string]: string };
}

// Converts the bigint fields to their wire (string) form for JSON transport.
function serializeCpuMetric(data: any): CpuMetric {
  return {
    ...data,
    coreNumber: data["coreNumber"] !== undefined ? String(data["coreNumber"]) : undefined,
    coreSec: data["coreSec"] !== undefined ? String(data["coreSec"]) : undefined,
  };
}

// Parses the wire (string) form of the int64 fields back into bigint.
function deserializeCpuMetric(data: any): CpuMetric {
  return {
    ...data,
    coreNumber: data["coreNumber"] !== undefined ? BigInt(data["coreNumber"]) : undefined,
    coreSec: data["coreSec"] !== undefined ? BigInt(data["coreSec"]) : undefined,
  };
}

export interface DiskMetric {
  /**
   * Required. Type of Disk, e.g. REGIONAL_SSD.
   */
  diskType?:
    | "UNKNOWN_DISK_TYPE" | "REGIONAL_SSD" | "REGIONAL_STORAGE" | "PD_SSD"
    | "PD_STANDARD" | "STORAGE_SNAPSHOT";
  /**
   * Required. Seconds of physical disk usage, e.g. 3600.
   */
  gibSec?: bigint;
}

// Converts the bigint field to its wire (string) form for JSON transport.
function serializeDiskMetric(data: any): DiskMetric {
  return {
    ...data,
    gibSec: data["gibSec"] !== undefined ? String(data["gibSec"]) : undefined,
  };
}

// Parses the wire (string) form of the int64 field back into bigint.
function deserializeDiskMetric(data: any): DiskMetric {
  return {
    ...data,
    gibSec: data["gibSec"] !== undefined ? BigInt(data["gibSec"]) : undefined,
  };
}

/**
 * Represents the input to API methods.
 */
export interface Document {
  /**
   * The content of the input in string format. Cloud audit logging exempt
   * since it is based on user data.
   */
  content?: string;
  /**
   * The Google Cloud Storage URI where the file content is located. This URI
   * must be of the form: gs://bucket_name/object_name. For more details, see
   * https://cloud.google.com/storage/docs/reference-uris. NOTE: Cloud Storage
   * object versioning is not supported.
   */
  gcsContentUri?: string;
  /**
   * Optional. The language of the document (if not specified, the language is
   * automatically detected). Both ISO and BCP-47 language codes are accepted.
   * [Language
   * Support](https://cloud.google.com/natural-language/docs/languages) lists
   * currently supported languages for each API method. If the language (either
   * specified by the caller or automatically detected) is not supported by the
   * called API method, an `INVALID_ARGUMENT` error is returned.
   */
  languageCode?: string;
  /**
   * Required. If the type is not set or is `TYPE_UNSPECIFIED`, returns an
   * `INVALID_ARGUMENT` error.
   */
  type?: "TYPE_UNSPECIFIED" | "PLAIN_TEXT" | "HTML";
}

/**
 * Represents a phrase in the text that is a known entity, such as a person,
 * an organization, or location.
The API associates information, such as
 * probability and mentions, with entities.
 */
export interface Entity {
  /**
   * The mentions of this entity in the input document. The API currently
   * supports proper noun mentions.
   */
  mentions?: EntityMention[];
  /**
   * Metadata associated with the entity. For the metadata associated with
   * other entity types, see the Type table below.
   */
  metadata?: { [key: string]: string };
  /**
   * The representative name for the entity.
   */
  name?: string;
  /**
   * For calls to AnalyzeEntitySentiment this field will contain the aggregate
   * sentiment expressed for this entity in the provided document.
   */
  sentiment?: Sentiment;
  /**
   * The entity type.
   */
  type?:
    | "UNKNOWN" | "PERSON" | "LOCATION" | "ORGANIZATION" | "EVENT"
    | "WORK_OF_ART" | "CONSUMER_GOOD" | "OTHER" | "PHONE_NUMBER" | "ADDRESS"
    | "DATE" | "NUMBER" | "PRICE";
}

/**
 * Represents a mention for an entity in the text. Currently, proper noun
 * mentions are supported.
 */
export interface EntityMention {
  /**
   * Probability score associated with the entity. The score shows the
   * probability of the entity mention being the entity type. The score is in
   * (0, 1] range.
   */
  probability?: number;
  /**
   * For calls to AnalyzeEntitySentiment this field will contain the sentiment
   * expressed for this mention of the entity in the provided document.
   */
  sentiment?: Sentiment;
  /**
   * The mention text.
   */
  text?: TextSpan;
  /**
   * The type of the entity mention.
   */
  type?: "TYPE_UNKNOWN" | "PROPER" | "COMMON";
}

export interface GpuMetric {
  /**
   * Required. Seconds of GPU usage, e.g. 3600.
   */
  gpuSec?: bigint;
  /**
   * Required. Type of GPU, e.g. NVIDIA_TESLA_V100.
   */
  gpuType?:
    | "UNKNOWN_GPU_TYPE" | "NVIDIA_TESLA_A100" | "NVIDIA_A100_80GB"
    | "NVIDIA_TESLA_K80" | "NVIDIA_L4" | "NVIDIA_TESLA_P100"
    | "NVIDIA_TESLA_P4" | "NVIDIA_TESLA_T4" | "NVIDIA_TESLA_V100"
    | "NVIDIA_H100_80GB";
  /**
   * Required. Machine spec, e.g. N1_STANDARD_4.
   */
  machineSpec?:
    | "UNKNOWN_MACHINE_SPEC"
    | "N1_STANDARD_2" | "N1_STANDARD_4" | "N1_STANDARD_8" | "N1_STANDARD_16"
    | "N1_STANDARD_32" | "N1_STANDARD_64" | "N1_STANDARD_96"
    | "N1_HIGHMEM_2" | "N1_HIGHMEM_4" | "N1_HIGHMEM_8" | "N1_HIGHMEM_16"
    | "N1_HIGHMEM_32" | "N1_HIGHMEM_64" | "N1_HIGHMEM_96"
    | "N1_HIGHCPU_2" | "N1_HIGHCPU_4" | "N1_HIGHCPU_8" | "N1_HIGHCPU_16"
    | "N1_HIGHCPU_32" | "N1_HIGHCPU_64" | "N1_HIGHCPU_96"
    | "A2_HIGHGPU_1G" | "A2_HIGHGPU_2G" | "A2_HIGHGPU_4G" | "A2_HIGHGPU_8G"
    | "A2_MEGAGPU_16G"
    | "A2_ULTRAGPU_1G" | "A2_ULTRAGPU_2G" | "A2_ULTRAGPU_4G" | "A2_ULTRAGPU_8G"
    | "A3_HIGHGPU_1G" | "A3_HIGHGPU_2G" | "A3_HIGHGPU_4G" | "A3_HIGHGPU_8G"
    | "A3_MEGAGPU_8G"
    | "E2_STANDARD_2" | "E2_STANDARD_4" | "E2_STANDARD_8" | "E2_STANDARD_16"
    | "E2_STANDARD_32"
    | "E2_HIGHMEM_2" | "E2_HIGHMEM_4" | "E2_HIGHMEM_8" | "E2_HIGHMEM_16"
    | "E2_HIGHCPU_2" | "E2_HIGHCPU_4" | "E2_HIGHCPU_8" | "E2_HIGHCPU_16"
    | "E2_HIGHCPU_32"
    | "N2_STANDARD_2" | "N2_STANDARD_4" | "N2_STANDARD_8" | "N2_STANDARD_16"
    | "N2_STANDARD_32" | "N2_STANDARD_48" | "N2_STANDARD_64" | "N2_STANDARD_80"
    | "N2_STANDARD_96" | "N2_STANDARD_128"
    | "N2_HIGHMEM_2" | "N2_HIGHMEM_4" | "N2_HIGHMEM_8" | "N2_HIGHMEM_16"
    | "N2_HIGHMEM_32" | "N2_HIGHMEM_48" | "N2_HIGHMEM_64" | "N2_HIGHMEM_80"
    | "N2_HIGHMEM_96" | "N2_HIGHMEM_128"
    | "N2_HIGHCPU_2" | "N2_HIGHCPU_4" | "N2_HIGHCPU_8" | "N2_HIGHCPU_16"
    | "N2_HIGHCPU_32" | "N2_HIGHCPU_48" | "N2_HIGHCPU_64" | "N2_HIGHCPU_80"
    | "N2_HIGHCPU_96"
    | "N2D_STANDARD_2" | "N2D_STANDARD_4" | "N2D_STANDARD_8"
    | "N2D_STANDARD_16" | "N2D_STANDARD_32" | "N2D_STANDARD_48"
    | "N2D_STANDARD_64" | "N2D_STANDARD_80" | "N2D_STANDARD_96"
    | "N2D_STANDARD_128" | "N2D_STANDARD_224"
    | "N2D_HIGHMEM_2" | "N2D_HIGHMEM_4" | "N2D_HIGHMEM_8" | "N2D_HIGHMEM_16"
    | "N2D_HIGHMEM_32" | "N2D_HIGHMEM_48" | "N2D_HIGHMEM_64"
    | "N2D_HIGHMEM_80" | "N2D_HIGHMEM_96"
    | "N2D_HIGHCPU_2" | "N2D_HIGHCPU_4" | "N2D_HIGHCPU_8" | "N2D_HIGHCPU_16"
    | "N2D_HIGHCPU_32" | "N2D_HIGHCPU_48" | "N2D_HIGHCPU_64"
    | "N2D_HIGHCPU_80" | "N2D_HIGHCPU_96" | "N2D_HIGHCPU_128"
    | "N2D_HIGHCPU_224"
    | "C2_STANDARD_4" | "C2_STANDARD_8" | "C2_STANDARD_16" | "C2_STANDARD_30"
    | "C2_STANDARD_60"
    | "C2D_STANDARD_2" | "C2D_STANDARD_4" | "C2D_STANDARD_8"
    | "C2D_STANDARD_16" | "C2D_STANDARD_32" | "C2D_STANDARD_56"
    | "C2D_STANDARD_112"
    | "C2D_HIGHCPU_2" | "C2D_HIGHCPU_4" | "C2D_HIGHCPU_8" | "C2D_HIGHCPU_16"
    | "C2D_HIGHCPU_32" | "C2D_HIGHCPU_56" | "C2D_HIGHCPU_112"
    | "C2D_HIGHMEM_2" | "C2D_HIGHMEM_4" | "C2D_HIGHMEM_8" | "C2D_HIGHMEM_16"
    | "C2D_HIGHMEM_32" | "C2D_HIGHMEM_56" | "C2D_HIGHMEM_112"
    | "G2_STANDARD_4" | "G2_STANDARD_8" | "G2_STANDARD_12" | "G2_STANDARD_16"
    | "G2_STANDARD_24" | "G2_STANDARD_32" | "G2_STANDARD_48" | "G2_STANDARD_96"
    | "C3_STANDARD_4" | "C3_STANDARD_8" | "C3_STANDARD_22" | "C3_STANDARD_44"
    | "C3_STANDARD_88" | "C3_STANDARD_176"
    | "C3_HIGHCPU_4" | "C3_HIGHCPU_8" | "C3_HIGHCPU_22" | "C3_HIGHCPU_44"
    | "C3_HIGHCPU_88" | "C3_HIGHCPU_176"
    | "C3_HIGHMEM_4" | "C3_HIGHMEM_8" | "C3_HIGHMEM_22" | "C3_HIGHMEM_44"
    | "C3_HIGHMEM_88" | "C3_HIGHMEM_176";
  /**
   * Billing tracking labels. They do not contain any user data but only the
   * labels set by Vertex Core Infra itself. Tracking labels' keys are defined
   * with special format: goog-[\p{Ll}\p{N}]+ E.g. "key":
   * "goog-k8s-cluster-name","value": "us-east1-b4rk"
   */
  trackingLabels?: { [key: string]: string };
}

// Converts the bigint field to its wire (string) form for JSON transport.
function serializeGpuMetric(data: any): GpuMetric {
  return {
    ...data,
    gpuSec: data["gpuSec"] !== undefined ? String(data["gpuSec"]) : undefined,
  };
}

// Parses the wire (string) form of the int64 field back into bigint.
function deserializeGpuMetric(data: any): GpuMetric {
  return {
    ...data,
    gpuSec: data["gpuSec"] !== undefined ? BigInt(data["gpuSec"]) : undefined,
  };
}

/**
 * Infra Usage of billing metrics.
 */
export interface InfraUsage {
  /**
   * Aggregated core metrics since requested start_time.
   */
  cpuMetrics?: CpuMetric[];
  /**
   * Aggregated persistent disk metrics since requested start_time.
   */
  diskMetrics?: DiskMetric[];
  /**
   * Aggregated gpu metrics since requested start_time.
*/ gpuMetrics?: GpuMetric[]; /** * Aggregated ram metrics since requested start_time. */ ramMetrics?: RamMetric[]; /** * Aggregated tpu metrics since requested start_time. */ tpuMetrics?: TpuMetric[]; } function serializeInfraUsage(data: any): InfraUsage { return { ...data, cpuMetrics: data["cpuMetrics"] !== undefined ? data["cpuMetrics"].map((item: any) => (serializeCpuMetric(item))) : undefined, diskMetrics: data["diskMetrics"] !== undefined ? data["diskMetrics"].map((item: any) => (serializeDiskMetric(item))) : undefined, gpuMetrics: data["gpuMetrics"] !== undefined ? data["gpuMetrics"].map((item: any) => (serializeGpuMetric(item))) : undefined, ramMetrics: data["ramMetrics"] !== undefined ? data["ramMetrics"].map((item: any) => (serializeRamMetric(item))) : undefined, tpuMetrics: data["tpuMetrics"] !== undefined ? data["tpuMetrics"].map((item: any) => (serializeTpuMetric(item))) : undefined, }; } function deserializeInfraUsage(data: any): InfraUsage { return { ...data, cpuMetrics: data["cpuMetrics"] !== undefined ? data["cpuMetrics"].map((item: any) => (deserializeCpuMetric(item))) : undefined, diskMetrics: data["diskMetrics"] !== undefined ? data["diskMetrics"].map((item: any) => (deserializeDiskMetric(item))) : undefined, gpuMetrics: data["gpuMetrics"] !== undefined ? data["gpuMetrics"].map((item: any) => (deserializeGpuMetric(item))) : undefined, ramMetrics: data["ramMetrics"] !== undefined ? data["ramMetrics"].map((item: any) => (deserializeRamMetric(item))) : undefined, tpuMetrics: data["tpuMetrics"] !== undefined ? data["tpuMetrics"].map((item: any) => (deserializeTpuMetric(item))) : undefined, }; } /** * The document moderation request message. */ export interface ModerateTextRequest { /** * Required. Input document. */ document?: Document; /** * Optional. The model version to use for ModerateText. */ modelVersion?: | "MODEL_VERSION_UNSPECIFIED" | "MODEL_VERSION_1" | "MODEL_VERSION_2"; } /** * The document moderation response message. 
*/ export interface ModerateTextResponse { /** * The language of the text, which will be the same as the language specified * in the request or, if not specified, the automatically-detected language. * See Document.language_code field for more details. */ languageCode?: string; /** * Whether the language is officially supported. The API may still return a * response when the language is not supported, but it is on a best effort * basis. */ languageSupported?: boolean; /** * Harmful and sensitive categories representing the input document. */ moderationCategories?: ClassificationCategory[]; } export interface RamMetric { /** * Required. VM memory in Gigabyte second, e.g. 3600. Using int64 type to * match billing metrics definition. */ gibSec?: bigint; /** * Required. Machine spec, e.g. N1_STANDARD_4. */ machineSpec?: | "UNKNOWN_MACHINE_SPEC" | "N1_STANDARD_2" | "N1_STANDARD_4" | "N1_STANDARD_8" | "N1_STANDARD_16" | "N1_STANDARD_32" | "N1_STANDARD_64" | "N1_STANDARD_96" | "N1_HIGHMEM_2" | "N1_HIGHMEM_4" | "N1_HIGHMEM_8" | "N1_HIGHMEM_16" | "N1_HIGHMEM_32" | "N1_HIGHMEM_64" | "N1_HIGHMEM_96" | "N1_HIGHCPU_2" | "N1_HIGHCPU_4" | "N1_HIGHCPU_8" | "N1_HIGHCPU_16" | "N1_HIGHCPU_32" | "N1_HIGHCPU_64" | "N1_HIGHCPU_96" | "A2_HIGHGPU_1G" | "A2_HIGHGPU_2G" | "A2_HIGHGPU_4G" | "A2_HIGHGPU_8G" | "A2_MEGAGPU_16G" | "A2_ULTRAGPU_1G" | "A2_ULTRAGPU_2G" | "A2_ULTRAGPU_4G" | "A2_ULTRAGPU_8G" | "A3_HIGHGPU_1G" | "A3_HIGHGPU_2G" | "A3_HIGHGPU_4G" | "A3_HIGHGPU_8G" | "A3_MEGAGPU_8G" | "E2_STANDARD_2" | "E2_STANDARD_4" | "E2_STANDARD_8" | "E2_STANDARD_16" | "E2_STANDARD_32" | "E2_HIGHMEM_2" | "E2_HIGHMEM_4" | "E2_HIGHMEM_8" | "E2_HIGHMEM_16" | "E2_HIGHCPU_2" | "E2_HIGHCPU_4" | "E2_HIGHCPU_8" | "E2_HIGHCPU_16" | "E2_HIGHCPU_32" | "N2_STANDARD_2" | "N2_STANDARD_4" | "N2_STANDARD_8" | "N2_STANDARD_16" | "N2_STANDARD_32" | "N2_STANDARD_48" | "N2_STANDARD_64" | "N2_STANDARD_80" | "N2_STANDARD_96" | "N2_STANDARD_128" | "N2_HIGHMEM_2" | "N2_HIGHMEM_4" | "N2_HIGHMEM_8" | "N2_HIGHMEM_16" | 
"N2_HIGHMEM_32" | "N2_HIGHMEM_48" | "N2_HIGHMEM_64" | "N2_HIGHMEM_80" | "N2_HIGHMEM_96" | "N2_HIGHMEM_128" | "N2_HIGHCPU_2" | "N2_HIGHCPU_4" | "N2_HIGHCPU_8" | "N2_HIGHCPU_16" | "N2_HIGHCPU_32" | "N2_HIGHCPU_48" | "N2_HIGHCPU_64" | "N2_HIGHCPU_80" | "N2_HIGHCPU_96" | "N2D_STANDARD_2" | "N2D_STANDARD_4" | "N2D_STANDARD_8" | "N2D_STANDARD_16" | "N2D_STANDARD_32" | "N2D_STANDARD_48" | "N2D_STANDARD_64" | "N2D_STANDARD_80" | "N2D_STANDARD_96" | "N2D_STANDARD_128" | "N2D_STANDARD_224" | "N2D_HIGHMEM_2" | "N2D_HIGHMEM_4" | "N2D_HIGHMEM_8" | "N2D_HIGHMEM_16" | "N2D_HIGHMEM_32" | "N2D_HIGHMEM_48" | "N2D_HIGHMEM_64" | "N2D_HIGHMEM_80" | "N2D_HIGHMEM_96" | "N2D_HIGHCPU_2" | "N2D_HIGHCPU_4" | "N2D_HIGHCPU_8" | "N2D_HIGHCPU_16" | "N2D_HIGHCPU_32" | "N2D_HIGHCPU_48" | "N2D_HIGHCPU_64" | "N2D_HIGHCPU_80" | "N2D_HIGHCPU_96" | "N2D_HIGHCPU_128" | "N2D_HIGHCPU_224" | "C2_STANDARD_4" | "C2_STANDARD_8" | "C2_STANDARD_16" | "C2_STANDARD_30" | "C2_STANDARD_60" | "C2D_STANDARD_2" | "C2D_STANDARD_4" | "C2D_STANDARD_8" | "C2D_STANDARD_16" | "C2D_STANDARD_32" | "C2D_STANDARD_56" | "C2D_STANDARD_112" | "C2D_HIGHCPU_2" | "C2D_HIGHCPU_4" | "C2D_HIGHCPU_8" | "C2D_HIGHCPU_16" | "C2D_HIGHCPU_32" | "C2D_HIGHCPU_56" | "C2D_HIGHCPU_112" | "C2D_HIGHMEM_2" | "C2D_HIGHMEM_4" | "C2D_HIGHMEM_8" | "C2D_HIGHMEM_16" | "C2D_HIGHMEM_32" | "C2D_HIGHMEM_56" | "C2D_HIGHMEM_112" | "G2_STANDARD_4" | "G2_STANDARD_8" | "G2_STANDARD_12" | "G2_STANDARD_16" | "G2_STANDARD_24" | "G2_STANDARD_32" | "G2_STANDARD_48" | "G2_STANDARD_96" | "C3_STANDARD_4" | "C3_STANDARD_8" | "C3_STANDARD_22" | "C3_STANDARD_44" | "C3_STANDARD_88" | "C3_STANDARD_176" | "C3_HIGHCPU_4" | "C3_HIGHCPU_8" | "C3_HIGHCPU_22" | "C3_HIGHCPU_44" | "C3_HIGHCPU_88" | "C3_HIGHCPU_176" | "C3_HIGHMEM_4" | "C3_HIGHMEM_8" | "C3_HIGHMEM_22" | "C3_HIGHMEM_44" | "C3_HIGHMEM_88" | "C3_HIGHMEM_176"; /** * Required. VM memory in gb. */ memories?: number; /** * Required. Type of ram. 
*/ ramType?: | "UNKNOWN_RAM_TYPE" | "A2" | "A3" | "C2" | "C2D" | "CUSTOM" | "E2" | "G2" | "C3" | "M2" | "M1" | "N1" | "N2_CUSTOM" | "N2" | "N2D"; /** * Billing tracking labels. They do not contain any user data but only the * labels set by Vertex Core Infra itself. Tracking labels' keys are defined * with special format: goog-[\p{Ll}\p{N}]+ E.g. "key": * "goog-k8s-cluster-name","value": "us-east1-b4rk" */ trackingLabels?: { [key: string]: string }; } function serializeRamMetric(data: any): RamMetric { return { ...data, gibSec: data["gibSec"] !== undefined ? String(data["gibSec"]) : undefined, }; } function deserializeRamMetric(data: any): RamMetric { return { ...data, gibSec: data["gibSec"] !== undefined ? BigInt(data["gibSec"]) : undefined, }; } /** * Represents a sentence in the input document. */ export interface Sentence { /** * For calls to AnalyzeSentiment or if * AnnotateTextRequest.Features.extract_document_sentiment is set to true, * this field will contain the sentiment for the sentence. */ sentiment?: Sentiment; /** * The sentence text. */ text?: TextSpan; } /** * Represents the feeling associated with the entire text or entities in the * text. */ export interface Sentiment { /** * A non-negative number in the [0, +inf] range, which represents the * absolute magnitude of sentiment regardless of score (positive or negative). */ magnitude?: number; /** * Sentiment score between -1.0 (negative sentiment) and 1.0 (positive * sentiment). */ score?: number; } /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains three * pieces of data: error code, error message, and error details. You can find * out more about this error model and how to work with it in the [API Design * Guide](https://cloud.google.com/apis/design/errors). 
*/ export interface Status { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. */ details?: { [key: string]: any }[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } /** * Represents a text span in the input document. */ export interface TextSpan { /** * The API calculates the beginning offset of the content in the original * document according to the EncodingType specified in the API request. */ beginOffset?: number; /** * The content of the text span, which is a substring of the document. */ content?: string; } export interface TpuMetric { /** * Required. Seconds of TPU usage, e.g. 3600. */ tpuSec?: bigint; /** * Required. Type of TPU, e.g. TPU_V2, TPU_V3_POD. */ tpuType?: | "UNKNOWN_TPU_TYPE" | "TPU_V2_POD" | "TPU_V2" | "TPU_V3_POD" | "TPU_V3" | "TPU_V5_LITEPOD"; } function serializeTpuMetric(data: any): TpuMetric { return { ...data, tpuSec: data["tpuSec"] !== undefined ? String(data["tpuSec"]) : undefined, }; } function deserializeTpuMetric(data: any): TpuMetric { return { ...data, tpuSec: data["tpuSec"] !== undefined ? BigInt(data["tpuSec"]) : undefined, }; } /** * The data statistics of a series of ARRAY values. */ export interface XPSArrayStats { commonStats?: XPSCommonStats; /** * Stats of all the values of all arrays, as if they were a single long * series of data. The type depends on the element type of the array. */ memberStats?: XPSDataStats; } function serializeXPSArrayStats(data: any): XPSArrayStats { return { ...data, commonStats: data["commonStats"] !== undefined ? serializeXPSCommonStats(data["commonStats"]) : undefined, memberStats: data["memberStats"] !== undefined ? 
serializeXPSDataStats(data["memberStats"]) : undefined, }; } function deserializeXPSArrayStats(data: any): XPSArrayStats { return { ...data, commonStats: data["commonStats"] !== undefined ? deserializeXPSCommonStats(data["commonStats"]) : undefined, memberStats: data["memberStats"] !== undefined ? deserializeXPSDataStats(data["memberStats"]) : undefined, }; } export interface XPSBatchPredictResponse { /** * Examples for batch prediction result. Under full API implementation, * results are stored in shared RecordIO of AnnotatedExample protobufs, the * annotations field of which is populated by XPS backend. */ exampleSet?: XPSExampleSet; } function serializeXPSBatchPredictResponse(data: any): XPSBatchPredictResponse { return { ...data, exampleSet: data["exampleSet"] !== undefined ? serializeXPSExampleSet(data["exampleSet"]) : undefined, }; } function deserializeXPSBatchPredictResponse(data: any): XPSBatchPredictResponse { return { ...data, exampleSet: data["exampleSet"] !== undefined ? deserializeXPSExampleSet(data["exampleSet"]) : undefined, }; } /** * Bounding box matching model metrics for a single intersection-over-union * threshold and multiple label match confidence thresholds. */ export interface XPSBoundingBoxMetricsEntry { /** * Metrics for each label-match confidence_threshold from * 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ confidenceMetricsEntries?: XPSBoundingBoxMetricsEntryConfidenceMetricsEntry[]; /** * The intersection-over-union threshold value used to compute this metrics * entry. */ iouThreshold?: number; /** * The mean average precision. */ meanAveragePrecision?: number; } /** * Metrics for a single confidence threshold. */ export interface XPSBoundingBoxMetricsEntryConfidenceMetricsEntry { /** * The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * The harmonic mean of recall and precision. */ f1Score?: number; /** * Precision for the given confidence threshold. 
*/ precision?: number; /** * Recall for the given confidence threshold. */ recall?: number; } /** * The data statistics of a series of CATEGORY values. */ export interface XPSCategoryStats { commonStats?: XPSCommonStats; /** * The statistics of the top 20 CATEGORY values, ordered by * CategoryStats.SingleCategoryStats.count. */ topCategoryStats?: XPSCategoryStatsSingleCategoryStats[]; } function serializeXPSCategoryStats(data: any): XPSCategoryStats { return { ...data, commonStats: data["commonStats"] !== undefined ? serializeXPSCommonStats(data["commonStats"]) : undefined, topCategoryStats: data["topCategoryStats"] !== undefined ? data["topCategoryStats"].map((item: any) => (serializeXPSCategoryStatsSingleCategoryStats(item))) : undefined, }; } function deserializeXPSCategoryStats(data: any): XPSCategoryStats { return { ...data, commonStats: data["commonStats"] !== undefined ? deserializeXPSCommonStats(data["commonStats"]) : undefined, topCategoryStats: data["topCategoryStats"] !== undefined ? data["topCategoryStats"].map((item: any) => (deserializeXPSCategoryStatsSingleCategoryStats(item))) : undefined, }; } /** * The statistics of a single CATEGORY value. */ export interface XPSCategoryStatsSingleCategoryStats { /** * The number of occurrences of this value in the series. */ count?: bigint; /** * The CATEGORY value. */ value?: string; } function serializeXPSCategoryStatsSingleCategoryStats(data: any): XPSCategoryStatsSingleCategoryStats { return { ...data, count: data["count"] !== undefined ? String(data["count"]) : undefined, }; } function deserializeXPSCategoryStatsSingleCategoryStats(data: any): XPSCategoryStatsSingleCategoryStats { return { ...data, count: data["count"] !== undefined ? BigInt(data["count"]) : undefined, }; } /** * Model evaluation metrics for classification problems. It can be used for * image and video classification. Next tag: 9. */ export interface XPSClassificationEvaluationMetrics { /** * The Area under precision recall curve metric. 
*/ auPrc?: number; /** * The Area Under Receiver Operating Characteristic curve metric. * Micro-averaged for the overall evaluation. */ auRoc?: number; /** * The Area under precision recall curve metric based on priors. */ baseAuPrc?: number; /** * Metrics that have confidence thresholds. Precision-recall curve can be * derived from it. */ confidenceMetricsEntries?: XPSConfidenceMetricsEntry[]; /** * Confusion matrix of the evaluation. Only set for MULTICLASS classification * problems where number of annotation specs is no more than 10. Only set for * model level evaluation, not for evaluation per label. */ confusionMatrix?: XPSConfusionMatrix; /** * The number of examples used for model evaluation. */ evaluatedExamplesCount?: number; /** * The Log Loss metric. */ logLoss?: number; } function serializeXPSClassificationEvaluationMetrics(data: any): XPSClassificationEvaluationMetrics { return { ...data, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (serializeXPSConfidenceMetricsEntry(item))) : undefined, confusionMatrix: data["confusionMatrix"] !== undefined ? serializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, }; } function deserializeXPSClassificationEvaluationMetrics(data: any): XPSClassificationEvaluationMetrics { return { ...data, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (deserializeXPSConfidenceMetricsEntry(item))) : undefined, confusionMatrix: data["confusionMatrix"] !== undefined ? deserializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, }; } /** * Map from color to display name. Will only be used by Image Segmentation for * uCAIP. */ export interface XPSColorMap { /** * Should be used during training. */ annotationSpecIdToken?: string; /** * This type is deprecated in favor of the IntColor below. 
This is because * google.type.Color represents color as a float which semantically does not * reflect discrete classes/categories concept. Moreover, to handle it well we * need to have some tolerance when converting to a discretized color. As * such, the recommendation is to have API surface still use google.type.Color * while internally IntColor is used. */ color?: Color; /** * Should be used during preprocessing. */ displayName?: string; intColor?: XPSColorMapIntColor; } /** * RGB color and each channel is represented by an integer. */ export interface XPSColorMapIntColor { /** * The value should be in range of [0, 255]. */ blue?: number; /** * The value should be in range of [0, 255]. */ green?: number; /** * The value should be in range of [0, 255]. */ red?: number; } export interface XPSColumnSpec { /** * The unique id of the column. When Preprocess, the Tables BE will populate * the order id of the column, which reflects the order of the column inside * the table, i.e. 0 means the first column in the table, N-1 means the last * column. AutoML BE will persist this order id in Spanner and set the order * id here when calling RefreshTablesStats and Train. Note: it's different * than the column_spec_id that is generated in AutoML BE. */ columnId?: number; /** * The data stats of the column. It's output in RefreshTablesStats and a * required input for Train. */ dataStats?: XPSDataStats; /** * The data type of the column. It's output in Preprocess rpc and a * required input for RefreshTablesStats and Train. */ dataType?: XPSDataType; /** * The display name of the column. It's output in Preprocess and a required * input for RefreshTablesStats and Train. */ displayName?: string; forecastingMetadata?: XPSColumnSpecForecastingMetadata; /** * It's output in RefreshTablesStats, and a required input in Train. 
*/ topCorrelatedColumns?: XPSColumnSpecCorrelatedColumn[]; } function serializeXPSColumnSpec(data: any): XPSColumnSpec { return { ...data, dataStats: data["dataStats"] !== undefined ? serializeXPSDataStats(data["dataStats"]) : undefined, }; } function deserializeXPSColumnSpec(data: any): XPSColumnSpec { return { ...data, dataStats: data["dataStats"] !== undefined ? deserializeXPSDataStats(data["dataStats"]) : undefined, }; } /** * Identifies a table's column, and its correlation with the column this * ColumnSpec describes. */ export interface XPSColumnSpecCorrelatedColumn { columnId?: number; correlationStats?: XPSCorrelationStats; } export interface XPSColumnSpecForecastingMetadata { /** * The type of the column for FORECASTING model training purposes. */ columnType?: | "COLUMN_TYPE_UNSPECIFIED" | "KEY" | "KEY_METADATA" | "TIME_SERIES_AVAILABLE_PAST_ONLY" | "TIME_SERIES_AVAILABLE_PAST_AND_FUTURE"; } /** * Common statistics for a column with a specified data type. */ export interface XPSCommonStats { distinctValueCount?: bigint; nullValueCount?: bigint; validValueCount?: bigint; } function serializeXPSCommonStats(data: any): XPSCommonStats { return { ...data, distinctValueCount: data["distinctValueCount"] !== undefined ? String(data["distinctValueCount"]) : undefined, nullValueCount: data["nullValueCount"] !== undefined ? String(data["nullValueCount"]) : undefined, validValueCount: data["validValueCount"] !== undefined ? String(data["validValueCount"]) : undefined, }; } function deserializeXPSCommonStats(data: any): XPSCommonStats { return { ...data, distinctValueCount: data["distinctValueCount"] !== undefined ? BigInt(data["distinctValueCount"]) : undefined, nullValueCount: data["nullValueCount"] !== undefined ? BigInt(data["nullValueCount"]) : undefined, validValueCount: data["validValueCount"] !== undefined ? BigInt(data["validValueCount"]) : undefined, }; } /** * ConfidenceMetricsEntry includes generic precision, recall, f1 score etc. * Next tag: 16. 
*/ export interface XPSConfidenceMetricsEntry { /** * Metrics are computed with an assumption that the model never return * predictions with score lower than this value. */ confidenceThreshold?: number; /** * The harmonic mean of recall and precision. */ f1Score?: number; /** * The harmonic mean of recall_at1 and precision_at1. */ f1ScoreAt1?: number; /** * The number of ground truth labels that are not matched by a model created * label. */ falseNegativeCount?: bigint; /** * The number of model created labels that do not match a ground truth label. */ falsePositiveCount?: bigint; /** * False Positive Rate for the given confidence threshold. */ falsePositiveRate?: number; /** * The False Positive Rate when only considering the label that has the * highest prediction score and not below the confidence threshold for each * example. */ falsePositiveRateAt1?: number; /** * Metrics are computed with an assumption that the model always returns at * most this many predictions (ordered by their score, descendingly), but they * all still need to meet the confidence_threshold. */ positionThreshold?: number; /** * Precision for the given confidence threshold. */ precision?: number; /** * The precision when only considering the label that has the highest * prediction score and not below the confidence threshold for each example. */ precisionAt1?: number; /** * Recall (true positive rate) for the given confidence threshold. */ recall?: number; /** * The recall (true positive rate) when only considering the label that has * the highest prediction score and not below the confidence threshold for * each example. */ recallAt1?: number; /** * The number of labels that were not created by the model, but if they * would, they would not match a ground truth label. */ trueNegativeCount?: bigint; /** * The number of model created labels that match a ground truth label. 
*/ truePositiveCount?: bigint; } function serializeXPSConfidenceMetricsEntry(data: any): XPSConfidenceMetricsEntry { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? String(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? String(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? String(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? String(data["truePositiveCount"]) : undefined, }; } function deserializeXPSConfidenceMetricsEntry(data: any): XPSConfidenceMetricsEntry { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? BigInt(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? BigInt(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? BigInt(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? BigInt(data["truePositiveCount"]) : undefined, }; } /** * Confusion matrix of the model running the classification. */ export interface XPSConfusionMatrix { /** * For the following three repeated fields, only one is intended to be set. * annotation_spec_id_token is preferable to be set. ID tokens of the * annotation specs used in the confusion matrix. */ annotationSpecIdToken?: string[]; /** * Category (mainly for segmentation). Set only for image segmentation * models. Note: uCAIP Image Segmentation should use annotation_spec_id_token. */ category?: number[]; /** * Rows in the confusion matrix. The number of rows is equal to the size of * `annotation_spec_id_token`. `row[i].value[j]` is the number of examples * that have ground truth of the `annotation_spec_id_token[i]` and are * predicted as `annotation_spec_id_token[j]` by the model being evaluated. 
*/ row?: XPSConfusionMatrixRow[]; /** * Sentiment labels used in the confusion matrix. Set only for text sentiment * models. For AutoML Text Revamp, use `annotation_spec_id_token` instead and * leave this field empty. */ sentimentLabel?: number[]; } function serializeXPSConfusionMatrix(data: any): XPSConfusionMatrix { return { ...data, row: data["row"] !== undefined ? data["row"].map((item: any) => (serializeXPSConfusionMatrixRow(item))) : undefined, }; } function deserializeXPSConfusionMatrix(data: any): XPSConfusionMatrix { return { ...data, row: data["row"] !== undefined ? data["row"].map((item: any) => (deserializeXPSConfusionMatrixRow(item))) : undefined, }; } /** * A row in the confusion matrix. */ export interface XPSConfusionMatrixRow { /** * Same as above except intended to represent other counts (for e.g. for * segmentation this is pixel count). NOTE(params): Only example_count or * count is set (oneoff does not support repeated fields unless they are * embedded inside another message). */ count?: bigint[]; /** * Value of the specific cell in the confusion matrix. The number of values * each row has (i.e. the length of the row) is equal to the length of the * annotation_spec_id_token field. */ exampleCount?: number[]; } function serializeXPSConfusionMatrixRow(data: any): XPSConfusionMatrixRow { return { ...data, count: data["count"] !== undefined ? data["count"].map((item: any) => (String(item))) : undefined, }; } function deserializeXPSConfusionMatrixRow(data: any): XPSConfusionMatrixRow { return { ...data, count: data["count"] !== undefined ? data["count"].map((item: any) => (BigInt(item))) : undefined, }; } /** * A model format used for iOS mobile devices. */ export interface XPSCoreMlFormat { } /** * A correlation statistics between two series of DataType values. The series * may have differing DataType-s, but within a single series the DataType must * be the same. 
*/ export interface XPSCorrelationStats { /** * The correlation value using the Cramer's V measure. */ cramersV?: number; } /** * Different types of errors and the stats associatesd with each error. */ export interface XPSDataErrors { /** * Number of records having errors associated with the enum. */ count?: number; /** * Type of the error. */ errorType?: | "ERROR_TYPE_UNSPECIFIED" | "UNSUPPORTED_AUDIO_FORMAT" | "FILE_EXTENSION_MISMATCH_WITH_AUDIO_FORMAT" | "FILE_TOO_LARGE" | "MISSING_TRANSCRIPTION"; } /** * The data statistics of a series of values that share the same DataType. */ export interface XPSDataStats { /** * The statistics for ARRAY DataType. */ arrayStats?: XPSArrayStats; /** * The statistics for CATEGORY DataType. */ categoryStats?: XPSCategoryStats; /** * The number of distinct values. */ distinctValueCount?: bigint; /** * The statistics for FLOAT64 DataType. */ float64Stats?: XPSFloat64Stats; /** * The number of values that are null. */ nullValueCount?: bigint; /** * The statistics for STRING DataType. */ stringStats?: XPSStringStats; /** * The statistics for STRUCT DataType. */ structStats?: XPSStructStats; /** * The statistics for TIMESTAMP DataType. */ timestampStats?: XPSTimestampStats; /** * The number of values that are valid. */ validValueCount?: bigint; } function serializeXPSDataStats(data: any): XPSDataStats { return { ...data, arrayStats: data["arrayStats"] !== undefined ? serializeXPSArrayStats(data["arrayStats"]) : undefined, categoryStats: data["categoryStats"] !== undefined ? serializeXPSCategoryStats(data["categoryStats"]) : undefined, distinctValueCount: data["distinctValueCount"] !== undefined ? String(data["distinctValueCount"]) : undefined, float64Stats: data["float64Stats"] !== undefined ? serializeXPSFloat64Stats(data["float64Stats"]) : undefined, nullValueCount: data["nullValueCount"] !== undefined ? String(data["nullValueCount"]) : undefined, stringStats: data["stringStats"] !== undefined ? 
serializeXPSStringStats(data["stringStats"]) : undefined, structStats: data["structStats"] !== undefined ? serializeXPSStructStats(data["structStats"]) : undefined, timestampStats: data["timestampStats"] !== undefined ? serializeXPSTimestampStats(data["timestampStats"]) : undefined, validValueCount: data["validValueCount"] !== undefined ? String(data["validValueCount"]) : undefined, }; } function deserializeXPSDataStats(data: any): XPSDataStats { return { ...data, arrayStats: data["arrayStats"] !== undefined ? deserializeXPSArrayStats(data["arrayStats"]) : undefined, categoryStats: data["categoryStats"] !== undefined ? deserializeXPSCategoryStats(data["categoryStats"]) : undefined, distinctValueCount: data["distinctValueCount"] !== undefined ? BigInt(data["distinctValueCount"]) : undefined, float64Stats: data["float64Stats"] !== undefined ? deserializeXPSFloat64Stats(data["float64Stats"]) : undefined, nullValueCount: data["nullValueCount"] !== undefined ? BigInt(data["nullValueCount"]) : undefined, stringStats: data["stringStats"] !== undefined ? deserializeXPSStringStats(data["stringStats"]) : undefined, structStats: data["structStats"] !== undefined ? deserializeXPSStructStats(data["structStats"]) : undefined, timestampStats: data["timestampStats"] !== undefined ? deserializeXPSTimestampStats(data["timestampStats"]) : undefined, validValueCount: data["validValueCount"] !== undefined ? BigInt(data["validValueCount"]) : undefined, }; } /** * Indicated the type of data that can be stored in a structured data entity * (e.g. a table). */ export interface XPSDataType { /** * The highly compatible data types to this data type. */ compatibleDataTypes?: XPSDataType[]; /** * If type_code == ARRAY, then `list_element_type` is the type of the * elements. */ listElementType?: XPSDataType; /** * If true, this DataType can also be `null`. */ nullable?: boolean; /** * If type_code == STRUCT, then `struct_type` provides type information for * the struct's fields. 
*/ structType?: XPSStructType; /** * If type_code == TIMESTAMP then `time_format` provides the format in which * that time field is expressed. The time_format must be written in `strftime` * syntax. If time_format is not set, then the default format as described on * the field is used. */ timeFormat?: string; /** * Required. The TypeCode for this type. */ typeCode?: | "TYPE_CODE_UNSPECIFIED" | "FLOAT64" | "TIMESTAMP" | "STRING" | "ARRAY" | "STRUCT" | "CATEGORY"; } /** * A model format used for Docker containers. Use the params field to customize * the container. The container is verified to work correctly on ubuntu 16.04 * operating system. */ export interface XPSDockerFormat { /** * Optional. Additional cpu information describing the requirements for the * to be exported model files. */ cpuArchitecture?: | "CPU_ARCHITECTURE_UNSPECIFIED" | "CPU_ARCHITECTURE_X86_64"; /** * Optional. Additional gpu information describing the requirements for the * to be exported model files. */ gpuArchitecture?: | "GPU_ARCHITECTURE_UNSPECIFIED" | "GPU_ARCHITECTURE_NVIDIA"; } /** * A model format used for [Edge TPU](https://cloud.google.com/edge-tpu/) * devices. */ export interface XPSEdgeTpuTfLiteFormat { } /** * Contains xPS-specific model evaluation metrics either for a single * annotation spec (label), or for the model overall. Next tag: 18. */ export interface XPSEvaluationMetrics { /** * The annotation_spec for which this evaluation metrics instance had been * created. Empty iff this is an overall model evaluation (like Tables * evaluation metrics), i.e. aggregated across all labels. The value comes * from the input annotations in AnnotatedExample. For MVP product or for text * sentiment models where annotation_spec_id_token is not available, set label * instead. */ annotationSpecIdToken?: string; /** * The integer category label for which this evaluation metric instance had * been created. Valid categories are 0 or higher. 
Overall model evaluation * should set this to negative values (rather than implicit zero). Only used * for Image Segmentation (prefer to set annotation_spec_id_token instead). * Note: uCAIP Image Segmentation should use annotation_spec_id_token. */ category?: number; /** * The number of examples used to create this evaluation metrics instance. */ evaluatedExampleCount?: number; imageClassificationEvalMetrics?: XPSClassificationEvaluationMetrics; imageObjectDetectionEvalMetrics?: XPSImageObjectDetectionEvaluationMetrics; imageSegmentationEvalMetrics?: XPSImageSegmentationEvaluationMetrics; /** * The label for which this evaluation metrics instance had been created. * Empty iff this is an overall model evaluation (like Tables evaluation * metrics), i.e. aggregated across all labels. The label maps to * AnnotationSpec.display_name in Public API protos. Only used by MVP * implementation and text sentiment FULL implementation. */ label?: string; regressionEvalMetrics?: XPSRegressionEvaluationMetrics; tablesClassificationEvalMetrics?: XPSClassificationEvaluationMetrics; tablesEvalMetrics?: XPSTablesEvaluationMetrics; textClassificationEvalMetrics?: XPSClassificationEvaluationMetrics; textExtractionEvalMetrics?: XPSTextExtractionEvaluationMetrics; textSentimentEvalMetrics?: XPSTextSentimentEvaluationMetrics; translationEvalMetrics?: XPSTranslationEvaluationMetrics; videoActionRecognitionEvalMetrics?: XPSVideoActionRecognitionEvaluationMetrics; videoClassificationEvalMetrics?: XPSClassificationEvaluationMetrics; videoObjectTrackingEvalMetrics?: XPSVideoObjectTrackingEvaluationMetrics; } function serializeXPSEvaluationMetrics(data: any): XPSEvaluationMetrics { return { ...data, imageClassificationEvalMetrics: data["imageClassificationEvalMetrics"] !== undefined ? serializeXPSClassificationEvaluationMetrics(data["imageClassificationEvalMetrics"]) : undefined, imageSegmentationEvalMetrics: data["imageSegmentationEvalMetrics"] !== undefined ? 
serializeXPSImageSegmentationEvaluationMetrics(data["imageSegmentationEvalMetrics"]) : undefined, tablesClassificationEvalMetrics: data["tablesClassificationEvalMetrics"] !== undefined ? serializeXPSClassificationEvaluationMetrics(data["tablesClassificationEvalMetrics"]) : undefined, tablesEvalMetrics: data["tablesEvalMetrics"] !== undefined ? serializeXPSTablesEvaluationMetrics(data["tablesEvalMetrics"]) : undefined, textClassificationEvalMetrics: data["textClassificationEvalMetrics"] !== undefined ? serializeXPSClassificationEvaluationMetrics(data["textClassificationEvalMetrics"]) : undefined, textExtractionEvalMetrics: data["textExtractionEvalMetrics"] !== undefined ? serializeXPSTextExtractionEvaluationMetrics(data["textExtractionEvalMetrics"]) : undefined, textSentimentEvalMetrics: data["textSentimentEvalMetrics"] !== undefined ? serializeXPSTextSentimentEvaluationMetrics(data["textSentimentEvalMetrics"]) : undefined, videoActionRecognitionEvalMetrics: data["videoActionRecognitionEvalMetrics"] !== undefined ? serializeXPSVideoActionRecognitionEvaluationMetrics(data["videoActionRecognitionEvalMetrics"]) : undefined, videoClassificationEvalMetrics: data["videoClassificationEvalMetrics"] !== undefined ? serializeXPSClassificationEvaluationMetrics(data["videoClassificationEvalMetrics"]) : undefined, }; } function deserializeXPSEvaluationMetrics(data: any): XPSEvaluationMetrics { return { ...data, imageClassificationEvalMetrics: data["imageClassificationEvalMetrics"] !== undefined ? deserializeXPSClassificationEvaluationMetrics(data["imageClassificationEvalMetrics"]) : undefined, imageSegmentationEvalMetrics: data["imageSegmentationEvalMetrics"] !== undefined ? deserializeXPSImageSegmentationEvaluationMetrics(data["imageSegmentationEvalMetrics"]) : undefined, tablesClassificationEvalMetrics: data["tablesClassificationEvalMetrics"] !== undefined ? 
deserializeXPSClassificationEvaluationMetrics(data["tablesClassificationEvalMetrics"]) : undefined, tablesEvalMetrics: data["tablesEvalMetrics"] !== undefined ? deserializeXPSTablesEvaluationMetrics(data["tablesEvalMetrics"]) : undefined, textClassificationEvalMetrics: data["textClassificationEvalMetrics"] !== undefined ? deserializeXPSClassificationEvaluationMetrics(data["textClassificationEvalMetrics"]) : undefined, textExtractionEvalMetrics: data["textExtractionEvalMetrics"] !== undefined ? deserializeXPSTextExtractionEvaluationMetrics(data["textExtractionEvalMetrics"]) : undefined, textSentimentEvalMetrics: data["textSentimentEvalMetrics"] !== undefined ? deserializeXPSTextSentimentEvaluationMetrics(data["textSentimentEvalMetrics"]) : undefined, videoActionRecognitionEvalMetrics: data["videoActionRecognitionEvalMetrics"] !== undefined ? deserializeXPSVideoActionRecognitionEvaluationMetrics(data["videoActionRecognitionEvalMetrics"]) : undefined, videoClassificationEvalMetrics: data["videoClassificationEvalMetrics"] !== undefined ? deserializeXPSClassificationEvaluationMetrics(data["videoClassificationEvalMetrics"]) : undefined, }; } /** * Specifies location of model evaluation metrics. */ export interface XPSEvaluationMetricsSet { /** * Inline EvaluationMetrics - should be relatively small. For passing large * quantities of exhaustive metrics, use file_spec. */ evaluationMetrics?: XPSEvaluationMetrics[]; /** * File spec containing evaluation metrics of a model, must point to RecordIO * file(s) of intelligence.cloud.automl.xps.EvaluationMetrics messages. */ fileSpec?: XPSFileSpec; /** * Number of the evaluation metrics (usually one per label plus overall). */ numEvaluationMetrics?: bigint; } function serializeXPSEvaluationMetricsSet(data: any): XPSEvaluationMetricsSet { return { ...data, evaluationMetrics: data["evaluationMetrics"] !== undefined ? 
data["evaluationMetrics"].map((item: any) => (serializeXPSEvaluationMetrics(item))) : undefined, numEvaluationMetrics: data["numEvaluationMetrics"] !== undefined ? String(data["numEvaluationMetrics"]) : undefined, }; } function deserializeXPSEvaluationMetricsSet(data: any): XPSEvaluationMetricsSet { return { ...data, evaluationMetrics: data["evaluationMetrics"] !== undefined ? data["evaluationMetrics"].map((item: any) => (deserializeXPSEvaluationMetrics(item))) : undefined, numEvaluationMetrics: data["numEvaluationMetrics"] !== undefined ? BigInt(data["numEvaluationMetrics"]) : undefined, }; } /** * Set of examples or input sources. */ export interface XPSExampleSet { /** * File spec of the examples or input sources. */ fileSpec?: XPSFileSpec; /** * Fingerprint of the example set. */ fingerprint?: bigint; /** * Number of examples. */ numExamples?: bigint; /** * Number of input sources. */ numInputSources?: bigint; } function serializeXPSExampleSet(data: any): XPSExampleSet { return { ...data, fingerprint: data["fingerprint"] !== undefined ? String(data["fingerprint"]) : undefined, numExamples: data["numExamples"] !== undefined ? String(data["numExamples"]) : undefined, numInputSources: data["numInputSources"] !== undefined ? String(data["numInputSources"]) : undefined, }; } function deserializeXPSExampleSet(data: any): XPSExampleSet { return { ...data, fingerprint: data["fingerprint"] !== undefined ? BigInt(data["fingerprint"]) : undefined, numExamples: data["numExamples"] !== undefined ? BigInt(data["numExamples"]) : undefined, numInputSources: data["numInputSources"] !== undefined ? BigInt(data["numInputSources"]) : undefined, }; } export interface XPSExportModelOutputConfig { coreMlFormat?: XPSCoreMlFormat; dockerFormat?: XPSDockerFormat; edgeTpuTfLiteFormat?: XPSEdgeTpuTfLiteFormat; /** * For any model and format: If true, will additionally export * FirebaseExportedModelInfo in a firebase.txt file. 
*/ exportFirebaseAuxiliaryInfo?: boolean; /** * The Google Container Registry path the exported files are to be pushed to. * This location is set if the exported format is DOCKER. */ outputGcrUri?: string; /** * The Google Cloud Storage directory where XPS will output the exported * models and related files. Format: gs://bucket/directory */ outputGcsUri?: string; tfJsFormat?: XPSTfJsFormat; tfLiteFormat?: XPSTfLiteFormat; tfSavedModelFormat?: XPSTfSavedModelFormat; } /** * Spec of input and output files, on external file systems (for example, * Colossus Namespace System or Google Cloud Storage). */ export interface XPSFileSpec { /** * Deprecated. Use file_spec. */ directoryPath?: string; fileFormat?: | "FILE_FORMAT_UNKNOWN" | "FILE_FORMAT_SSTABLE" | "FILE_FORMAT_TRANSLATION_RKV" | "FILE_FORMAT_RECORDIO" | "FILE_FORMAT_RAW_CSV" | "FILE_FORMAT_RAW_CAPACITOR"; /** * Single file path, or file pattern of format "/path/to/file@shard_count". * E.g. /cns/cell-d/somewhere/file@2 is expanded to two files: * /cns/cell-d/somewhere/file-00000-of-00002 and * /cns/cell-d/somewhere/file-00001-of-00002. */ fileSpec?: string; /** * Deprecated. Use file_spec. */ singleFilePath?: string; } /** * The data statistics of a series of FLOAT64 values. */ export interface XPSFloat64Stats { commonStats?: XPSCommonStats; /** * Histogram buckets of the data series. Sorted by the min value of the * bucket, ascendingly, and the number of the buckets is dynamically * generated. The buckets are non-overlapping and completely cover whole * FLOAT64 range with min of first bucket being `"-Infinity"`, and max of the * last one being `"Infinity"`. */ histogramBuckets?: XPSFloat64StatsHistogramBucket[]; /** * The mean of the series. */ mean?: number; /** * Ordered from 0 to k k-quantile values of the data series of n values. The * value at index i is, approximately, the i*n/k-th smallest value in the * series; for i = 0 and i = k these are, respectively, the min and max * values. 
*/ quantiles?: number[]; /** * The standard deviation of the series. */ standardDeviation?: number; } function serializeXPSFloat64Stats(data: any): XPSFloat64Stats { return { ...data, commonStats: data["commonStats"] !== undefined ? serializeXPSCommonStats(data["commonStats"]) : undefined, histogramBuckets: data["histogramBuckets"] !== undefined ? data["histogramBuckets"].map((item: any) => (serializeXPSFloat64StatsHistogramBucket(item))) : undefined, }; } function deserializeXPSFloat64Stats(data: any): XPSFloat64Stats { return { ...data, commonStats: data["commonStats"] !== undefined ? deserializeXPSCommonStats(data["commonStats"]) : undefined, histogramBuckets: data["histogramBuckets"] !== undefined ? data["histogramBuckets"].map((item: any) => (deserializeXPSFloat64StatsHistogramBucket(item))) : undefined, }; } /** * A bucket of a histogram. */ export interface XPSFloat64StatsHistogramBucket { /** * The number of data values that are in the bucket, i.e. are between min and * max values. */ count?: bigint; /** * The maximum value of the bucket, exclusive unless max = `"Infinity"`, in * which case it's inclusive. */ max?: number; /** * The minimum value of the bucket, inclusive. */ min?: number; } function serializeXPSFloat64StatsHistogramBucket(data: any): XPSFloat64StatsHistogramBucket { return { ...data, count: data["count"] !== undefined ? String(data["count"]) : undefined, }; } function deserializeXPSFloat64StatsHistogramBucket(data: any): XPSFloat64StatsHistogramBucket { return { ...data, count: data["count"] !== undefined ? BigInt(data["count"]) : undefined, }; } export interface XPSImageClassificationTrainResponse { /** * Total number of classes. */ classCount?: bigint; /** * Information of downloadable models that are pre-generated as part of * training flow and will be persisted in AutoMl backend. Populated for AutoMl * requests. */ exportModelSpec?: XPSImageExportModelSpec; /** * ## The fields below are only populated under uCAIP request scope. 
*/ modelArtifactSpec?: XPSImageModelArtifactSpec; modelServingSpec?: XPSImageModelServingSpec; /** * Stop reason for training job, e.g. 'TRAIN_BUDGET_REACHED', * 'MODEL_CONVERGED', 'MODEL_EARLY_STOPPED'. */ stopReason?: | "TRAIN_STOP_REASON_UNSPECIFIED" | "TRAIN_STOP_REASON_BUDGET_REACHED" | "TRAIN_STOP_REASON_MODEL_CONVERGED" | "TRAIN_STOP_REASON_MODEL_EARLY_STOPPED"; /** * The actual cost to create this model. - For edge type model, the cost is * expressed in node hour. - For cloud type model,the cost is expressed in * compute hour. - Populated for models created before GA. To be deprecated * after GA. */ trainCostInNodeTime?: number /* Duration */; /** * The actual training cost, expressed in node seconds. Populated for models * trained in node time. */ trainCostNodeSeconds?: bigint; } function serializeXPSImageClassificationTrainResponse(data: any): XPSImageClassificationTrainResponse { return { ...data, classCount: data["classCount"] !== undefined ? String(data["classCount"]) : undefined, trainCostInNodeTime: data["trainCostInNodeTime"] !== undefined ? data["trainCostInNodeTime"] : undefined, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? String(data["trainCostNodeSeconds"]) : undefined, }; } function deserializeXPSImageClassificationTrainResponse(data: any): XPSImageClassificationTrainResponse { return { ...data, classCount: data["classCount"] !== undefined ? BigInt(data["classCount"]) : undefined, trainCostInNodeTime: data["trainCostInNodeTime"] !== undefined ? data["trainCostInNodeTime"] : undefined, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? BigInt(data["trainCostNodeSeconds"]) : undefined, }; } /** * Information of downloadable models that are pre-generated as part of * training flow and will be persisted in AutoMl backend. 
Upon receiving * ExportModel request from user, AutoMl backend can serve the pre-generated * models to user if exists (by copying the files from internal path to user * provided location), otherwise, AutoMl backend will call xPS ExportModel API * to generate the model on the fly with the requesting format. */ export interface XPSImageExportModelSpec { /** * Contains the model format and internal location of the model files to be * exported/downloaded. Use the Google Cloud Storage bucket name which is * provided via TrainRequest.gcs_bucket_name to store the model files. */ exportModelOutputConfig?: XPSExportModelOutputConfig[]; } /** * Stores the locations and related metadata of the model artifacts. Populated * for uCAIP requests only. */ export interface XPSImageModelArtifactSpec { /** * The Tensorflow checkpoint files. e.g. Used for resumable training. */ checkpointArtifact?: XPSModelArtifactItem; /** * The model binary files in different formats for model export. */ exportArtifact?: XPSModelArtifactItem[]; /** * Google Cloud Storage URI of decoded labels file for model export * 'dict.txt'. */ labelGcsUri?: string; /** * The default model binary file used for serving (e.g. online predict, batch * predict) via public Cloud AI Platform API. */ servingArtifact?: XPSModelArtifactItem; /** * Google Cloud Storage URI prefix of Tensorflow JavaScript binary files * 'groupX-shardXofX.bin'. Deprecated. */ tfJsBinaryGcsPrefix?: string; /** * Google Cloud Storage URI of Tensorflow Lite metadata * 'tflite_metadata.json'. */ tfLiteMetadataGcsUri?: string; } /** * Serving specification for image models. */ export interface XPSImageModelServingSpec { /** * Populate under uCAIP request scope. */ modelThroughputEstimation?: XPSImageModelServingSpecModelThroughputEstimation[]; /** * An estimated value of how much traffic a node can serve. Populated for * AutoMl request only. */ nodeQps?: number; /** * ## The fields below are only populated under uCAIP request scope. 
* https://cloud.google.com/ml-engine/docs/runtime-version-list */ tfRuntimeVersion?: string; } export interface XPSImageModelServingSpecModelThroughputEstimation { computeEngineAcceleratorType?: | "UNSPECIFIED" | "NVIDIA_TESLA_K80" | "NVIDIA_TESLA_P100" | "NVIDIA_TESLA_V100" | "NVIDIA_TESLA_P4" | "NVIDIA_TESLA_T4" | "NVIDIA_TESLA_A100" | "NVIDIA_A100_80GB" | "NVIDIA_L4" | "NVIDIA_H100_80GB" | "NVIDIA_H100_MEGA_80GB" | "TPU_V2" | "TPU_V3" | "TPU_V4_POD" | "TPU_V5_LITEPOD"; /** * Estimated latency. */ latencyInMilliseconds?: number; /** * The approximate qps a deployed node can serve. */ nodeQps?: number; servomaticPartitionType?: | "PARTITION_TYPE_UNSPECIFIED" | "PARTITION_ZERO" | "PARTITION_REDUCED_HOMING" | "PARTITION_JELLYFISH" | "PARTITION_CPU" | "PARTITION_CUSTOM_STORAGE_CPU"; } /** * Model evaluation metrics for image object detection problems. Evaluates * prediction quality of labeled bounding boxes. */ export interface XPSImageObjectDetectionEvaluationMetrics { /** * The single metric for bounding boxes evaluation: the * mean_average_precision averaged over all bounding_box_metrics_entries. */ boundingBoxMeanAveragePrecision?: number; /** * The bounding boxes match metrics for each Intersection-over-union * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. */ boundingBoxMetricsEntries?: XPSBoundingBoxMetricsEntry[]; /** * The total number of bounding boxes (i.e. summed over all images) the * ground truth used to create this evaluation had. */ evaluatedBoundingBoxCount?: number; } export interface XPSImageObjectDetectionModelSpec { /** * Total number of classes. */ classCount?: bigint; exportModelSpec?: XPSImageExportModelSpec; /** * Max number of bounding box. */ maxBoundingBoxCount?: bigint; /** * ## The fields below are only populated under uCAIP request scope. 
*/ modelArtifactSpec?: XPSImageModelArtifactSpec; modelServingSpec?: XPSImageModelServingSpec; /** * Stop reason for training job, e.g. 'TRAIN_BUDGET_REACHED', * 'MODEL_CONVERGED'. */ stopReason?: | "TRAIN_STOP_REASON_UNSPECIFIED" | "TRAIN_STOP_REASON_BUDGET_REACHED" | "TRAIN_STOP_REASON_MODEL_CONVERGED" | "TRAIN_STOP_REASON_MODEL_EARLY_STOPPED"; /** * The actual train cost of creating this model, expressed in node seconds, * i.e. 3,600 value in this field means 1 node hour. */ trainCostNodeSeconds?: bigint; } function serializeXPSImageObjectDetectionModelSpec(data: any): XPSImageObjectDetectionModelSpec { return { ...data, classCount: data["classCount"] !== undefined ? String(data["classCount"]) : undefined, maxBoundingBoxCount: data["maxBoundingBoxCount"] !== undefined ? String(data["maxBoundingBoxCount"]) : undefined, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? String(data["trainCostNodeSeconds"]) : undefined, }; } function deserializeXPSImageObjectDetectionModelSpec(data: any): XPSImageObjectDetectionModelSpec { return { ...data, classCount: data["classCount"] !== undefined ? BigInt(data["classCount"]) : undefined, maxBoundingBoxCount: data["maxBoundingBoxCount"] !== undefined ? BigInt(data["maxBoundingBoxCount"]) : undefined, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? BigInt(data["trainCostNodeSeconds"]) : undefined, }; } /** * Model evaluation metrics for image segmentation problems. Next tag: 4. */ export interface XPSImageSegmentationEvaluationMetrics { /** * Metrics that have confidence thresholds. Precision-recall curve can be * derived from it. */ confidenceMetricsEntries?: XPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry[]; } function serializeXPSImageSegmentationEvaluationMetrics(data: any): XPSImageSegmentationEvaluationMetrics { return { ...data, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? 
data["confidenceMetricsEntries"].map((item: any) => (serializeXPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry(item))) : undefined, }; } function deserializeXPSImageSegmentationEvaluationMetrics(data: any): XPSImageSegmentationEvaluationMetrics { return { ...data, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (deserializeXPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry(item))) : undefined, }; } /** * Metrics for a single confidence threshold. */ export interface XPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry { /** * The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * Confusion matrix of the per confidence_threshold evaluation. Pixel counts * are set here. Only set for model level evaluation, not for evaluation per * label. */ confusionMatrix?: XPSConfusionMatrix; /** * DSC or the F1 score: The harmonic mean of recall and precision. */ diceScoreCoefficient?: number; /** * IOU score. */ iouScore?: number; /** * Precision for the given confidence threshold. */ precision?: number; /** * Recall for the given confidence threshold. */ recall?: number; } function serializeXPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry(data: any): XPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry { return { ...data, confusionMatrix: data["confusionMatrix"] !== undefined ? serializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, }; } function deserializeXPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry(data: any): XPSImageSegmentationEvaluationMetricsConfidenceMetricsEntry { return { ...data, confusionMatrix: data["confusionMatrix"] !== undefined ? deserializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, }; } export interface XPSImageSegmentationTrainResponse { /** * Color map of the model. 
*/ colorMaps?: XPSColorMap[]; /** * NOTE: These fields are not used/needed in EAP but will be set later. */ exportModelSpec?: XPSImageExportModelSpec; /** * ## The fields below are only populated under uCAIP request scope. Model * artifact spec stores and model gcs pathes and related metadata */ modelArtifactSpec?: XPSImageModelArtifactSpec; modelServingSpec?: XPSImageModelServingSpec; /** * Stop reason for training job, e.g. 'TRAIN_BUDGET_REACHED', * 'MODEL_CONVERGED'. */ stopReason?: | "TRAIN_STOP_REASON_UNSPECIFIED" | "TRAIN_STOP_REASON_BUDGET_REACHED" | "TRAIN_STOP_REASON_MODEL_CONVERGED" | "TRAIN_STOP_REASON_MODEL_EARLY_STOPPED"; /** * The actual train cost of creating this model, expressed in node seconds, * i.e. 3,600 value in this field means 1 node hour. */ trainCostNodeSeconds?: bigint; } function serializeXPSImageSegmentationTrainResponse(data: any): XPSImageSegmentationTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? String(data["trainCostNodeSeconds"]) : undefined, }; } function deserializeXPSImageSegmentationTrainResponse(data: any): XPSImageSegmentationTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? BigInt(data["trainCostNodeSeconds"]) : undefined, }; } /** * An attribution method that computes the Aumann-Shapley value taking * advantage of the model's fully differentiable structure. Refer to this paper * for more details: https://arxiv.org/abs/1703.01365 */ export interface XPSIntegratedGradientsAttribution { /** * The number of steps for approximating the path integral. A good value to * start is 50 and gradually increase until the sum to diff property is within * the desired error range. Valid range of its value is [1, 100], inclusively. */ stepCount?: number; } export interface XPSMetricEntry { /** * For billing metrics that are using legacy sku's, set the legacy billing * metric id here. 
This will be sent to Chemist as the * "cloudbilling.googleapis.com/argentum_metric_id" label. Otherwise leave * empty. */ argentumMetricId?: string; /** * A double value. */ doubleValue?: number; /** * A signed 64-bit integer value. */ int64Value?: bigint; /** * The metric name defined in the service configuration. */ metricName?: string; /** * Billing system labels for this (metric, value) pair. */ systemLabels?: XPSMetricEntryLabel[]; } function serializeXPSMetricEntry(data: any): XPSMetricEntry { return { ...data, int64Value: data["int64Value"] !== undefined ? String(data["int64Value"]) : undefined, }; } function deserializeXPSMetricEntry(data: any): XPSMetricEntry { return { ...data, int64Value: data["int64Value"] !== undefined ? BigInt(data["int64Value"]) : undefined, }; } export interface XPSMetricEntryLabel { /** * The name of the label. */ labelName?: string; /** * The value of the label. */ labelValue?: string; } /** * A single model artifact item. */ export interface XPSModelArtifactItem { /** * The model artifact format. */ artifactFormat?: | "ARTIFACT_FORMAT_UNSPECIFIED" | "TF_CHECKPOINT" | "TF_SAVED_MODEL" | "TF_LITE" | "EDGE_TPU_TF_LITE" | "TF_JS" | "CORE_ML"; /** * The Google Cloud Storage URI that stores the model binary files. */ gcsUri?: string; } export interface XPSPreprocessResponse { /** * Preprocessed examples, that are to be imported into AutoML storage. This * should point to RecordIO file(s) of PreprocessedExample messages. The * PreprocessedExample.mvp_training_data-s returned here are later verbatim * passed to Train() call in TrainExample.mvp_training_data. */ outputExampleSet?: XPSExampleSet; speechPreprocessResp?: XPSSpeechPreprocessResponse; tablesPreprocessResponse?: XPSTablesPreprocessResponse; translationPreprocessResp?: XPSTranslationPreprocessResponse; } function serializeXPSPreprocessResponse(data: any): XPSPreprocessResponse { return { ...data, outputExampleSet: data["outputExampleSet"] !== undefined ? 
serializeXPSExampleSet(data["outputExampleSet"]) : undefined, tablesPreprocessResponse: data["tablesPreprocessResponse"] !== undefined ? serializeXPSTablesPreprocessResponse(data["tablesPreprocessResponse"]) : undefined, translationPreprocessResp: data["translationPreprocessResp"] !== undefined ? serializeXPSTranslationPreprocessResponse(data["translationPreprocessResp"]) : undefined, }; } function deserializeXPSPreprocessResponse(data: any): XPSPreprocessResponse { return { ...data, outputExampleSet: data["outputExampleSet"] !== undefined ? deserializeXPSExampleSet(data["outputExampleSet"]) : undefined, tablesPreprocessResponse: data["tablesPreprocessResponse"] !== undefined ? deserializeXPSTablesPreprocessResponse(data["tablesPreprocessResponse"]) : undefined, translationPreprocessResp: data["translationPreprocessResp"] !== undefined ? deserializeXPSTranslationPreprocessResponse(data["translationPreprocessResp"]) : undefined, }; } /** * Model evaluation metrics for regression problems. It can be used for Tables. */ export interface XPSRegressionEvaluationMetrics { /** * Mean Absolute Error (MAE). */ meanAbsoluteError?: number; /** * Mean absolute percentage error. Only set if all ground truth values are * positive. */ meanAbsolutePercentageError?: number; /** * A list of actual versus predicted points for the model being evaluated. */ regressionMetricsEntries?: XPSRegressionMetricsEntry[]; /** * Root Mean Squared Error (RMSE). */ rootMeanSquaredError?: number; /** * Root mean squared log error. */ rootMeanSquaredLogError?: number; /** * R squared. */ rSquared?: number; } /** * A pair of actual & observed values for the model being evaluated. */ export interface XPSRegressionMetricsEntry { /** * The observed value for a row in the dataset. */ predictedValue?: number; /** * The actual target value for a row in the dataset. */ trueValue?: number; } export interface XPSReportingMetrics { /** * The effective time training used. 
If set, this is used for quota * management and billing. Deprecated. AutoML BE doesn't use this. Don't set. */ effectiveTrainingDuration?: number /* Duration */; /** * One entry per metric name. The values must be aggregated per metric name. */ metricEntries?: XPSMetricEntry[]; } function serializeXPSReportingMetrics(data: any): XPSReportingMetrics { return { ...data, effectiveTrainingDuration: data["effectiveTrainingDuration"] !== undefined ? data["effectiveTrainingDuration"] : undefined, metricEntries: data["metricEntries"] !== undefined ? data["metricEntries"].map((item: any) => (serializeXPSMetricEntry(item))) : undefined, }; } function deserializeXPSReportingMetrics(data: any): XPSReportingMetrics { return { ...data, effectiveTrainingDuration: data["effectiveTrainingDuration"] !== undefined ? data["effectiveTrainingDuration"] : undefined, metricEntries: data["metricEntries"] !== undefined ? data["metricEntries"].map((item: any) => (deserializeXPSMetricEntry(item))) : undefined, }; } export interface XPSResponseExplanationMetadata { /** * Metadata of the input. */ inputs?: { [key: string]: XPSResponseExplanationMetadataInputMetadata }; /** * Metadata of the output. */ outputs?: { [key: string]: XPSResponseExplanationMetadataOutputMetadata }; } /** * Metadata of the input of a feature. */ export interface XPSResponseExplanationMetadataInputMetadata { /** * Name of the input tensor for this model. Only needed in train response. */ inputTensorName?: string; /** * Modality of the feature. Valid values are: numeric, image. Defaults to * numeric. */ modality?: | "MODALITY_UNSPECIFIED" | "NUMERIC" | "IMAGE" | "CATEGORICAL"; /** * Visualization configurations for image explanation. */ visualizationConfig?: XPSVisualization; } /** * Metadata of the prediction output to be explained. */ export interface XPSResponseExplanationMetadataOutputMetadata { /** * Name of the output tensor. Only needed in train response. 
*/ outputTensorName?: string; } export interface XPSResponseExplanationParameters { /** * An attribution method that computes Aumann-Shapley values taking advantage * of the model's fully differentiable structure. Refer to this paper for more * details: https://arxiv.org/abs/1703.01365 */ integratedGradientsAttribution?: XPSIntegratedGradientsAttribution; /** * An attribution method that redistributes Integrated Gradients attribution * to segmented regions, taking advantage of the model's fully differentiable * structure. Refer to this paper for more details: * https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural * images, like a picture of a house or an animal. If the images are taken in * artificial environments, like a lab or manufacturing line, or from * diagnostic equipment, like x-rays or quality-control cameras, use * Integrated Gradients instead. */ xraiAttribution?: XPSXraiAttribution; } /** * Specification of Model explanation. Feature-based XAI in AutoML Vision ICN * is deprecated. */ export interface XPSResponseExplanationSpec { /** * Explanation type. For AutoML Image Classification models, possible values * are: * `image-integrated-gradients` * `image-xrai` */ explanationType?: string; /** * Metadata describing the Model's input and output for explanation. */ metadata?: XPSResponseExplanationMetadata; /** * Parameters that configure explaining of the Model's predictions. */ parameters?: XPSResponseExplanationParameters; } export interface XPSRow { /** * The ids of the columns. Note: The below `values` field must match order of * this field, if this field is set. */ columnIds?: number[]; /** * The values of the row cells, given in the same order as the column_ids. If * column_ids is not set, then in the same order as the * input_feature_column_ids in TablesModelMetadata. */ values?: any[]; } export interface XPSSpeechEvaluationMetrics { /** * Evaluation metrics for all submodels contained in this model. 
*/ subModelEvaluationMetrics?: XPSSpeechEvaluationMetricsSubModelEvaluationMetric[]; } export interface XPSSpeechEvaluationMetricsSubModelEvaluationMetric { /** * Type of the biasing model. */ biasingModelType?: | "BIASING_MODEL_TYPE_UNSPECIFIED" | "COMMAND_AND_SEARCH" | "PHONE_CALL" | "VIDEO" | "DEFAULT"; /** * If true then it means we have an enhanced version of the biasing models. */ isEnhancedModel?: boolean; numDeletions?: number; numInsertions?: number; numSubstitutions?: number; /** * Number of utterances used in the wer computation. */ numUtterances?: number; /** * Number of words over which the word error rate was computed. */ numWords?: number; /** * Below fields are used for debugging purposes */ sentenceAccuracy?: number; /** * Word error rate (standard error metric used for speech recognition). */ wer?: number; } export interface XPSSpeechModelSpec { /** * Required for speech xps backend. Speech xps has to use dataset_id and * model_id as the primary key in db so that speech API can query the db * directly. */ datasetId?: bigint; language?: string; /** * Model specs for all submodels contained in this model. */ subModelSpecs?: XPSSpeechModelSpecSubModelSpec[]; } function serializeXPSSpeechModelSpec(data: any): XPSSpeechModelSpec { return { ...data, datasetId: data["datasetId"] !== undefined ? String(data["datasetId"]) : undefined, }; } function deserializeXPSSpeechModelSpec(data: any): XPSSpeechModelSpec { return { ...data, datasetId: data["datasetId"] !== undefined ? BigInt(data["datasetId"]) : undefined, }; } export interface XPSSpeechModelSpecSubModelSpec { /** * Type of the biasing model. */ biasingModelType?: | "BIASING_MODEL_TYPE_UNSPECIFIED" | "COMMAND_AND_SEARCH" | "PHONE_CALL" | "VIDEO" | "DEFAULT"; /** * In S3, Recognition ClientContextId.client_id */ clientId?: string; /** * In S3, Recognition ClientContextId.context_id */ contextId?: string; /** * If true then it means we have an enhanced version of the biasing models. 
*/ isEnhancedModel?: boolean; } export interface XPSSpeechPreprocessResponse { /** * Location od shards of sstables (test data) of DataUtterance protos. */ cnsTestDataPath?: string; /** * Location of shards of sstables (training data) of DataUtterance protos. */ cnsTrainDataPath?: string; /** * The metrics for prebuilt speech models. They are included here because * there is no prebuilt speech models stored in the AutoML. */ prebuiltModelEvaluationMetrics?: XPSSpeechEvaluationMetrics; /** * Stats associated with the data. */ speechPreprocessStats?: XPSSpeechPreprocessStats; } export interface XPSSpeechPreprocessStats { /** * Different types of data errors and the counts associated with them. */ dataErrors?: XPSDataErrors[]; /** * The number of rows marked HUMAN_LABELLED */ numHumanLabeledExamples?: number; /** * The number of samples found in the previously recorded logs data. */ numLogsExamples?: number; /** * The number of rows marked as MACHINE_TRANSCRIBED */ numMachineTranscribedExamples?: number; /** * The number of examples labelled as TEST by Speech xps server. */ testExamplesCount?: number; /** * The number of sentences in the test data set. */ testSentencesCount?: number; /** * The number of words in the test data set. */ testWordsCount?: number; /** * The number of examples labeled as TRAIN by Speech xps server. */ trainExamplesCount?: number; /** * The number of sentences in the training data set. */ trainSentencesCount?: number; /** * The number of words in the training data set. */ trainWordsCount?: number; } /** * The data statistics of a series of STRING values. */ export interface XPSStringStats { commonStats?: XPSCommonStats; /** * The statistics of the top 20 unigrams, ordered by * StringStats.UnigramStats.count. */ topUnigramStats?: XPSStringStatsUnigramStats[]; } function serializeXPSStringStats(data: any): XPSStringStats { return { ...data, commonStats: data["commonStats"] !== undefined ? 
serializeXPSCommonStats(data["commonStats"]) : undefined, topUnigramStats: data["topUnigramStats"] !== undefined ? data["topUnigramStats"].map((item: any) => (serializeXPSStringStatsUnigramStats(item))) : undefined, }; } function deserializeXPSStringStats(data: any): XPSStringStats { return { ...data, commonStats: data["commonStats"] !== undefined ? deserializeXPSCommonStats(data["commonStats"]) : undefined, topUnigramStats: data["topUnigramStats"] !== undefined ? data["topUnigramStats"].map((item: any) => (deserializeXPSStringStatsUnigramStats(item))) : undefined, }; } /** * The statistics of a unigram. */ export interface XPSStringStatsUnigramStats { /** * The number of occurrences of this unigram in the series. */ count?: bigint; /** * The unigram. */ value?: string; } function serializeXPSStringStatsUnigramStats(data: any): XPSStringStatsUnigramStats { return { ...data, count: data["count"] !== undefined ? String(data["count"]) : undefined, }; } function deserializeXPSStringStatsUnigramStats(data: any): XPSStringStatsUnigramStats { return { ...data, count: data["count"] !== undefined ? BigInt(data["count"]) : undefined, }; } /** * The data statistics of a series of STRUCT values. */ export interface XPSStructStats { commonStats?: XPSCommonStats; /** * Map from a field name of the struct to data stats aggregated over series * of all data in that field across all the structs. */ fieldStats?: { [key: string]: XPSDataStats }; } function serializeXPSStructStats(data: any): XPSStructStats { return { ...data, commonStats: data["commonStats"] !== undefined ? serializeXPSCommonStats(data["commonStats"]) : undefined, fieldStats: data["fieldStats"] !== undefined ? Object.fromEntries(Object.entries(data["fieldStats"]).map(([k, v]: [string, any]) => ([k, serializeXPSDataStats(v)]))) : undefined, }; } function deserializeXPSStructStats(data: any): XPSStructStats { return { ...data, commonStats: data["commonStats"] !== undefined ? 
deserializeXPSCommonStats(data["commonStats"]) : undefined, fieldStats: data["fieldStats"] !== undefined ? Object.fromEntries(Object.entries(data["fieldStats"]).map(([k, v]: [string, any]) => ([k, deserializeXPSDataStats(v)]))) : undefined, }; } /** * `StructType` defines the DataType-s of a STRUCT type. */ export interface XPSStructType { /** * Unordered map of struct field names to their data types. */ fields?: { [key: string]: XPSDataType }; } /** * Metrics for Tables classification problems. */ export interface XPSTablesClassificationMetrics { /** * Metrics building a curve. */ curveMetrics?: XPSTablesClassificationMetricsCurveMetrics[]; } function serializeXPSTablesClassificationMetrics(data: any): XPSTablesClassificationMetrics { return { ...data, curveMetrics: data["curveMetrics"] !== undefined ? data["curveMetrics"].map((item: any) => (serializeXPSTablesClassificationMetricsCurveMetrics(item))) : undefined, }; } function deserializeXPSTablesClassificationMetrics(data: any): XPSTablesClassificationMetrics { return { ...data, curveMetrics: data["curveMetrics"] !== undefined ? data["curveMetrics"].map((item: any) => (deserializeXPSTablesClassificationMetricsCurveMetrics(item))) : undefined, }; } /** * Metrics curve data point for a single value. */ export interface XPSTablesClassificationMetricsCurveMetrics { /** * The area under the precision-recall curve. */ aucPr?: number; /** * The area under receiver operating characteristic curve. */ aucRoc?: number; /** * Metrics that have confidence thresholds. Precision-recall curve and ROC * curve can be derived from them. */ confidenceMetricsEntries?: XPSTablesConfidenceMetricsEntry[]; /** * The Log loss metric. */ logLoss?: number; /** * The position threshold value used to compute the metrics. */ positionThreshold?: number; /** * The CATEGORY row value (for ARRAY unnested) the curve metrics are for. 
*/ value?: string; } function serializeXPSTablesClassificationMetricsCurveMetrics(data: any): XPSTablesClassificationMetricsCurveMetrics { return { ...data, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (serializeXPSTablesConfidenceMetricsEntry(item))) : undefined, }; } function deserializeXPSTablesClassificationMetricsCurveMetrics(data: any): XPSTablesClassificationMetricsCurveMetrics { return { ...data, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (deserializeXPSTablesConfidenceMetricsEntry(item))) : undefined, }; } /** * Metrics for a single confidence threshold. */ export interface XPSTablesConfidenceMetricsEntry { /** * The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * The harmonic mean of recall and precision. (2 * precision * recall) / * (precision + recall) */ f1Score?: number; /** * False negative count. */ falseNegativeCount?: bigint; /** * False positive count. */ falsePositiveCount?: bigint; /** * FPR = #false positives / (#false positives + #true negatives) */ falsePositiveRate?: number; /** * Precision = #true positives / (#true positives + #false positives). */ precision?: number; /** * Recall = #true positives / (#true positives + #false negatives). */ recall?: number; /** * True negative count. */ trueNegativeCount?: bigint; /** * True positive count. */ truePositiveCount?: bigint; /** * TPR = #true positives / (#true positives + #false negatvies) */ truePositiveRate?: number; } function serializeXPSTablesConfidenceMetricsEntry(data: any): XPSTablesConfidenceMetricsEntry { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? String(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? 
String(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? String(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? String(data["truePositiveCount"]) : undefined, }; } function deserializeXPSTablesConfidenceMetricsEntry(data: any): XPSTablesConfidenceMetricsEntry { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? BigInt(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? BigInt(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? BigInt(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? BigInt(data["truePositiveCount"]) : undefined, }; } /** * Metadata for a dataset used for AutoML Tables. */ export interface XPSTablesDatasetMetadata { /** * Id the column to split the table. */ mlUseColumnId?: number; /** * Primary table. */ primaryTableSpec?: XPSTableSpec; /** * (the column id : its CorrelationStats with target column). */ targetColumnCorrelations?: { [key: string]: XPSCorrelationStats }; /** * Id of the primary table column that should be used as the training label. */ targetColumnId?: number; /** * Id of the primary table column that should be used as the weight column. */ weightColumnId?: number; } function serializeXPSTablesDatasetMetadata(data: any): XPSTablesDatasetMetadata { return { ...data, primaryTableSpec: data["primaryTableSpec"] !== undefined ? serializeXPSTableSpec(data["primaryTableSpec"]) : undefined, }; } function deserializeXPSTablesDatasetMetadata(data: any): XPSTablesDatasetMetadata { return { ...data, primaryTableSpec: data["primaryTableSpec"] !== undefined ? deserializeXPSTableSpec(data["primaryTableSpec"]) : undefined, }; } export interface XPSTablesEvaluationMetrics { /** * Classification metrics. 
*/ classificationMetrics?: XPSTablesClassificationMetrics; /** * Regression metrics. */ regressionMetrics?: XPSTablesRegressionMetrics; } function serializeXPSTablesEvaluationMetrics(data: any): XPSTablesEvaluationMetrics { return { ...data, classificationMetrics: data["classificationMetrics"] !== undefined ? serializeXPSTablesClassificationMetrics(data["classificationMetrics"]) : undefined, }; } function deserializeXPSTablesEvaluationMetrics(data: any): XPSTablesEvaluationMetrics { return { ...data, classificationMetrics: data["classificationMetrics"] !== undefined ? deserializeXPSTablesClassificationMetrics(data["classificationMetrics"]) : undefined, }; } /** * An information specific to given column and Tables Model, in context of the * Model and the predictions created by it. */ export interface XPSTablesModelColumnInfo { /** * The ID of the column. */ columnId?: number; /** * When given as part of a Model: Measurement of how much model predictions * correctness on the TEST data depend on values in this column. A value * between 0 and 1, higher means higher influence. These values are normalized * - for all input feature columns of a given model they add to 1. When given * back by Predict or Batch Predict: Measurement of how impactful for the * prediction returned for the given row the value in this column was. * Specifically, the feature importance specifies the marginal contribution * that the feature made to the prediction score compared to the baseline * score. These values are computed using the Sampled Shapley method. */ featureImportance?: number; } /** * A description of Tables model structure. */ export interface XPSTablesModelStructure { /** * A list of models. */ modelParameters?: XPSTablesModelStructureModelParameters[]; } function serializeXPSTablesModelStructure(data: any): XPSTablesModelStructure { return { ...data, modelParameters: data["modelParameters"] !== undefined ? 
data["modelParameters"].map((item: any) => (serializeXPSTablesModelStructureModelParameters(item))) : undefined, }; } function deserializeXPSTablesModelStructure(data: any): XPSTablesModelStructure { return { ...data, modelParameters: data["modelParameters"] !== undefined ? data["modelParameters"].map((item: any) => (deserializeXPSTablesModelStructureModelParameters(item))) : undefined, }; } /** * Model hyper-parameters for a model. */ export interface XPSTablesModelStructureModelParameters { hyperparameters?: XPSTablesModelStructureModelParametersParameter[]; } function serializeXPSTablesModelStructureModelParameters(data: any): XPSTablesModelStructureModelParameters { return { ...data, hyperparameters: data["hyperparameters"] !== undefined ? data["hyperparameters"].map((item: any) => (serializeXPSTablesModelStructureModelParametersParameter(item))) : undefined, }; } function deserializeXPSTablesModelStructureModelParameters(data: any): XPSTablesModelStructureModelParameters { return { ...data, hyperparameters: data["hyperparameters"] !== undefined ? data["hyperparameters"].map((item: any) => (deserializeXPSTablesModelStructureModelParametersParameter(item))) : undefined, }; } export interface XPSTablesModelStructureModelParametersParameter { /** * Float type parameter value. */ floatValue?: number; /** * Integer type parameter value. */ intValue?: bigint; /** * Parameter name. */ name?: string; /** * String type parameter value. */ stringValue?: string; } function serializeXPSTablesModelStructureModelParametersParameter(data: any): XPSTablesModelStructureModelParametersParameter { return { ...data, intValue: data["intValue"] !== undefined ? String(data["intValue"]) : undefined, }; } function deserializeXPSTablesModelStructureModelParametersParameter(data: any): XPSTablesModelStructureModelParametersParameter { return { ...data, intValue: data["intValue"] !== undefined ? 
BigInt(data["intValue"]) : undefined, }; } export interface XPSTableSpec { /** * Mapping from column id to column spec. */ columnSpecs?: { [key: string]: XPSColumnSpec }; /** * The total size of imported data of the table. */ importedDataSizeInBytes?: bigint; /** * The number of rows in the table. */ rowCount?: bigint; /** * The id of the time column. */ timeColumnId?: number; /** * The number of valid rows. */ validRowCount?: bigint; } function serializeXPSTableSpec(data: any): XPSTableSpec { return { ...data, columnSpecs: data["columnSpecs"] !== undefined ? Object.fromEntries(Object.entries(data["columnSpecs"]).map(([k, v]: [string, any]) => ([k, serializeXPSColumnSpec(v)]))) : undefined, importedDataSizeInBytes: data["importedDataSizeInBytes"] !== undefined ? String(data["importedDataSizeInBytes"]) : undefined, rowCount: data["rowCount"] !== undefined ? String(data["rowCount"]) : undefined, validRowCount: data["validRowCount"] !== undefined ? String(data["validRowCount"]) : undefined, }; } function deserializeXPSTableSpec(data: any): XPSTableSpec { return { ...data, columnSpecs: data["columnSpecs"] !== undefined ? Object.fromEntries(Object.entries(data["columnSpecs"]).map(([k, v]: [string, any]) => ([k, deserializeXPSColumnSpec(v)]))) : undefined, importedDataSizeInBytes: data["importedDataSizeInBytes"] !== undefined ? BigInt(data["importedDataSizeInBytes"]) : undefined, rowCount: data["rowCount"] !== undefined ? BigInt(data["rowCount"]) : undefined, validRowCount: data["validRowCount"] !== undefined ? BigInt(data["validRowCount"]) : undefined, }; } export interface XPSTablesPreprocessResponse { /** * The table/column id, column_name and the DataTypes of the columns will be * populated. */ tablesDatasetMetadata?: XPSTablesDatasetMetadata; } function serializeXPSTablesPreprocessResponse(data: any): XPSTablesPreprocessResponse { return { ...data, tablesDatasetMetadata: data["tablesDatasetMetadata"] !== undefined ? 
serializeXPSTablesDatasetMetadata(data["tablesDatasetMetadata"]) : undefined, }; } function deserializeXPSTablesPreprocessResponse(data: any): XPSTablesPreprocessResponse { return { ...data, tablesDatasetMetadata: data["tablesDatasetMetadata"] !== undefined ? deserializeXPSTablesDatasetMetadata(data["tablesDatasetMetadata"]) : undefined, }; } /** * Metrics for Tables regression problems. */ export interface XPSTablesRegressionMetrics { /** * Mean absolute error. */ meanAbsoluteError?: number; /** * Mean absolute percentage error, only set if all of the target column's * values are positive. */ meanAbsolutePercentageError?: number; /** * A list of actual versus predicted points for the model being evaluated. */ regressionMetricsEntries?: XPSRegressionMetricsEntry[]; /** * Root mean squared error. */ rootMeanSquaredError?: number; /** * Root mean squared log error. */ rootMeanSquaredLogError?: number; /** * R squared. */ rSquared?: number; } export interface XPSTablesTrainingOperationMetadata { /** * Current stage of creating model. */ createModelStage?: | "CREATE_MODEL_STAGE_UNSPECIFIED" | "DATA_PREPROCESSING" | "TRAINING" | "EVALUATING" | "MODEL_POST_PROCESSING"; /** * The optimization objective for model. */ optimizationObjective?: string; /** * This field is for training. When the operation is terminated successfully, * AutoML Backend post this field to operation metadata in spanner. If the * metadata has no trials returned, the training operation is supposed to be a * failure. */ topTrials?: XPSTuningTrial[]; /** * Creating model budget. */ trainBudgetMilliNodeHours?: bigint; /** * This field records the training objective value with respect to time, * giving insight into how the model architecture search is performing as * training time elapses. */ trainingObjectivePoints?: XPSTrainingObjectivePoint[]; /** * Timestamp when training process starts. 
*/ trainingStartTime?: Date; } function serializeXPSTablesTrainingOperationMetadata(data: any): XPSTablesTrainingOperationMetadata { return { ...data, topTrials: data["topTrials"] !== undefined ? data["topTrials"].map((item: any) => (serializeXPSTuningTrial(item))) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? String(data["trainBudgetMilliNodeHours"]) : undefined, trainingObjectivePoints: data["trainingObjectivePoints"] !== undefined ? data["trainingObjectivePoints"].map((item: any) => (serializeXPSTrainingObjectivePoint(item))) : undefined, trainingStartTime: data["trainingStartTime"] !== undefined ? data["trainingStartTime"].toISOString() : undefined, }; } function deserializeXPSTablesTrainingOperationMetadata(data: any): XPSTablesTrainingOperationMetadata { return { ...data, topTrials: data["topTrials"] !== undefined ? data["topTrials"].map((item: any) => (deserializeXPSTuningTrial(item))) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? BigInt(data["trainBudgetMilliNodeHours"]) : undefined, trainingObjectivePoints: data["trainingObjectivePoints"] !== undefined ? data["trainingObjectivePoints"].map((item: any) => (deserializeXPSTrainingObjectivePoint(item))) : undefined, trainingStartTime: data["trainingStartTime"] !== undefined ? new Date(data["trainingStartTime"]) : undefined, }; } export interface XPSTablesTrainResponse { modelStructure?: XPSTablesModelStructure; /** * Sample rows from the dataset this model was trained. */ predictionSampleRows?: XPSRow[]; /** * Output only. Auxiliary information for each of the * input_feature_column_specs, with respect to this particular model. */ tablesModelColumnInfo?: XPSTablesModelColumnInfo[]; /** * The actual training cost of the model, expressed in milli node hours, i.e. * 1,000 value in this field means 1 node hour. Guaranteed to not exceed the * train budget. 
*/ trainCostMilliNodeHours?: bigint; } function serializeXPSTablesTrainResponse(data: any): XPSTablesTrainResponse { return { ...data, modelStructure: data["modelStructure"] !== undefined ? serializeXPSTablesModelStructure(data["modelStructure"]) : undefined, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? String(data["trainCostMilliNodeHours"]) : undefined, }; } function deserializeXPSTablesTrainResponse(data: any): XPSTablesTrainResponse { return { ...data, modelStructure: data["modelStructure"] !== undefined ? deserializeXPSTablesModelStructure(data["modelStructure"]) : undefined, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? BigInt(data["trainCostMilliNodeHours"]) : undefined, }; } /** * Component model. */ export interface XPSTextComponentModel { /** * The Cloud Storage resource path to hold batch prediction model. */ batchPredictionModelGcsUri?: string; /** * The Cloud Storage resource path to hold online prediction model. */ onlinePredictionModelGcsUri?: string; /** * The partition where the model is deployed. Populated by uCAIP BE as part * of online PredictRequest. */ partition?: | "PARTITION_TYPE_UNSPECIFIED" | "PARTITION_ZERO" | "PARTITION_REDUCED_HOMING" | "PARTITION_JELLYFISH" | "PARTITION_CPU" | "PARTITION_CUSTOM_STORAGE_CPU"; /** * The default model binary file used for serving (e.g. online predict, batch * predict) via public Cloud Ai Platform API. */ servingArtifact?: XPSModelArtifactItem; /** * The name of servo model. Populated by uCAIP BE as part of online * PredictRequest. */ servoModelName?: string; /** * The name of the trained NL submodel. 
*/ submodelName?: string; /** * The type of trained NL submodel */ submodelType?: | "TEXT_MODEL_TYPE_UNSPECIFIED" | "TEXT_MODEL_TYPE_DEFAULT" | "TEXT_MODEL_TYPE_META_ARCHITECT" | "TEXT_MODEL_TYPE_ATC" | "TEXT_MODEL_TYPE_CLARA2" | "TEXT_MODEL_TYPE_CHATBASE" | "TEXT_MODEL_TYPE_SAFT_SPAN_LABELING" | "TEXT_MODEL_TYPE_TEXT_EXTRACTION" | "TEXT_MODEL_TYPE_RELATIONSHIP_EXTRACTION" | "TEXT_MODEL_TYPE_COMPOSITE" | "TEXT_MODEL_TYPE_ALL_MODELS" | "TEXT_MODEL_TYPE_BERT" | "TEXT_MODEL_TYPE_ENC_PALM"; /** * ## The fields below are only populated under uCAIP request scope. * https://cloud.google.com/ml-engine/docs/runtime-version-list */ tfRuntimeVersion?: string; /** * The servomatic model version number. Populated by uCAIP BE as part of * online PredictRequest. */ versionNumber?: bigint; } function serializeXPSTextComponentModel(data: any): XPSTextComponentModel { return { ...data, versionNumber: data["versionNumber"] !== undefined ? String(data["versionNumber"]) : undefined, }; } function deserializeXPSTextComponentModel(data: any): XPSTextComponentModel { return { ...data, versionNumber: data["versionNumber"] !== undefined ? BigInt(data["versionNumber"]) : undefined, }; } export interface XPSTextExtractionEvaluationMetrics { /** * Values are at the highest F1 score on the precision-recall curve. Only * confidence_threshold, recall, precision, and f1_score will be set. */ bestF1ConfidenceMetrics?: XPSConfidenceMetricsEntry; /** * If the enclosing EvaluationMetrics.label is empty, * confidence_metrics_entries is an evaluation of the entire model across all * labels. If the enclosing EvaluationMetrics.label is set, * confidence_metrics_entries applies to that label. */ confidenceMetricsEntries?: XPSConfidenceMetricsEntry[]; /** * Confusion matrix of the model, at the default confidence threshold (0.0). * Only set for whole-model evaluation, not for evaluation per label. */ confusionMatrix?: XPSConfusionMatrix; /** * Only recall, precision, and f1_score will be set. 
*/ perLabelConfidenceMetrics?: { [key: string]: XPSConfidenceMetricsEntry }; } function serializeXPSTextExtractionEvaluationMetrics(data: any): XPSTextExtractionEvaluationMetrics { return { ...data, bestF1ConfidenceMetrics: data["bestF1ConfidenceMetrics"] !== undefined ? serializeXPSConfidenceMetricsEntry(data["bestF1ConfidenceMetrics"]) : undefined, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (serializeXPSConfidenceMetricsEntry(item))) : undefined, confusionMatrix: data["confusionMatrix"] !== undefined ? serializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, perLabelConfidenceMetrics: data["perLabelConfidenceMetrics"] !== undefined ? Object.fromEntries(Object.entries(data["perLabelConfidenceMetrics"]).map(([k, v]: [string, any]) => ([k, serializeXPSConfidenceMetricsEntry(v)]))) : undefined, }; } function deserializeXPSTextExtractionEvaluationMetrics(data: any): XPSTextExtractionEvaluationMetrics { return { ...data, bestF1ConfidenceMetrics: data["bestF1ConfidenceMetrics"] !== undefined ? deserializeXPSConfidenceMetricsEntry(data["bestF1ConfidenceMetrics"]) : undefined, confidenceMetricsEntries: data["confidenceMetricsEntries"] !== undefined ? data["confidenceMetricsEntries"].map((item: any) => (deserializeXPSConfidenceMetricsEntry(item))) : undefined, confusionMatrix: data["confusionMatrix"] !== undefined ? deserializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, perLabelConfidenceMetrics: data["perLabelConfidenceMetrics"] !== undefined ? Object.fromEntries(Object.entries(data["perLabelConfidenceMetrics"]).map(([k, v]: [string, any]) => ([k, deserializeXPSConfidenceMetricsEntry(v)]))) : undefined, }; } /** * Model evaluation metrics for text sentiment problems. */ export interface XPSTextSentimentEvaluationMetrics { /** * Output only. Confusion matrix of the evaluation. Only set for the overall * model evaluation, not for evaluation of a single annotation spec. 
*/ confusionMatrix?: XPSConfusionMatrix; /** * Output only. The harmonic mean of recall and precision. */ f1Score?: number; /** * Output only. Linear weighted kappa. Only set for the overall model * evaluation, not for evaluation of a single annotation spec. */ linearKappa?: number; /** * Output only. Mean absolute error. Only set for the overall model * evaluation, not for evaluation of a single annotation spec. */ meanAbsoluteError?: number; /** * Output only. Mean squared error. Only set for the overall model * evaluation, not for evaluation of a single annotation spec. */ meanSquaredError?: number; /** * Output only. Precision. */ precision?: number; /** * Output only. Quadratic weighted kappa. Only set for the overall model * evaluation, not for evaluation of a single annotation spec. */ quadraticKappa?: number; /** * Output only. Recall. */ recall?: number; } function serializeXPSTextSentimentEvaluationMetrics(data: any): XPSTextSentimentEvaluationMetrics { return { ...data, confusionMatrix: data["confusionMatrix"] !== undefined ? serializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, }; } function deserializeXPSTextSentimentEvaluationMetrics(data: any): XPSTextSentimentEvaluationMetrics { return { ...data, confusionMatrix: data["confusionMatrix"] !== undefined ? deserializeXPSConfusionMatrix(data["confusionMatrix"]) : undefined, }; } /** * TextToSpeech train response */ export interface XPSTextToSpeechTrainResponse { } export interface XPSTextTrainResponse { /** * Component submodels. */ componentModel?: XPSTextComponentModel[]; } function serializeXPSTextTrainResponse(data: any): XPSTextTrainResponse { return { ...data, componentModel: data["componentModel"] !== undefined ? data["componentModel"].map((item: any) => (serializeXPSTextComponentModel(item))) : undefined, }; } function deserializeXPSTextTrainResponse(data: any): XPSTextTrainResponse { return { ...data, componentModel: data["componentModel"] !== undefined ? 
data["componentModel"].map((item: any) => (deserializeXPSTextComponentModel(item))) : undefined, }; } /** * A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in * the browser and in Node.js using JavaScript. */ export interface XPSTfJsFormat { } /** * LINT.IfChange A model format used for mobile and IoT devices. See * https://www.tensorflow.org/lite. */ export interface XPSTfLiteFormat { } /** * A tensorflow model format in SavedModel format. */ export interface XPSTfSavedModelFormat { } /** * The data statistics of a series of TIMESTAMP values. */ export interface XPSTimestampStats { commonStats?: XPSCommonStats; /** * The string key is the pre-defined granularity. Currently supported: * hour_of_day, day_of_week, month_of_year. Granularities finer that the * granularity of timestamp data are not populated (e.g. if timestamps are at * day granularity, then hour_of_day is not populated). */ granularStats?: { [key: string]: XPSTimestampStatsGranularStats }; medianTimestampNanos?: bigint; } function serializeXPSTimestampStats(data: any): XPSTimestampStats { return { ...data, commonStats: data["commonStats"] !== undefined ? serializeXPSCommonStats(data["commonStats"]) : undefined, granularStats: data["granularStats"] !== undefined ? Object.fromEntries(Object.entries(data["granularStats"]).map(([k, v]: [string, any]) => ([k, serializeXPSTimestampStatsGranularStats(v)]))) : undefined, medianTimestampNanos: data["medianTimestampNanos"] !== undefined ? String(data["medianTimestampNanos"]) : undefined, }; } function deserializeXPSTimestampStats(data: any): XPSTimestampStats { return { ...data, commonStats: data["commonStats"] !== undefined ? deserializeXPSCommonStats(data["commonStats"]) : undefined, granularStats: data["granularStats"] !== undefined ? 
Object.fromEntries(Object.entries(data["granularStats"]).map(([k, v]: [string, any]) => ([k, deserializeXPSTimestampStatsGranularStats(v)]))) : undefined, medianTimestampNanos: data["medianTimestampNanos"] !== undefined ? BigInt(data["medianTimestampNanos"]) : undefined, }; } /** * Stats split by a defined in context granularity. */ export interface XPSTimestampStatsGranularStats { /** * A map from granularity key to example count for that key. E.g. for * hour_of_day `13` means 1pm, or for month_of_year `5` means May). */ buckets?: { [key: string]: bigint }; } function serializeXPSTimestampStatsGranularStats(data: any): XPSTimestampStatsGranularStats { return { ...data, buckets: data["buckets"] !== undefined ? Object.fromEntries(Object.entries(data["buckets"]).map(([k, v]: [string, any]) => ([k, String(v)]))) : undefined, }; } function deserializeXPSTimestampStatsGranularStats(data: any): XPSTimestampStatsGranularStats { return { ...data, buckets: data["buckets"] !== undefined ? Object.fromEntries(Object.entries(data["buckets"]).map(([k, v]: [string, any]) => ([k, BigInt(v)]))) : undefined, }; } /** * Track matching model metrics for a single track match threshold and multiple * label match confidence thresholds. Next tag: 6. */ export interface XPSTrackMetricsEntry { /** * Output only. Metrics for each label-match confidence_threshold from * 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived * from them. */ confidenceMetricsEntries?: XPSTrackMetricsEntryConfidenceMetricsEntry[]; /** * Output only. The intersection-over-union threshold value between bounding * boxes across frames used to compute this metric entry. */ iouThreshold?: number; /** * Output only. The mean bounding box iou over all confidence thresholds. */ meanBoundingBoxIou?: number; /** * Output only. The mean mismatch rate over all confidence thresholds. */ meanMismatchRate?: number; /** * Output only. The mean average precision over all confidence thresholds. 
*/ meanTrackingAveragePrecision?: number; } /** * Metrics for a single confidence threshold. Next tag: 6. */ export interface XPSTrackMetricsEntryConfidenceMetricsEntry { /** * Output only. Bounding box intersection-over-union precision. Measures how * well the bounding boxes overlap between each other (e.g. complete overlap * or just barely above iou_threshold). */ boundingBoxIou?: number; /** * Output only. The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * Output only. Mismatch rate, which measures the tracking consistency, i.e. * correctness of instance ID continuity. */ mismatchRate?: number; /** * Output only. Tracking precision. */ trackingPrecision?: number; /** * Output only. Tracking recall. */ trackingRecall?: number; } export interface XPSTrainingObjectivePoint { /** * The time at which this point was recorded. */ createTime?: Date; /** * The objective value when this point was recorded. */ value?: number; } function serializeXPSTrainingObjectivePoint(data: any): XPSTrainingObjectivePoint { return { ...data, createTime: data["createTime"] !== undefined ? data["createTime"].toISOString() : undefined, }; } function deserializeXPSTrainingObjectivePoint(data: any): XPSTrainingObjectivePoint { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, }; } export interface XPSTrainResponse { /** * Estimated model size in bytes once deployed. */ deployedModelSizeBytes?: bigint; /** * Optional vision model error analysis configuration. The field is set when * model error analysis is enabled in the training request. The results of * error analysis will be binded together with evaluation results (in the * format of AnnotatedExample). */ errorAnalysisConfigs?: XPSVisionErrorAnalysisConfig[]; /** * Examples used to evaluate the model (usually the test set), with the * predicted annotations. The file_spec should point to recordio file(s) of * AnnotatedExample. 
For each returned example, the example_id_token and * annotations predicted by the model must be set. The example payload can and * is recommended to be omitted. */ evaluatedExampleSet?: XPSExampleSet; /** * The trained model evaluation metrics. This can be optionally returned. */ evaluationMetricsSet?: XPSEvaluationMetricsSet; /** * VisionExplanationConfig for XAI on test set. Optional for when XAI is * enable in training request. */ explanationConfigs?: XPSResponseExplanationSpec[]; imageClassificationTrainResp?: XPSImageClassificationTrainResponse; imageObjectDetectionTrainResp?: XPSImageObjectDetectionModelSpec; imageSegmentationTrainResp?: XPSImageSegmentationTrainResponse; /** * Token that represents the trained model. This is considered immutable and * is persisted in AutoML. xPS can put their own proto in the byte string, to * e.g. point to the model checkpoints. The token is passed to other xPS APIs * to refer to the model. */ modelToken?: Uint8Array; speechTrainResp?: XPSSpeechModelSpec; tablesTrainResp?: XPSTablesTrainResponse; textToSpeechTrainResp?: XPSTextToSpeechTrainResponse; /** * Will only be needed for uCAIP from Beta. */ textTrainResp?: XPSTextTrainResponse; translationTrainResp?: XPSTranslationTrainResponse; videoActionRecognitionTrainResp?: XPSVideoActionRecognitionTrainResponse; videoClassificationTrainResp?: XPSVideoClassificationTrainResponse; videoObjectTrackingTrainResp?: XPSVideoObjectTrackingTrainResponse; } function serializeXPSTrainResponse(data: any): XPSTrainResponse { return { ...data, deployedModelSizeBytes: data["deployedModelSizeBytes"] !== undefined ? String(data["deployedModelSizeBytes"]) : undefined, evaluatedExampleSet: data["evaluatedExampleSet"] !== undefined ? serializeXPSExampleSet(data["evaluatedExampleSet"]) : undefined, evaluationMetricsSet: data["evaluationMetricsSet"] !== undefined ? 
serializeXPSEvaluationMetricsSet(data["evaluationMetricsSet"]) : undefined, imageClassificationTrainResp: data["imageClassificationTrainResp"] !== undefined ? serializeXPSImageClassificationTrainResponse(data["imageClassificationTrainResp"]) : undefined, imageObjectDetectionTrainResp: data["imageObjectDetectionTrainResp"] !== undefined ? serializeXPSImageObjectDetectionModelSpec(data["imageObjectDetectionTrainResp"]) : undefined, imageSegmentationTrainResp: data["imageSegmentationTrainResp"] !== undefined ? serializeXPSImageSegmentationTrainResponse(data["imageSegmentationTrainResp"]) : undefined, modelToken: data["modelToken"] !== undefined ? encodeBase64(data["modelToken"]) : undefined, speechTrainResp: data["speechTrainResp"] !== undefined ? serializeXPSSpeechModelSpec(data["speechTrainResp"]) : undefined, tablesTrainResp: data["tablesTrainResp"] !== undefined ? serializeXPSTablesTrainResponse(data["tablesTrainResp"]) : undefined, textTrainResp: data["textTrainResp"] !== undefined ? serializeXPSTextTrainResponse(data["textTrainResp"]) : undefined, videoActionRecognitionTrainResp: data["videoActionRecognitionTrainResp"] !== undefined ? serializeXPSVideoActionRecognitionTrainResponse(data["videoActionRecognitionTrainResp"]) : undefined, videoClassificationTrainResp: data["videoClassificationTrainResp"] !== undefined ? serializeXPSVideoClassificationTrainResponse(data["videoClassificationTrainResp"]) : undefined, videoObjectTrackingTrainResp: data["videoObjectTrackingTrainResp"] !== undefined ? serializeXPSVideoObjectTrackingTrainResponse(data["videoObjectTrackingTrainResp"]) : undefined, }; } function deserializeXPSTrainResponse(data: any): XPSTrainResponse { return { ...data, deployedModelSizeBytes: data["deployedModelSizeBytes"] !== undefined ? BigInt(data["deployedModelSizeBytes"]) : undefined, evaluatedExampleSet: data["evaluatedExampleSet"] !== undefined ? 
deserializeXPSExampleSet(data["evaluatedExampleSet"]) : undefined, evaluationMetricsSet: data["evaluationMetricsSet"] !== undefined ? deserializeXPSEvaluationMetricsSet(data["evaluationMetricsSet"]) : undefined, imageClassificationTrainResp: data["imageClassificationTrainResp"] !== undefined ? deserializeXPSImageClassificationTrainResponse(data["imageClassificationTrainResp"]) : undefined, imageObjectDetectionTrainResp: data["imageObjectDetectionTrainResp"] !== undefined ? deserializeXPSImageObjectDetectionModelSpec(data["imageObjectDetectionTrainResp"]) : undefined, imageSegmentationTrainResp: data["imageSegmentationTrainResp"] !== undefined ? deserializeXPSImageSegmentationTrainResponse(data["imageSegmentationTrainResp"]) : undefined, modelToken: data["modelToken"] !== undefined ? decodeBase64(data["modelToken"] as string) : undefined, speechTrainResp: data["speechTrainResp"] !== undefined ? deserializeXPSSpeechModelSpec(data["speechTrainResp"]) : undefined, tablesTrainResp: data["tablesTrainResp"] !== undefined ? deserializeXPSTablesTrainResponse(data["tablesTrainResp"]) : undefined, textTrainResp: data["textTrainResp"] !== undefined ? deserializeXPSTextTrainResponse(data["textTrainResp"]) : undefined, videoActionRecognitionTrainResp: data["videoActionRecognitionTrainResp"] !== undefined ? deserializeXPSVideoActionRecognitionTrainResponse(data["videoActionRecognitionTrainResp"]) : undefined, videoClassificationTrainResp: data["videoClassificationTrainResp"] !== undefined ? deserializeXPSVideoClassificationTrainResponse(data["videoClassificationTrainResp"]) : undefined, videoObjectTrackingTrainResp: data["videoObjectTrackingTrainResp"] !== undefined ? deserializeXPSVideoObjectTrackingTrainResponse(data["videoObjectTrackingTrainResp"]) : undefined, }; } /** * Evaluation metrics for the dataset. */ export interface XPSTranslationEvaluationMetrics { /** * BLEU score for base model. */ baseBleuScore?: number; /** * BLEU score. 
*/ bleuScore?: number; } /** * Translation preprocess response. */ export interface XPSTranslationPreprocessResponse { /** * Total example count parsed. */ parsedExampleCount?: bigint; /** * Total valid example count. */ validExampleCount?: bigint; } function serializeXPSTranslationPreprocessResponse(data: any): XPSTranslationPreprocessResponse { return { ...data, parsedExampleCount: data["parsedExampleCount"] !== undefined ? String(data["parsedExampleCount"]) : undefined, validExampleCount: data["validExampleCount"] !== undefined ? String(data["validExampleCount"]) : undefined, }; } function deserializeXPSTranslationPreprocessResponse(data: any): XPSTranslationPreprocessResponse { return { ...data, parsedExampleCount: data["parsedExampleCount"] !== undefined ? BigInt(data["parsedExampleCount"]) : undefined, validExampleCount: data["validExampleCount"] !== undefined ? BigInt(data["validExampleCount"]) : undefined, }; } /** * Train response for translation. */ export interface XPSTranslationTrainResponse { /** * Type of the model. */ modelType?: | "MODEL_TYPE_UNSPECIFIED" | "LEGACY" | "CURRENT"; } /** * Metrics for a tuning job generated, will get forwarded to Stackdriver as * model tuning logs. Setting this as a standalone message out of * CreateModelMetadata to avoid confusion as we expose this message only to * users. */ export interface XPSTuningTrial { /** * Model parameters for the trial. */ modelStructure?: XPSTablesModelStructure; /** * The optimization objective evaluation of the eval split data. */ trainingObjectivePoint?: XPSTrainingObjectivePoint; } function serializeXPSTuningTrial(data: any): XPSTuningTrial { return { ...data, modelStructure: data["modelStructure"] !== undefined ? serializeXPSTablesModelStructure(data["modelStructure"]) : undefined, trainingObjectivePoint: data["trainingObjectivePoint"] !== undefined ? 
serializeXPSTrainingObjectivePoint(data["trainingObjectivePoint"]) : undefined, }; } function deserializeXPSTuningTrial(data: any): XPSTuningTrial { return { ...data, modelStructure: data["modelStructure"] !== undefined ? deserializeXPSTablesModelStructure(data["modelStructure"]) : undefined, trainingObjectivePoint: data["trainingObjectivePoint"] !== undefined ? deserializeXPSTrainingObjectivePoint(data["trainingObjectivePoint"]) : undefined, }; } /** * The Evaluation metrics entry given a specific precision_window_length. */ export interface XPSVideoActionMetricsEntry { /** * Metrics for each label-match confidence_threshold from * 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ confidenceMetricsEntries?: XPSVideoActionMetricsEntryConfidenceMetricsEntry[]; /** * The mean average precision. */ meanAveragePrecision?: number; /** * This VideoActionMetricsEntry is calculated based on this prediction window * length. If the predicted action's timestamp is inside the time window whose * center is the ground truth action's timestamp with this specific length, * the prediction result is treated as a true positive. */ precisionWindowLength?: number /* Duration */; } function serializeXPSVideoActionMetricsEntry(data: any): XPSVideoActionMetricsEntry { return { ...data, precisionWindowLength: data["precisionWindowLength"] !== undefined ? data["precisionWindowLength"] : undefined, }; } function deserializeXPSVideoActionMetricsEntry(data: any): XPSVideoActionMetricsEntry { return { ...data, precisionWindowLength: data["precisionWindowLength"] !== undefined ? data["precisionWindowLength"] : undefined, }; } /** * Metrics for a single confidence threshold. */ export interface XPSVideoActionMetricsEntryConfidenceMetricsEntry { /** * Output only. The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * Output only. The harmonic mean of recall and precision. */ f1Score?: number; /** * Output only. Precision for the given confidence threshold. 
*/ precision?: number; /** * Output only. Recall for the given confidence threshold. */ recall?: number; } /** * Model evaluation metrics for video action recognition. */ export interface XPSVideoActionRecognitionEvaluationMetrics { /** * Output only. The number of ground truth actions used to create this * evaluation. */ evaluatedActionCount?: number; /** * Output only. The metric entries for precision window lengths: 1s,2s,3s,4s, * 5s. */ videoActionMetricsEntries?: XPSVideoActionMetricsEntry[]; } function serializeXPSVideoActionRecognitionEvaluationMetrics(data: any): XPSVideoActionRecognitionEvaluationMetrics { return { ...data, videoActionMetricsEntries: data["videoActionMetricsEntries"] !== undefined ? data["videoActionMetricsEntries"].map((item: any) => (serializeXPSVideoActionMetricsEntry(item))) : undefined, }; } function deserializeXPSVideoActionRecognitionEvaluationMetrics(data: any): XPSVideoActionRecognitionEvaluationMetrics { return { ...data, videoActionMetricsEntries: data["videoActionMetricsEntries"] !== undefined ? data["videoActionMetricsEntries"].map((item: any) => (deserializeXPSVideoActionMetricsEntry(item))) : undefined, }; } export interface XPSVideoActionRecognitionTrainResponse { /** * ## The fields below are only populated under uCAIP request scope. */ modelArtifactSpec?: XPSVideoModelArtifactSpec; /** * The actual train cost of creating this model, expressed in node seconds, * i.e. 3,600 value in this field means 1 node hour. */ trainCostNodeSeconds?: bigint; } function serializeXPSVideoActionRecognitionTrainResponse(data: any): XPSVideoActionRecognitionTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? String(data["trainCostNodeSeconds"]) : undefined, }; } function deserializeXPSVideoActionRecognitionTrainResponse(data: any): XPSVideoActionRecognitionTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? 
BigInt(data["trainCostNodeSeconds"]) : undefined, }; } export interface XPSVideoBatchPredictOperationMetadata { /** * All the partial batch prediction results that are completed at the moment. * Output examples are sorted by completion time. The order will not be * changed. Each output example should be the path of a single RecordIO file * of AnnotatedExamples. */ outputExamples?: string[]; } export interface XPSVideoClassificationTrainResponse { /** * ## The fields below are only populated under uCAIP request scope. */ modelArtifactSpec?: XPSVideoModelArtifactSpec; /** * The actual train cost of creating this model, expressed in node seconds, * i.e. 3,600 value in this field means 1 node hour. */ trainCostNodeSeconds?: bigint; } function serializeXPSVideoClassificationTrainResponse(data: any): XPSVideoClassificationTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? String(data["trainCostNodeSeconds"]) : undefined, }; } function deserializeXPSVideoClassificationTrainResponse(data: any): XPSVideoClassificationTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? BigInt(data["trainCostNodeSeconds"]) : undefined, }; } /** * Information of downloadable models that are pre-generated as part of * training flow and will be persisted in AutoMl backend. Upon receiving * ExportModel request from user, AutoMl backend can serve the pre-generated * models to user if exists (by copying the files from internal path to user * provided location), otherwise, AutoMl backend will call xPS ExportModel API * to generate the model on the fly with the requesting format. */ export interface XPSVideoExportModelSpec { /** * Contains the model format and internal location of the model files to be * exported/downloaded. Use the Google Cloud Storage bucket name which is * provided via TrainRequest.gcs_bucket_name to store the model files. 
*/ exportModelOutputConfig?: XPSExportModelOutputConfig[]; } export interface XPSVideoModelArtifactSpec { /** * The model binary files in different formats for model export. */ exportArtifact?: XPSModelArtifactItem[]; /** * The default model binary file used for serving (e.g. batch predict) via * public Cloud AI Platform API. */ servingArtifact?: XPSModelArtifactItem; } /** * Model evaluation metrics for ObjectTracking problems. Next tag: 10. */ export interface XPSVideoObjectTrackingEvaluationMetrics { /** * Output only. The single metric for bounding boxes evaluation: the * mean_average_precision averaged over all bounding_box_metrics_entries. */ boundingBoxMeanAveragePrecision?: number; /** * Output only. The bounding boxes match metrics for each * Intersection-over-union threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ boundingBoxMetricsEntries?: XPSBoundingBoxMetricsEntry[]; /** * The number of bounding boxes used for model evaluation. */ evaluatedBoundingboxCount?: number; /** * The number of video frames used for model evaluation. */ evaluatedFrameCount?: number; /** * The number of tracks used for model evaluation. */ evaluatedTrackCount?: number; /** * Output only. The single metric for tracks accuracy evaluation: the * mean_average_precision averaged over all track_metrics_entries. */ trackMeanAveragePrecision?: number; /** * Output only. The single metric for tracks bounding box iou evaluation: the * mean_bounding_box_iou averaged over all track_metrics_entries. */ trackMeanBoundingBoxIou?: number; /** * Output only. The single metric for tracking consistency evaluation: the * mean_mismatch_rate averaged over all track_metrics_entries. */ trackMeanMismatchRate?: number; /** * Output only. The tracks match metrics for each Intersection-over-union * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ trackMetricsEntries?: XPSTrackMetricsEntry[]; } export interface XPSVideoObjectTrackingTrainResponse { /** * Populated for AutoML request only. 
*/ exportModelSpec?: XPSVideoExportModelSpec; /** * ## The fields below are only populated under uCAIP request scope. */ modelArtifactSpec?: XPSVideoModelArtifactSpec; /** * The actual train cost of creating this model, expressed in node seconds, * i.e. 3,600 value in this field means 1 node hour. */ trainCostNodeSeconds?: bigint; } function serializeXPSVideoObjectTrackingTrainResponse(data: any): XPSVideoObjectTrackingTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? String(data["trainCostNodeSeconds"]) : undefined, }; } function deserializeXPSVideoObjectTrackingTrainResponse(data: any): XPSVideoObjectTrackingTrainResponse { return { ...data, trainCostNodeSeconds: data["trainCostNodeSeconds"] !== undefined ? BigInt(data["trainCostNodeSeconds"]) : undefined, }; } export interface XPSVideoTrainingOperationMetadata { /** * This is an estimation of the node hours necessary for training a model, * expressed in milli node hours (i.e. 1,000 value in this field means 1 node * hour). A node hour represents the time a virtual machine spends running * your training job. The cost of one node running for one hour is a node * hour. */ trainCostMilliNodeHour?: bigint; } function serializeXPSVideoTrainingOperationMetadata(data: any): XPSVideoTrainingOperationMetadata { return { ...data, trainCostMilliNodeHour: data["trainCostMilliNodeHour"] !== undefined ? String(data["trainCostMilliNodeHour"]) : undefined, }; } function deserializeXPSVideoTrainingOperationMetadata(data: any): XPSVideoTrainingOperationMetadata { return { ...data, trainCostMilliNodeHour: data["trainCostMilliNodeHour"] !== undefined ? BigInt(data["trainCostMilliNodeHour"]) : undefined, }; } /** * The vision model error analysis configuration. Next tag: 3 */ export interface XPSVisionErrorAnalysisConfig { /** * The number of query examples in error analysis. */ exampleCount?: number; /** * The query type used in retrieval. 
The enum values are frozen in the * foreseeable future. */ queryType?: | "QUERY_TYPE_UNSPECIFIED" | "QUERY_TYPE_ALL_SIMILAR" | "QUERY_TYPE_SAME_CLASS_SIMILAR" | "QUERY_TYPE_SAME_CLASS_DISSIMILAR"; } export interface XPSVisionTrainingOperationMetadata { /** * Aggregated infra usage within certain time period, for billing report * purpose if XAI is enable in training request. */ explanationUsage?: InfraUsage; } function serializeXPSVisionTrainingOperationMetadata(data: any): XPSVisionTrainingOperationMetadata { return { ...data, explanationUsage: data["explanationUsage"] !== undefined ? serializeInfraUsage(data["explanationUsage"]) : undefined, }; } function deserializeXPSVisionTrainingOperationMetadata(data: any): XPSVisionTrainingOperationMetadata { return { ...data, explanationUsage: data["explanationUsage"] !== undefined ? deserializeInfraUsage(data["explanationUsage"]) : undefined, }; } /** * Visualization configurations for image explanation. */ export interface XPSVisualization { /** * Excludes attributions below the specified percentile, from the highlighted * areas. Defaults to 62. */ clipPercentLowerbound?: number; /** * Excludes attributions above the specified percentile from the highlighted * areas. Using the clip_percent_upperbound and clip_percent_lowerbound * together can be useful for filtering out noise and making it easier to see * areas of strong attribution. Defaults to 99.9. */ clipPercentUpperbound?: number; /** * The color scheme used for the highlighted areas. Defaults to PINK_GREEN * for Integrated Gradients attribution, which shows positive attributions in * green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which * highlights the most influential regions in yellow and the least influential * in blue. */ colorMap?: | "COLOR_MAP_UNSPECIFIED" | "PINK_GREEN" | "VIRIDIS" | "RED" | "GREEN" | "RED_GREEN" | "PINK_WHITE_GREEN"; /** * How the original image is displayed in the visualization. 
Adjusting the * overlay can help increase visual clarity if the original image makes it * difficult to view the visualization. Defaults to NONE. */ overlayType?: | "OVERLAY_TYPE_UNSPECIFIED" | "NONE" | "ORIGINAL" | "GRAYSCALE" | "MASK_BLACK"; /** * Whether to only highlight pixels with positive contributions, negative or * both. Defaults to POSITIVE. */ polarity?: | "POLARITY_UNSPECIFIED" | "POSITIVE" | "NEGATIVE" | "BOTH"; /** * Type of the image visualization. Only applicable to Integrated Gradients * attribution. OUTLINES shows regions of attribution, while PIXELS shows * per-pixel attribution. Defaults to OUTLINES. */ type?: | "TYPE_UNSPECIFIED" | "PIXELS" | "OUTLINES"; } export interface XPSXpsOperationMetadata { /** * Optional. XPS server can opt to provide example count of the long running * operation (e.g. training, data importing, batch prediction). */ exampleCount?: bigint; /** * Metrics for the operation. By the time the operation is terminated * (whether succeeded or failed) as returned from XPS, AutoML BE assumes the * metrics are finalized. AutoML BE transparently posts the metrics to Chemist * if it's not empty, regardless of the response content or error type. If * user is supposed to be charged in case of cancellation/error, this field * should be set. In the case where the type of LRO doesn't require any * billing, this field should be left unset. */ reportingMetrics?: XPSReportingMetrics; tablesTrainingOperationMetadata?: XPSTablesTrainingOperationMetadata; videoBatchPredictOperationMetadata?: XPSVideoBatchPredictOperationMetadata; videoTrainingOperationMetadata?: XPSVideoTrainingOperationMetadata; visionTrainingOperationMetadata?: XPSVisionTrainingOperationMetadata; } function serializeXPSXpsOperationMetadata(data: any): XPSXpsOperationMetadata { return { ...data, exampleCount: data["exampleCount"] !== undefined ? String(data["exampleCount"]) : undefined, reportingMetrics: data["reportingMetrics"] !== undefined ? 
serializeXPSReportingMetrics(data["reportingMetrics"]) : undefined, tablesTrainingOperationMetadata: data["tablesTrainingOperationMetadata"] !== undefined ? serializeXPSTablesTrainingOperationMetadata(data["tablesTrainingOperationMetadata"]) : undefined, videoTrainingOperationMetadata: data["videoTrainingOperationMetadata"] !== undefined ? serializeXPSVideoTrainingOperationMetadata(data["videoTrainingOperationMetadata"]) : undefined, visionTrainingOperationMetadata: data["visionTrainingOperationMetadata"] !== undefined ? serializeXPSVisionTrainingOperationMetadata(data["visionTrainingOperationMetadata"]) : undefined, }; } function deserializeXPSXpsOperationMetadata(data: any): XPSXpsOperationMetadata { return { ...data, exampleCount: data["exampleCount"] !== undefined ? BigInt(data["exampleCount"]) : undefined, reportingMetrics: data["reportingMetrics"] !== undefined ? deserializeXPSReportingMetrics(data["reportingMetrics"]) : undefined, tablesTrainingOperationMetadata: data["tablesTrainingOperationMetadata"] !== undefined ? deserializeXPSTablesTrainingOperationMetadata(data["tablesTrainingOperationMetadata"]) : undefined, videoTrainingOperationMetadata: data["videoTrainingOperationMetadata"] !== undefined ? deserializeXPSVideoTrainingOperationMetadata(data["videoTrainingOperationMetadata"]) : undefined, visionTrainingOperationMetadata: data["visionTrainingOperationMetadata"] !== undefined ? deserializeXPSVisionTrainingOperationMetadata(data["visionTrainingOperationMetadata"]) : undefined, }; } /** * An explanation method that redistributes Integrated Gradients attributions * to segmented regions, taking advantage of the model's fully differentiable * structure. Refer to this paper for more details: * https://arxiv.org/abs/1906.02825 Only supports image Models (modality is * IMAGE). */ export interface XPSXraiAttribution { /** * The number of steps for approximating the path integral. 
A good value to * start is 50 and gradually increase until the sum to diff property is met * within the desired error range. Valid range of its value is [1, 100], * inclusively. */ stepCount?: number; } function decodeBase64(b64: string): Uint8Array { const binString = atob(b64); const size = binString.length; const bytes = new Uint8Array(size); for (let i = 0; i < size; i++) { bytes[i] = binString.charCodeAt(i); } return bytes; } const base64abc = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","0","1","2","3","4","5","6","7","8","9","+","/"]; /** * CREDIT: https://gist.github.com/enepomnyaschih/72c423f727d395eeaa09697058238727 * Encodes a given Uint8Array, ArrayBuffer or string into RFC4648 base64 representation * @param data */ function encodeBase64(uint8: Uint8Array): string { let result = "", i; const l = uint8.length; for (i = 2; i < l; i += 3) { result += base64abc[uint8[i - 2] >> 2]; result += base64abc[((uint8[i - 2] & 0x03) << 4) | (uint8[i - 1] >> 4)]; result += base64abc[((uint8[i - 1] & 0x0f) << 2) | (uint8[i] >> 6)]; result += base64abc[uint8[i] & 0x3f]; } if (i === l + 1) { // 1 octet yet to write result += base64abc[uint8[i - 2] >> 2]; result += base64abc[(uint8[i - 2] & 0x03) << 4]; result += "=="; } if (i === l) { // 2 octets yet to write result += base64abc[uint8[i - 2] >> 2]; result += base64abc[((uint8[i - 2] & 0x03) << 4) | (uint8[i - 1] >> 4)]; result += base64abc[(uint8[i - 1] & 0x0f) << 2]; result += "="; } return result; }