// Copyright 2022 Luca Casonato. All rights reserved. MIT license.

/**
 * Vertex AI API Client for Deno
 * =============================
 *
 * Train high-quality custom machine learning models with minimal machine
 * learning expertise and effort.
 *
 * Docs: https://cloud.google.com/vertex-ai/
 * Source: https://googleapis.deno.dev/v1/aiplatform:v1.ts
 */

import { auth, CredentialsClient, GoogleAuth, request } from "/_/base@v1/mod.ts";
export { auth, GoogleAuth };
export type { CredentialsClient };

/**
 * Train high-quality custom machine learning models with minimal machine
 * learning expertise and effort.
 */
export class AIplatform {
  #client: CredentialsClient | undefined;
  #baseUrl: string;

  constructor(client?: CredentialsClient, baseUrl: string = "https://aiplatform.googleapis.com/") {
    this.#client = client;
    this.#baseUrl = baseUrl;
  }

  /**
   * Creates a BatchPredictionJob. Once created, the BatchPredictionJob
   * immediately attempts to start.
   */
  async batchPredictionJobsCreate(req: GoogleCloudAiplatformV1BatchPredictionJob, opts: BatchPredictionJobsCreateOptions = {}): Promise<GoogleCloudAiplatformV1BatchPredictionJob> {
    req = serializeGoogleCloudAiplatformV1BatchPredictionJob(req);
    const url = new URL(`${this.#baseUrl}v1/batchPredictionJobs`);
    if (opts.parent !== undefined) {
      url.searchParams.append("parent", String(opts.parent));
    }
    const body = JSON.stringify(req);
    const data = await request(url.href, {
      client: this.#client,
      method: "POST",
      body,
    });
    return deserializeGoogleCloudAiplatformV1BatchPredictionJob(data);
  }

  /**
   * Gets a BatchPredictionJob.
   *
   * @param name Required. The name of the BatchPredictionJob resource. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`
   */
  async batchPredictionJobsGet(name: string): Promise<GoogleCloudAiplatformV1BatchPredictionJob> {
    const url = new URL(`${this.#baseUrl}v1/${ name }`);
    const data = await request(url.href, {
      client: this.#client,
      method: "GET",
    });
    return deserializeGoogleCloudAiplatformV1BatchPredictionJob(data);
  }

  /**
   * Lists BatchPredictionJobs in a Location.
   */
  async batchPredictionJobsList(opts: BatchPredictionJobsListOptions = {}): Promise<GoogleCloudAiplatformV1ListBatchPredictionJobsResponse> {
    opts = serializeBatchPredictionJobsListOptions(opts);
    const url = new URL(`${this.#baseUrl}v1/batchPredictionJobs`);
    if (opts.filter !== undefined) {
      url.searchParams.append("filter", String(opts.filter));
    }
    if (opts.pageSize !== undefined) {
      url.searchParams.append("pageSize", String(opts.pageSize));
    }
    if (opts.pageToken !== undefined) {
      url.searchParams.append("pageToken", String(opts.pageToken));
    }
    if (opts.parent !== undefined) {
      url.searchParams.append("parent", String(opts.parent));
    }
    if (opts.readMask !== undefined) {
      url.searchParams.append("readMask", String(opts.readMask));
    }
    const data = await request(url.href, {
      client: this.#client,
      method: "GET",
    });
    return deserializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data);
  }

  /**
   * Creates a Dataset.
   */
  async datasetsCreate(req: GoogleCloudAiplatformV1Dataset, opts: DatasetsCreateOptions = {}): Promise<GoogleLongrunningOperation> {
    const url = new URL(`${this.#baseUrl}v1/datasets`);
    if (opts.parent !== undefined) {
      url.searchParams.append("parent", String(opts.parent));
    }
    const body = JSON.stringify(req);
    const data = await request(url.href, {
      client: this.#client,
      method: "POST",
      body,
    });
    return data as GoogleLongrunningOperation;
  }

  /**
   * Create a version from a Dataset.
   *
   * @param parent Required. The name of the Dataset resource.
Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async datasetsDatasetVersionsCreate(parent: string, req: GoogleCloudAiplatformV1DatasetVersion): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a Dataset version. * * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async datasetsDatasetVersionsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a Dataset version. * * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async datasetsDatasetVersionsGet(name: string, opts: DatasetsDatasetVersionsGetOptions = {}): Promise { opts = serializeDatasetsDatasetVersionsGetOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1DatasetVersion; } /** * Lists DatasetVersions in a Dataset. * * @param parent Required. The resource name of the Dataset to list DatasetVersions from. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async datasetsDatasetVersionsList(parent: string, opts: DatasetsDatasetVersionsListOptions = {}): Promise { opts = serializeDatasetsDatasetVersionsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDatasetVersionsResponse; } /** * Updates a DatasetVersion. * * @param name Output only. Identifier. The resource name of the DatasetVersion. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async datasetsDatasetVersionsPatch(name: string, req: GoogleCloudAiplatformV1DatasetVersion, opts: DatasetsDatasetVersionsPatchOptions = {}): Promise { opts = serializeDatasetsDatasetVersionsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1DatasetVersion; } /** * Restores a dataset version. * * @param name Required. The name of the DatasetVersion resource. 
Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async datasetsDatasetVersionsRestore(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:restore`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Deletes a Dataset. * * @param name Required. The resource name of the Dataset to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async datasetsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a Dataset. * * @param name Required. The name of the Dataset resource. */ async datasetsGet(name: string, opts: DatasetsGetOptions = {}): Promise { opts = serializeDatasetsGetOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Dataset; } /** * Lists Datasets in a Location. * */ async datasetsList(opts: DatasetsListOptions = {}): Promise { opts = serializeDatasetsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/datasets`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.parent !== undefined) { url.searchParams.append("parent", String(opts.parent)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDatasetsResponse; } /** * Updates a Dataset. * * @param name Output only. Identifier. The resource name of the Dataset. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async datasetsPatch(name: string, req: GoogleCloudAiplatformV1Dataset, opts: DatasetsPatchOptions = {}): Promise { opts = serializeDatasetsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Dataset; } /** * Exposes an OpenAI-compatible endpoint for chat completions. * * @param endpoint Required. The name of the endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async endpointsChatCompletions(endpoint: string, req: GoogleApiHttpBody): Promise { req = serializeGoogleApiHttpBody(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }/chat/completions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. 
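   *
   * A minimal usage sketch (not part of the generated client surface): the
   * endpoint name, the request fields, and the authenticated `client`
   * credential below are illustrative assumptions; only the import URL and
   * the method name come from this module.
   *
   * ```ts
   * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
   *
   * const ai = new AIplatform(client); // `client` is an assumed CredentialsClient
   * const res = await ai.endpointsComputeTokens(
   *   "projects/my-project/locations/us-central1/endpoints/1234567890",
   *   { instances: [{ prompt: "Hello, Vertex AI" }] }, // assumed request shape
   * );
   * console.log(res);
   * ```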
*/ async endpointsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async endpointsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async endpointsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform an online prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async endpointsPredict(endpoint: string, req: GoogleCloudAiplatformV1PredictRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1PredictResponse; } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async endpointsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Upload a file into a RagCorpus. * * @param parent Required. The name of the RagCorpus resource into which to upload the file. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async mediaUpload(parent: string, req: GoogleCloudAiplatformV1UploadRagFileRequest): Promise { req = serializeGoogleCloudAiplatformV1UploadRagFileRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/ragFiles:upload`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1UploadRagFileResponse(data); } /** * Gets a GenAI cache config. * * @param name Required. Name of the cache config. Format: - `projects/{project}/cacheConfig`. */ async projectsGetCacheConfig(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1CacheConfig; } /** * Given an input prompt, it returns augmented prompt from vertex rag store * to guide LLM towards generating grounded responses. * * @param parent Required. The resource name of the Location from which to augment prompt. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`. */ async projectsLocationsAugmentPrompt(parent: string, req: GoogleCloudAiplatformV1AugmentPromptRequest): Promise { req = serializeGoogleCloudAiplatformV1AugmentPromptRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }:augmentPrompt`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1AugmentPromptResponse(data); } /** * Cancels a BatchPredictionJob. Starts asynchronous cancellation on the * BatchPredictionJob. The server makes the best effort to cancel the job, but * success is not guaranteed. Clients can use JobService.GetBatchPredictionJob * or other methods to check whether the cancellation succeeded or whether the * job completed despite cancellation. On a successful cancellation, the * BatchPredictionJob is not deleted;instead its BatchPredictionJob.state is * set to `CANCELLED`. Any files already outputted by the job are not deleted. * * @param name Required. The name of the BatchPredictionJob to cancel. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}` */ async projectsLocationsBatchPredictionJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelBatchPredictionJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a BatchPredictionJob. A BatchPredictionJob once created will right * away be attempted to start. * * @param parent Required. The resource name of the Location to create the BatchPredictionJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsBatchPredictionJobsCreate(parent: string, req: GoogleCloudAiplatformV1BatchPredictionJob): Promise { req = serializeGoogleCloudAiplatformV1BatchPredictionJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/batchPredictionJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1BatchPredictionJob(data); } /** * Deletes a BatchPredictionJob. Can only be called on jobs that already * finished. * * @param name Required. 
The name of the BatchPredictionJob resource to be deleted. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}` */ async projectsLocationsBatchPredictionJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a BatchPredictionJob * * @param name Required. The name of the BatchPredictionJob resource. Format: `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}` */ async projectsLocationsBatchPredictionJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1BatchPredictionJob(data); } /** * Lists BatchPredictionJobs in a Location. * * @param parent Required. The resource name of the Location to list the BatchPredictionJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsBatchPredictionJobsList(parent: string, opts: ProjectsLocationsBatchPredictionJobsListOptions = {}): Promise { opts = serializeProjectsLocationsBatchPredictionJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/batchPredictionJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data); } /** * Creates cached content, this call will initialize the cached content in * the data storage, and users need to pay for the cache data storage. * * @param parent Required. The parent resource where the cached content will be created */ async projectsLocationsCachedContentsCreate(parent: string, req: GoogleCloudAiplatformV1CachedContent): Promise { req = serializeGoogleCloudAiplatformV1CachedContent(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/cachedContents`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1CachedContent(data); } /** * Deletes cached content * * @param name Required. The resource name referring to the cached content */ async projectsLocationsCachedContentsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets cached content configurations * * @param name Required. The resource name referring to the cached content */ async projectsLocationsCachedContentsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1CachedContent(data); } /** * Lists cached contents in a project * * @param parent Required. The parent, which owns this collection of cached contents. 
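   *
   * A paging sketch (illustrative only): the parent resource name, the
   * `client` credential, and the response fields `cachedContents` and
   * `nextPageToken` are assumptions rather than guarantees of this doc.
   *
   * ```ts
   * const ai = new AIplatform(client);
   * let pageToken: string | undefined;
   * do {
   *   const page = await ai.projectsLocationsCachedContentsList(
   *     "projects/my-project/locations/us-central1",
   *     { pageSize: 50, pageToken },
   *   );
   *   for (const cc of page.cachedContents ?? []) console.log(cc.name);
   *   pageToken = page.nextPageToken;
   * } while (pageToken);
   * ```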
*/ async projectsLocationsCachedContentsList(parent: string, opts: ProjectsLocationsCachedContentsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/cachedContents`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListCachedContentsResponse(data); } /** * Updates cached content configurations * * @param name Immutable. Identifier. The server-generated resource name of the cached content Format: projects/{project}/locations/{location}/cachedContents/{cached_content} */ async projectsLocationsCachedContentsPatch(name: string, req: GoogleCloudAiplatformV1CachedContent, opts: ProjectsLocationsCachedContentsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1CachedContent(req); opts = serializeProjectsLocationsCachedContentsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1CachedContent(data); } /** * Given an input text, it returns a score that evaluates the factuality of * the text. It also extracts and returns claims from the text and provides * supporting facts. * * @param parent Required. The resource name of the Location from which to corroborate text. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`. */ async projectsLocationsCorroborateContent(parent: string, req: GoogleCloudAiplatformV1CorroborateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1CorroborateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }:corroborateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CorroborateContentResponse; } /** * Cancels a CustomJob. Starts asynchronous cancellation on the CustomJob. * The server makes a best effort to cancel the job, but success is not * guaranteed. Clients can use JobService.GetCustomJob or other methods to * check whether the cancellation succeeded or whether the job completed * despite cancellation. On successful cancellation, the CustomJob is not * deleted; instead it becomes a job with a CustomJob.error value with a * google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * CustomJob.state is set to `CANCELLED`. * * @param name Required. The name of the CustomJob to cancel. Format: `projects/{project}/locations/{location}/customJobs/{custom_job}` */ async projectsLocationsCustomJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelCustomJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a CustomJob. A created CustomJob right away will be attempted to * be run. * * @param parent Required. The resource name of the Location to create the CustomJob in. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsCustomJobsCreate(parent: string, req: GoogleCloudAiplatformV1CustomJob): Promise { req = serializeGoogleCloudAiplatformV1CustomJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/customJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1CustomJob(data); } /** * Deletes a CustomJob. * * @param name Required. The name of the CustomJob resource to be deleted. Format: `projects/{project}/locations/{location}/customJobs/{custom_job}` */ async projectsLocationsCustomJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a CustomJob. * * @param name Required. The name of the CustomJob resource. Format: `projects/{project}/locations/{location}/customJobs/{custom_job}` */ async projectsLocationsCustomJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1CustomJob(data); } /** * Lists CustomJobs in a Location. * * @param parent Required. The resource name of the Location to list the CustomJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsCustomJobsList(parent: string, opts: ProjectsLocationsCustomJobsListOptions = {}): Promise { opts = serializeProjectsLocationsCustomJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/customJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListCustomJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsCustomJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. 
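   *
   * A cleanup sketch (the operation resource name and the `client` credential
   * are illustrative assumptions):
   *
   * ```ts
   * const ai = new AIplatform(client);
   * const opName =
   *   "projects/my-project/locations/us-central1/operations/1234"; // assumed format
   * const op = await ai.projectsLocationsCustomJobsOperationsGet(opName);
   * if (op.done) {
   *   // Discard the finished operation record; this does not cancel anything.
   *   await ai.projectsLocationsCustomJobsOperationsDelete(opName);
   * }
   * ```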
*/ async projectsLocationsCustomJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsCustomJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsCustomJobsOperationsList(name: string, opts: ProjectsLocationsCustomJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsCustomJobsOperationsWait(name: string, opts: ProjectsLocationsCustomJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsCustomJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Cancels a DataLabelingJob. Success of cancellation is not guaranteed. * * @param name Required. The name of the DataLabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` */ async projectsLocationsDataLabelingJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelDataLabelingJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a DataLabelingJob. * * @param parent Required. The parent of the DataLabelingJob. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsDataLabelingJobsCreate(parent: string, req: GoogleCloudAiplatformV1DataLabelingJob): Promise { req = serializeGoogleCloudAiplatformV1DataLabelingJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/dataLabelingJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1DataLabelingJob(data); } /** * Deletes a DataLabelingJob. * * @param name Required. The name of the DataLabelingJob to be deleted. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` */ async projectsLocationsDataLabelingJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a DataLabelingJob. * * @param name Required. The name of the DataLabelingJob. Format: `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` */ async projectsLocationsDataLabelingJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1DataLabelingJob(data); } /** * Lists DataLabelingJobs in a Location. * * @param parent Required. The parent of the DataLabelingJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsDataLabelingJobsList(parent: string, opts: ProjectsLocationsDataLabelingJobsListOptions = {}): Promise { opts = serializeProjectsLocationsDataLabelingJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/dataLabelingJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListDataLabelingJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDataLabelingJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. 
It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDataLabelingJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDataLabelingJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsDataLabelingJobsOperationsList(name: string, opts: ProjectsLocationsDataLabelingJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDataLabelingJobsOperationsWait(name: string, opts: ProjectsLocationsDataLabelingJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDataLabelingJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Gets an AnnotationSpec. * * @param name Required. The name of the AnnotationSpec resource. 
Format: `projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}` */ async projectsLocationsDatasetsAnnotationSpecsGet(name: string, opts: ProjectsLocationsDatasetsAnnotationSpecsGetOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsAnnotationSpecsGetOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1AnnotationSpec; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDatasetsAnnotationSpecsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDatasetsAnnotationSpecsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDatasetsAnnotationSpecsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
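   *
   * A listing sketch (the parent resource name and the `client` credential
   * are illustrative; the `operations` response field is an assumption based
   * on the standard long-running-operations list shape):
   *
   * ```ts
   * const ai = new AIplatform(client);
   * const res = await ai.projectsLocationsDatasetsAnnotationSpecsOperationsList(
   *   "projects/my-project/locations/us-central1/datasets/111/annotationSpecs/222",
   *   { pageSize: 10 },
   * );
   * for (const op of res.operations ?? []) {
   *   console.log(op.name, op.done ? "done" : "running");
   * }
   * ```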
*/ async projectsLocationsDatasetsAnnotationSpecsOperationsList(name: string, opts: ProjectsLocationsDatasetsAnnotationSpecsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDatasetsAnnotationSpecsOperationsWait(name: string, opts: ProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Creates a Dataset. * * @param parent Required. The resource name of the Location to create the Dataset in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsDatasetsCreate(parent: string, req: GoogleCloudAiplatformV1Dataset): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/datasets`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Lists Annotations belongs to a dataitem. * * @param parent Required. The resource name of the DataItem to list Annotations from. 
Format: `projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}` */ async projectsLocationsDatasetsDataItemsAnnotationsList(parent: string, opts: ProjectsLocationsDatasetsDataItemsAnnotationsListOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDataItemsAnnotationsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/annotations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListAnnotationsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDatasetsDataItemsAnnotationsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDatasetsDataItemsAnnotationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDatasetsDataItemsAnnotationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsDatasetsDataItemsAnnotationsOperationsList(name: string, opts: ProjectsLocationsDatasetsDataItemsAnnotationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDatasetsDataItemsAnnotationsOperationsWait(name: string, opts: ProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Lists DataItems in a Dataset. * * @param parent Required. The resource name of the Dataset to list DataItems from. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsDataItemsList(parent: string, opts: ProjectsLocationsDatasetsDataItemsListOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDataItemsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/dataItems`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDataItemsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. 
On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDatasetsDataItemsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDatasetsDataItemsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDatasetsDataItemsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsDatasetsDataItemsOperationsList(name: string, opts: ProjectsLocationsDatasetsDataItemsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
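   *
   * A waiting sketch (the operation name, the `client` credential, and the
   * numeric seconds value for `timeout` are assumptions; the option is
   * serialized by this client before being sent):
   *
   * ```ts
   * const ai = new AIplatform(client);
   * const op = await ai.projectsLocationsDatasetsDataItemsOperationsWait(
   *   "projects/my-project/locations/us-central1/datasets/111/operations/42",
   *   { timeout: 30 }, // assumed to be a Duration expressed in seconds
   * );
   * if (!op.done) {
   *   // Best-effort wait returned early; poll again with OperationsGet.
   * }
   * ```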
*/ async projectsLocationsDatasetsDataItemsOperationsWait(name: string, opts: ProjectsLocationsDatasetsDataItemsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDataItemsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Create a version from a Dataset. * * @param parent Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsDatasetVersionsCreate(parent: string, req: GoogleCloudAiplatformV1DatasetVersion): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a Dataset version. * * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async projectsLocationsDatasetsDatasetVersionsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a Dataset version. * * @param name Required. The resource name of the Dataset version to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async projectsLocationsDatasetsDatasetVersionsGet(name: string, opts: ProjectsLocationsDatasetsDatasetVersionsGetOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDatasetVersionsGetOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1DatasetVersion; } /** * Lists DatasetVersions in a Dataset. * * @param parent Required. The resource name of the Dataset to list DatasetVersions from. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsDatasetVersionsList(parent: string, opts: ProjectsLocationsDatasetsDatasetVersionsListOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDatasetVersionsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/datasetVersions`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDatasetVersionsResponse; } /** * Updates a DatasetVersion. * * @param name Output only. Identifier. The resource name of the DatasetVersion. 
Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async projectsLocationsDatasetsDatasetVersionsPatch(name: string, req: GoogleCloudAiplatformV1DatasetVersion, opts: ProjectsLocationsDatasetsDatasetVersionsPatchOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsDatasetVersionsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1DatasetVersion; } /** * Restores a dataset version. * * @param name Required. The name of the DatasetVersion resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ async projectsLocationsDatasetsDatasetVersionsRestore(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:restore`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Deletes a Dataset. * * @param name Required. The resource name of the Dataset to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Exports data from a Dataset. * * @param name Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsExport(name: string, req: GoogleCloudAiplatformV1ExportDataRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:export`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Gets a Dataset. * * @param name Required. The name of the Dataset resource. */ async projectsLocationsDatasetsGet(name: string, opts: ProjectsLocationsDatasetsGetOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsGetOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Dataset; } /** * Imports data into a Dataset. * * @param name Required. The name of the Dataset resource. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsImport(name: string, req: GoogleCloudAiplatformV1ImportDataRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:import`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Lists Datasets in a Location. * * @param parent Required. The name of the Dataset's parent resource. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsDatasetsList(parent: string, opts: ProjectsLocationsDatasetsListOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/datasets`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDatasetsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDatasetsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDatasetsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDatasetsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
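 *
 * A minimal usage sketch (the resource name is hypothetical; pass a
 * CredentialsClient to the constructor for authenticated calls, and note the
 * `operations`/`done`/`name` fields follow the standard google.longrunning
 * types):
 *
 * @example
 * const ai = new AIplatform(); // supply a CredentialsClient in real use
 * const parent = "projects/my-project/locations/us-central1/datasets/123"; // hypothetical
 * const page = await ai.projectsLocationsDatasetsOperationsList(parent, { pageSize: 50 });
 * for (const op of page.operations ?? []) {
 *   if (!op.done && op.name) {
 *     // Re-fetch any operation that is still running.
 *     const latest = await ai.projectsLocationsDatasetsOperationsGet(op.name);
 *     console.log(latest.name, latest.done);
 *   }
 * }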
*/ async projectsLocationsDatasetsOperationsList(name: string, opts: ProjectsLocationsDatasetsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDatasetsOperationsWait(name: string, opts: ProjectsLocationsDatasetsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a Dataset. * * @param name Output only. Identifier. The resource name of the Dataset. Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsPatch(name: string, req: GoogleCloudAiplatformV1Dataset, opts: ProjectsLocationsDatasetsPatchOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Dataset; } /** * Deletes a SavedQuery. * * @param name Required. The resource name of the SavedQuery to delete. Format: `projects/{project}/locations/{location}/datasets/{dataset}/savedQueries/{saved_query}` */ async projectsLocationsDatasetsSavedQueriesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Lists SavedQueries in a Dataset. * * @param parent Required. The resource name of the Dataset to list SavedQueries from. 
Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsSavedQueriesList(parent: string, opts: ProjectsLocationsDatasetsSavedQueriesListOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsSavedQueriesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/savedQueries`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListSavedQueriesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDatasetsSavedQueriesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDatasetsSavedQueriesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDatasetsSavedQueriesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
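 *
 * A sketch of paging through every matching operation (the parent name is
 * hypothetical; `operations`/`nextPageToken` follow the standard
 * google.longrunning list response):
 *
 * @example
 * const ai = new AIplatform(); // supply a CredentialsClient in real use
 * const name = "projects/my-project/locations/us-central1/datasets/123/savedQueries/456"; // hypothetical parent
 * let pageToken: string | undefined;
 * do {
 *   const res = await ai.projectsLocationsDatasetsSavedQueriesOperationsList(name, { pageSize: 100, pageToken });
 *   for (const op of res.operations ?? []) console.log(op.name);
 *   pageToken = res.nextPageToken;
 * } while (pageToken);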
*/ async projectsLocationsDatasetsSavedQueriesOperationsList(name: string, opts: ProjectsLocationsDatasetsSavedQueriesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDatasetsSavedQueriesOperationsWait(name: string, opts: ProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Searches DataItems in a Dataset. * * @param dataset Required. The resource name of the Dataset from which to search DataItems. 
Format: `projects/{project}/locations/{location}/datasets/{dataset}` */ async projectsLocationsDatasetsSearchDataItems(dataset: string, opts: ProjectsLocationsDatasetsSearchDataItemsOptions = {}): Promise { opts = serializeProjectsLocationsDatasetsSearchDataItemsOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ dataset }:searchDataItems`); if (opts.annotationFilters !== undefined) { url.searchParams.append("annotationFilters", String(opts.annotationFilters)); } if (opts.annotationsFilter !== undefined) { url.searchParams.append("annotationsFilter", String(opts.annotationsFilter)); } if (opts.annotationsLimit !== undefined) { url.searchParams.append("annotationsLimit", String(opts.annotationsLimit)); } if (opts.dataItemFilter !== undefined) { url.searchParams.append("dataItemFilter", String(opts.dataItemFilter)); } if (opts.dataLabelingJob !== undefined) { url.searchParams.append("dataLabelingJob", String(opts.dataLabelingJob)); } if (opts.fieldMask !== undefined) { url.searchParams.append("fieldMask", String(opts.fieldMask)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts["orderByAnnotation.orderBy"] !== undefined) { url.searchParams.append("orderByAnnotation.orderBy", String(opts["orderByAnnotation.orderBy"])); } if (opts["orderByAnnotation.savedQuery"] !== undefined) { url.searchParams.append("orderByAnnotation.savedQuery", String(opts["orderByAnnotation.savedQuery"])); } if (opts.orderByDataItem !== undefined) { url.searchParams.append("orderByDataItem", String(opts.orderByDataItem)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.savedQuery !== undefined) { url.searchParams.append("savedQuery", String(opts.savedQuery)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1SearchDataItemsResponse; } /** * Create a DeploymentResourcePool. * * @param parent Required. The parent location resource where this DeploymentResourcePool will be created. Format: `projects/{project}/locations/{location}` */ async projectsLocationsDeploymentResourcePoolsCreate(parent: string, req: GoogleCloudAiplatformV1CreateDeploymentResourcePoolRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/deploymentResourcePools`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Delete a DeploymentResourcePool. * * @param name Required. The name of the DeploymentResourcePool to delete. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Get a DeploymentResourcePool. * * @param name Required. The name of the DeploymentResourcePool to retrieve. 
Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1DeploymentResourcePool; } /** * List DeploymentResourcePools in a location. * * @param parent Required. The parent Location which owns this collection of DeploymentResourcePools. Format: `projects/{project}/locations/{location}` */ async projectsLocationsDeploymentResourcePoolsList(parent: string, opts: ProjectsLocationsDeploymentResourcePoolsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/deploymentResourcePools`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListDeploymentResourcePoolsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsDeploymentResourcePoolsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsDeploymentResourcePoolsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsDeploymentResourcePoolsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
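 *
 * Long-running operations returned by the DeploymentResourcePool calls above
 * (for example the create call) can be polled until they finish; a rough
 * sketch, with a hypothetical operation name and a naive fixed poll interval:
 *
 * @example
 * const ai = new AIplatform(); // supply a CredentialsClient in real use
 * const opName = "projects/my-project/locations/us-central1/operations/789"; // hypothetical
 * let op = await ai.projectsLocationsDeploymentResourcePoolsOperationsGet(opName);
 * while (!op.done) {
 *   await new Promise((r) => setTimeout(r, 5_000)); // wait 5s between polls
 *   op = await ai.projectsLocationsDeploymentResourcePoolsOperationsGet(opName);
 * }
 * if (op.error) throw new Error(op.error.message);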
*/ async projectsLocationsDeploymentResourcePoolsOperationsList(name: string, opts: ProjectsLocationsDeploymentResourcePoolsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsDeploymentResourcePoolsOperationsWait(name: string, opts: ProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Update a DeploymentResourcePool. * * @param name Immutable. The resource name of the DeploymentResourcePool. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsPatch(name: string, req: GoogleCloudAiplatformV1DeploymentResourcePool, opts: ProjectsLocationsDeploymentResourcePoolsPatchOptions = {}): Promise { opts = serializeProjectsLocationsDeploymentResourcePoolsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * List DeployedModels that have been deployed on this * DeploymentResourcePool. * * @param deploymentResourcePool Required. The name of the target DeploymentResourcePool to query. 
Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ async projectsLocationsDeploymentResourcePoolsQueryDeployedModels(deploymentResourcePool: string, opts: ProjectsLocationsDeploymentResourcePoolsQueryDeployedModelsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ deploymentResourcePool }:queryDeployedModels`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1QueryDeployedModelsResponse; } /** * Exposes an OpenAI-compatible endpoint for chat completions. * * @param endpoint Required. The name of the endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsChatCompletions(endpoint: string, req: GoogleApiHttpBody): Promise { req = serializeGoogleApiHttpBody(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }/chat/completions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. */ async projectsLocationsEndpointsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Creates an Endpoint. * * @param parent Required. The resource name of the Location to create the Endpoint in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEndpointsCreate(parent: string, req: GoogleCloudAiplatformV1Endpoint, opts: ProjectsLocationsEndpointsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Endpoint(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/endpoints`); if (opts.endpointId !== undefined) { url.searchParams.append("endpointId", String(opts.endpointId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes an Endpoint. * * @param name Required. The name of the Endpoint resource to be deleted. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Deploys a Model into this Endpoint, creating a DeployedModel within it. * * @param endpoint Required. The name of the Endpoint resource into which to deploy a Model. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDeployModel(endpoint: string, req: GoogleCloudAiplatformV1DeployModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:deployModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Perform an unary online prediction request to a gRPC model server for * Vertex first-party products and frameworks. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDirectPredict(endpoint: string, req: GoogleCloudAiplatformV1DirectPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1DirectPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:directPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1DirectPredictResponse(data); } /** * Perform an unary online prediction request to a gRPC model server for * custom containers. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsDirectRawPredict(endpoint: string, req: GoogleCloudAiplatformV1DirectRawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1DirectRawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:directRawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1DirectRawPredictResponse(data); } /** * Perform an online explanation. If deployed_model_id is specified, the * corresponding DeployModel must have explanation_spec populated. If * deployed_model_id is not specified, all DeployedModels must have * explanation_spec populated. * * @param endpoint Required. The name of the Endpoint requested to serve the explanation. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsExplain(endpoint: string, req: GoogleCloudAiplatformV1ExplainRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:explain`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1ExplainResponse; } /** * Fetch an asynchronous online prediction operation. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` */ async projectsLocationsEndpointsFetchPredictOperation(endpoint: string, req: GoogleCloudAiplatformV1FetchPredictOperationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:fetchPredictOperation`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Gets an Endpoint. * * @param name Required. The name of the Endpoint resource. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Endpoint(data); } /** * Lists Endpoints in a Location. * * @param parent Required. The resource name of the Location from which to list the Endpoints. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEndpointsList(parent: string, opts: ProjectsLocationsEndpointsListOptions = {}): Promise { opts = serializeProjectsLocationsEndpointsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/endpoints`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListEndpointsResponse(data); } /** * Updates an existing deployed model. Updatable fields include * `min_replica_count`, `max_replica_count`, `autoscaling_metric_specs`, * `disable_container_logging` (v1 only), and `enable_container_logging` * (v1beta1 only). * * @param endpoint Required. The name of the Endpoint resource into which to mutate a DeployedModel. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsMutateDeployedModel(endpoint: string, req: GoogleCloudAiplatformV1MutateDeployedModelRequest): Promise { req = serializeGoogleCloudAiplatformV1MutateDeployedModelRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:mutateDeployedModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsEndpointsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsEndpointsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsEndpointsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsEndpointsOperationsList(name: string, opts: ProjectsLocationsEndpointsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. 
If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsEndpointsOperationsWait(name: string, opts: ProjectsLocationsEndpointsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsEndpointsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an Endpoint. * * @param name Output only. The resource name of the Endpoint. */ async projectsLocationsEndpointsPatch(name: string, req: GoogleCloudAiplatformV1Endpoint, opts: ProjectsLocationsEndpointsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Endpoint(req); opts = serializeProjectsLocationsEndpointsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1Endpoint(data); } /** * Perform an online prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsPredict(endpoint: string, req: GoogleCloudAiplatformV1PredictRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1PredictResponse; } async projectsLocationsEndpointsPredictLongRunning(endpoint: string, req: GoogleCloudAiplatformV1PredictLongRunningRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predictLongRunning`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Perform an online prediction with an arbitrary HTTP payload. The response * includes the following HTTP headers: * `X-Vertex-AI-Endpoint-Id`: ID of the * Endpoint that served this prediction. * `X-Vertex-AI-Deployed-Model-Id`: ID * of the Endpoint's DeployedModel that served this prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsRawPredict(endpoint: string, req: GoogleCloudAiplatformV1RawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1RawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:rawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Perform a server-side streaming online prediction request for Vertex LLM * streaming. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsServerStreamingPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamingPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamingPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:serverStreamingPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1StreamingPredictResponse(data); } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform a streaming online prediction with an arbitrary HTTP payload. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsStreamRawPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamRawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamRawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:streamRawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Undeploys a Model from an Endpoint, removing a DeployedModel from it, and * freeing all resources it's using. * * @param endpoint Required. The name of the Endpoint resource from which to undeploy a Model. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsEndpointsUndeployModel(endpoint: string, req: GoogleCloudAiplatformV1UndeployModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:undeployModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Updates an Endpoint with a long running operation. * * @param name Output only. The resource name of the Endpoint. 
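 *
 * The Endpoint named here is the same resource the prediction methods above
 * operate on; a minimal online prediction sketch (the endpoint name and the
 * instance payload are hypothetical and depend on the deployed model):
 *
 * @example
 * const ai = new AIplatform(); // supply a CredentialsClient in real use
 * const endpoint = "projects/my-project/locations/us-central1/endpoints/123"; // hypothetical
 * const res = await ai.projectsLocationsEndpointsPredict(endpoint, {
 *   instances: [{ feature_a: 1.5, feature_b: "red" }], // hypothetical model input
 * });
 * console.log(res.predictions);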
*/ async projectsLocationsEndpointsUpdate(name: string, req: GoogleCloudAiplatformV1UpdateEndpointLongRunningRequest): Promise { req = serializeGoogleCloudAiplatformV1UpdateEndpointLongRunningRequest(req); const url = new URL(`${this.#baseUrl}v1/${ name }:update`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Evaluates a dataset based on a set of given metrics. * * @param location Required. The resource name of the Location to evaluate the dataset. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEvaluateDataset(location: string, req: GoogleCloudAiplatformV1EvaluateDatasetRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ location }:evaluateDataset`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Evaluates instances based on a given metric. * * @param location Required. The resource name of the Location to evaluate the instances. Format: `projects/{project}/locations/{location}` */ async projectsLocationsEvaluateInstances(location: string, req: GoogleCloudAiplatformV1EvaluateInstancesRequest): Promise { req = serializeGoogleCloudAiplatformV1EvaluateInstancesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ location }:evaluateInstances`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1EvaluateInstancesResponse; } /** * Creates a new FeatureGroup in a given project and location. * * @param parent Required. The resource name of the Location to create FeatureGroups. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureGroupsCreate(parent: string, req: GoogleCloudAiplatformV1FeatureGroup, opts: ProjectsLocationsFeatureGroupsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureGroups`); if (opts.featureGroupId !== undefined) { url.searchParams.append("featureGroupId", String(opts.featureGroupId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single FeatureGroup. * * @param name Required. The name of the FeatureGroup to be deleted. Format: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsDelete(name: string, opts: ProjectsLocationsFeatureGroupsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Creates a batch of Features in a given FeatureGroup. * * @param parent Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. 
Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateFeaturesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new Feature in a given FeatureGroup. * * @param parent Required. The resource name of the EntityType or FeatureGroup to create a Feature. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesCreate(parent: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeatureGroupsFeaturesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.featureId !== undefined) { url.searchParams.append("featureId", String(opts.featureId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single Feature. * * @param name Required. The name of the Features to be deleted. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` */ async projectsLocationsFeatureGroupsFeaturesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets details of a single Feature. * * @param name Required. The name of the Feature resource. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Feature; } /** * Lists Features in a given FeatureGroup. * * @param parent Required. The resource name of the Location to list Features. 
Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeatureGroupsFeaturesList(parent: string, opts: ProjectsLocationsFeatureGroupsFeaturesListOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsFeaturesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.latestStatsCount !== undefined) { url.searchParams.append("latestStatsCount", String(opts.latestStatsCount)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeaturesResponse; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureGroupsFeaturesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureGroupsFeaturesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureGroupsFeaturesOperationsListWait(name: string, opts: ProjectsLocationsFeatureGroupsFeaturesOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. 
If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeatureGroupsFeaturesOperationsWait(name: string, opts: ProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single Feature. * * @param name Immutable. Name of the Feature. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. */ async projectsLocationsFeatureGroupsFeaturesPatch(name: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeatureGroupsFeaturesPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsFeaturesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Gets details of a single FeatureGroup. * * @param name Required. The name of the FeatureGroup resource. */ async projectsLocationsFeatureGroupsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1FeatureGroup; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureGroupsGetIamPolicy(resource: string, opts: ProjectsLocationsFeatureGroupsGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists FeatureGroups in a given project and location. * * @param parent Required. The resource name of the Location to list FeatureGroups. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureGroupsList(parent: string, opts: ProjectsLocationsFeatureGroupsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureGroups`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeatureGroupsResponse; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureGroupsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureGroupsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureGroupsOperationsListWait(name: string, opts: ProjectsLocationsFeatureGroupsOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
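 *
 * A rough sketch of blocking on a FeatureGroup operation (the operation name
 * is hypothetical; omitting the timeout option leaves the server-side default
 * in place, and because the wait is best-effort the returned operation may
 * still be unfinished):
 *
 * @example
 * const ai = new AIplatform(); // supply a CredentialsClient in real use
 * const opName = "projects/my-project/locations/us-central1/featureGroups/my_group/operations/42"; // hypothetical
 * const op = await ai.projectsLocationsFeatureGroupsOperationsWait(opName);
 * if (!op.done) {
 *   // Deadline elapsed before completion; fall back to polling OperationsGet.
 *   console.log("still running:", op.name);
 * }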
*/ async projectsLocationsFeatureGroupsOperationsWait(name: string, opts: ProjectsLocationsFeatureGroupsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single FeatureGroup. * * @param name Identifier. Name of the FeatureGroup. Format: `projects/{project}/locations/{location}/featureGroups/{featureGroup}` */ async projectsLocationsFeatureGroupsPatch(name: string, req: GoogleCloudAiplatformV1FeatureGroup, opts: ProjectsLocationsFeatureGroupsPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeatureGroupsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureGroupsSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureGroupsTestIamPermissions(resource: string, opts: ProjectsLocationsFeatureGroupsTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Creates a new FeatureOnlineStore in a given project and location. * * @param parent Required. The resource name of the Location to create FeatureOnlineStores. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureOnlineStoresCreate(parent: string, req: GoogleCloudAiplatformV1FeatureOnlineStore, opts: ProjectsLocationsFeatureOnlineStoresCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureOnlineStores`); if (opts.featureOnlineStoreId !== undefined) { url.searchParams.append("featureOnlineStoreId", String(opts.featureOnlineStoreId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single FeatureOnlineStore. The FeatureOnlineStore must not * contain any FeatureViews. * * @param name Required. The name of the FeatureOnlineStore to be deleted. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}` */ async projectsLocationsFeatureOnlineStoresDelete(name: string, opts: ProjectsLocationsFeatureOnlineStoresDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Creates a new FeatureView in a given FeatureOnlineStore. * * @param parent Required. The resource name of the FeatureOnlineStore to create FeatureViews. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsCreate(parent: string, req: GoogleCloudAiplatformV1FeatureView, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1FeatureView(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/featureViews`); if (opts.featureViewId !== undefined) { url.searchParams.append("featureViewId", String(opts.featureViewId)); } if (opts.runSyncImmediately !== undefined) { url.searchParams.append("runSyncImmediately", String(opts.runSyncImmediately)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single FeatureView. * * @param name Required. The name of the FeatureView to be deleted. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets details of a single FeatureViewSync. * * @param name Required. The name of the FeatureViewSync resource. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1FeatureViewSync; } /** * Lists FeatureViewSyncs in a given FeatureView. * * @param parent Required. The resource name of the FeatureView to list FeatureViewSyncs. 
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsList(parent: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureViewSyncs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeatureViewSyncsResponse; } /** * Fetch feature values under a FeatureView. * * @param featureView Required. FeatureView resource format `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}/featureViews/{featureView}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsFetchFeatureValues(featureView: string, req: GoogleCloudAiplatformV1FetchFeatureValuesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ featureView }:fetchFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data); } /** * Gets details of a single FeatureView. * * @param name Required. The name of the FeatureView resource. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1FeatureView(data); } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresFeatureViewsGetIamPolicy(resource: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists FeatureViews in a given FeatureOnlineStore. * * @param parent Required. The resource name of the FeatureOnlineStore to list FeatureViews. 
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsList(parent: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureViews`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListFeatureViewsResponse(data); } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsListWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
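 *
 * @example
 * A hedged sketch (not upstream documentation) of chaining a FeatureView
 * creation with this wait call. The resource names are placeholders and the
 * empty request body is illustrative only; a real FeatureView needs a data
 * source (for example a BigQuery table).
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const client = new AIplatform(); // pass a CredentialsClient in real code
 * const op = await client.projectsLocationsFeatureOnlineStoresFeatureViewsCreate(
 *   "projects/my-project/locations/us-central1/featureOnlineStores/my_store",
 *   {}, // illustrative: populate the FeatureView fields for a real request
 *   { featureViewId: "my_view", runSyncImmediately: true },
 * );
 * if (op.name) {
 *   const finished = await client
 *     .projectsLocationsFeatureOnlineStoresFeatureViewsOperationsWait(op.name);
 *   console.log(finished.done, finished.error);
 * }
 * ```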
*/ async projectsLocationsFeatureOnlineStoresFeatureViewsOperationsWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single FeatureView. * * @param name Identifier. Name of the FeatureView. Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsPatch(name: string, req: GoogleCloudAiplatformV1FeatureView, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1FeatureView(req); opts = serializeProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Search the nearest entities under a FeatureView. Search only works for * indexable feature view; if a feature view isn't indexable, returns Invalid * argument response. * * @param featureView Required. FeatureView resource format `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}/featureViews/{featureView}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsSearchNearestEntities(featureView: string, req: GoogleCloudAiplatformV1SearchNearestEntitiesRequest): Promise { req = serializeGoogleCloudAiplatformV1SearchNearestEntitiesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ featureView }:searchNearestEntities`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1SearchNearestEntitiesResponse(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresFeatureViewsSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Triggers on-demand sync for the FeatureView. * * @param featureView Required. 
Format: `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ async projectsLocationsFeatureOnlineStoresFeatureViewsSync(featureView: string, req: GoogleCloudAiplatformV1SyncFeatureViewRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ featureView }:sync`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1SyncFeatureViewResponse; } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresFeatureViewsTestIamPermissions(resource: string, opts: ProjectsLocationsFeatureOnlineStoresFeatureViewsTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Gets details of a single FeatureOnlineStore. * * @param name Required. The name of the FeatureOnlineStore resource. */ async projectsLocationsFeatureOnlineStoresGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1FeatureOnlineStore; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresGetIamPolicy(resource: string, opts: ProjectsLocationsFeatureOnlineStoresGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists FeatureOnlineStores in a given project and location. * * @param parent Required. The resource name of the Location to list FeatureOnlineStores. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeatureOnlineStoresList(parent: string, opts: ProjectsLocationsFeatureOnlineStoresListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featureOnlineStores`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeatureOnlineStoresResponse; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeatureOnlineStoresOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeatureOnlineStoresOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeatureOnlineStoresOperationsListWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresOperationsListWaitOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
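 *
 * @example
 * A minimal polling sketch (placeholders throughout, not upstream
 * documentation): ask the server to wait first, then poll with
 * projectsLocationsFeatureOnlineStoresOperationsGet until the operation is
 * done, since an early return from this method is no guarantee of
 * completion.
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const client = new AIplatform(); // pass a CredentialsClient in real code
 * const name =
 *   "projects/my-project/locations/us-central1/featureOnlineStores/my_store/operations/456";
 * let op = await client.projectsLocationsFeatureOnlineStoresOperationsWait(name);
 * while (!op.done) {
 *   await new Promise((resolve) => setTimeout(resolve, 5_000)); // back off between polls
 *   op = await client.projectsLocationsFeatureOnlineStoresOperationsGet(name);
 * }
 * ```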
*/ async projectsLocationsFeatureOnlineStoresOperationsWait(name: string, opts: ProjectsLocationsFeatureOnlineStoresOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeatureOnlineStoresOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single FeatureOnlineStore. * * @param name Identifier. Name of the FeatureOnlineStore. Format: `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` */ async projectsLocationsFeatureOnlineStoresPatch(name: string, req: GoogleCloudAiplatformV1FeatureOnlineStore, opts: ProjectsLocationsFeatureOnlineStoresPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeatureOnlineStoresPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeatureOnlineStoresTestIamPermissions(resource: string, opts: ProjectsLocationsFeatureOnlineStoresTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Batch reads Feature values from a Featurestore. This API enables batch * reading Feature values, where each read instance in the batch may read * Feature values of entities from one or more EntityTypes. Point-in-time * correctness is guaranteed for Feature values of each read instance as of * each instance's read timestamp. * * @param featurestore Required. 
The resource name of the Featurestore from which to query Feature values. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresBatchReadFeatureValues(featurestore: string, req: GoogleCloudAiplatformV1BatchReadFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1BatchReadFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ featurestore }:batchReadFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new Featurestore in a given project and location. * * @param parent Required. The resource name of the Location to create Featurestores. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeaturestoresCreate(parent: string, req: GoogleCloudAiplatformV1Featurestore, opts: ProjectsLocationsFeaturestoresCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/featurestores`); if (opts.featurestoreId !== undefined) { url.searchParams.append("featurestoreId", String(opts.featurestoreId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single Featurestore. The Featurestore must not contain any * EntityTypes or `force` must be set to true for the request to succeed. * * @param name Required. The name of the Featurestore to be deleted. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresDelete(name: string, opts: ProjectsLocationsFeaturestoresDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Creates a new EntityType in a given Featurestore. * * @param parent Required. The resource name of the Featurestore to create EntityTypes. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresEntityTypesCreate(parent: string, req: GoogleCloudAiplatformV1EntityType, opts: ProjectsLocationsFeaturestoresEntityTypesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/entityTypes`); if (opts.entityTypeId !== undefined) { url.searchParams.append("entityTypeId", String(opts.entityTypeId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single EntityType. The EntityType must not have any Features or * `force` must be set to true for the request to succeed. * * @param name Required. The name of the EntityType to be deleted. 
Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` */ async projectsLocationsFeaturestoresEntityTypesDelete(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Delete Feature values from Featurestore. The progress of the deletion is * tracked by the returned operation. The deleted feature values are * guaranteed to be invisible to subsequent read operations after the * operation is marked as successfully done. If a delete feature values * operation fails, the feature values returned from reads and exports may be * inconsistent. If consistency is required, the caller must retry the same * delete request again and wait till the new operation returned is marked as * successfully done. * * @param entityType Required. The resource name of the EntityType grouping the Features for which values are being deleted from. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}` */ async projectsLocationsFeaturestoresEntityTypesDeleteFeatureValues(entityType: string, req: GoogleCloudAiplatformV1DeleteFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:deleteFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Exports Feature values from all the entities of a target EntityType. * * @param entityType Required. The resource name of the EntityType from which to export Feature values. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` */ async projectsLocationsFeaturestoresEntityTypesExportFeatureValues(entityType: string, req: GoogleCloudAiplatformV1ExportFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1ExportFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:exportFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a batch of Features in a given EntityType. * * @param parent Required. The resource name of the EntityType/FeatureGroup to create the batch of Features under. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateFeaturesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a new Feature in a given EntityType. * * @param parent Required. The resource name of the EntityType or FeatureGroup to create a Feature. 
Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesCreate(parent: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.featureId !== undefined) { url.searchParams.append("featureId", String(opts.featureId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single Feature. * * @param name Required. The name of the Features to be deleted. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets details of a single Feature. * * @param name Required. The name of the Feature resource. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Feature; } /** * Lists Features in a given EntityType. * * @param parent Required. The resource name of the Location to list Features. Format for entity_type as parent: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` Format for feature_group as parent: `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ async projectsLocationsFeaturestoresEntityTypesFeaturesList(parent: string, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/features`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.latestStatsCount !== undefined) { url.searchParams.append("latestStatsCount", String(opts.latestStatsCount)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeaturesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. 
* If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsList(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
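 *
 * @example
 * A hedged sketch (not upstream documentation) showing how cancellation of a
 * Feature operation can be verified afterwards; all resource names are
 * placeholders.
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const client = new AIplatform(); // pass a CredentialsClient in real code
 * const name =
 *   "projects/my-project/locations/us-central1/featurestores/my_store/entityTypes/user/features/age/operations/789";
 * await client.projectsLocationsFeaturestoresEntityTypesFeaturesOperationsCancel(name);
 * // Cancellation is best-effort, so check the operation state afterwards.
 * const op =
 *   await client.projectsLocationsFeaturestoresEntityTypesFeaturesOperationsGet(name);
 * if (op.done && op.error?.code === 1) {
 *   // google.rpc.Code.CANCELLED: the cancellation took effect.
 * }
 * ```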
*/ async projectsLocationsFeaturestoresEntityTypesFeaturesOperationsWait(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single Feature. * * @param name Immutable. Name of the Feature. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` The last part feature is assigned by the client. The feature can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given an entity type. */ async projectsLocationsFeaturestoresEntityTypesFeaturesPatch(name: string, req: GoogleCloudAiplatformV1Feature, opts: ProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Feature; } /** * Gets details of a single EntityType. * * @param name Required. The name of the EntityType resource. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` */ async projectsLocationsFeaturestoresEntityTypesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1EntityType; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresEntityTypesGetIamPolicy(resource: string, opts: ProjectsLocationsFeaturestoresEntityTypesGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Imports Feature values into the Featurestore from a source storage. The * progress of the import is tracked by the returned operation. The imported * features are guaranteed to be visible to subsequent read operations after * the operation is marked as successfully done. If an import operation fails, * the Feature values returned from reads and exports may be inconsistent. 
If * consistency is required, the caller must retry the same import request * again and wait till the new operation returned is marked as successfully * done. There are also scenarios where the caller can cause inconsistency. - * Source data for import contains multiple distinct Feature values for the * same entity ID and timestamp. - Source is modified during an import. This * includes adding, updating, or removing source data and/or metadata. * Examples of updating metadata include but are not limited to changing * storage location, storage class, or retention policy. - Online serving * cluster is under-provisioned. * * @param entityType Required. The resource name of the EntityType grouping the Features for which values are being imported. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}` */ async projectsLocationsFeaturestoresEntityTypesImportFeatureValues(entityType: string, req: GoogleCloudAiplatformV1ImportFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1ImportFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:importFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Lists EntityTypes in a given Featurestore. * * @param parent Required. The resource name of the Featurestore to list EntityTypes. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresEntityTypesList(parent: string, opts: ProjectsLocationsFeaturestoresEntityTypesListOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/entityTypes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListEntityTypesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsFeaturestoresEntityTypesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. 
It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeaturestoresEntityTypesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeaturestoresEntityTypesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsFeaturestoresEntityTypesOperationsList(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeaturestoresEntityTypesOperationsWait(name: string, opts: ProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single EntityType. * * @param name Immutable. Name of the EntityType. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` The last part entity_type is assigned by the client. The entity_type can be up to 64 characters long and can consist only of ASCII Latin letters A-Z and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The value will be unique given a featurestore. 
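 *
 * @example
 * A minimal sketch (not upstream documentation) of updating only selected
 * EntityType fields; the resource name, description, and the field-mask
 * string are illustrative assumptions.
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const client = new AIplatform(); // pass a CredentialsClient in real code
 * const updated = await client.projectsLocationsFeaturestoresEntityTypesPatch(
 *   "projects/my-project/locations/us-central1/featurestores/my_store/entityTypes/user",
 *   { description: "Registered site users" }, // only fields named in updateMask are written
 *   { updateMask: "description" },
 * );
 * console.log(updated.description);
 * ```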
*/ async projectsLocationsFeaturestoresEntityTypesPatch(name: string, req: GoogleCloudAiplatformV1EntityType, opts: ProjectsLocationsFeaturestoresEntityTypesPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresEntityTypesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1EntityType; } /** * Reads Feature values of a specific entity of an EntityType. For reading * feature values of multiple entities of an EntityType, please use * StreamingReadFeatureValues. * * @param entityType Required. The resource name of the EntityType for the entity being read. Value format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be `user`. */ async projectsLocationsFeaturestoresEntityTypesReadFeatureValues(entityType: string, req: GoogleCloudAiplatformV1ReadFeatureValuesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ entityType }:readFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponse(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresEntityTypesSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Reads Feature values for multiple entities. Depending on their size, data * for different entities may be broken up across multiple responses. * * @param entityType Required. The resource name of the entities' type. Value format: `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be `user`. */ async projectsLocationsFeaturestoresEntityTypesStreamingReadFeatureValues(entityType: string, req: GoogleCloudAiplatformV1StreamingReadFeatureValuesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ entityType }:streamingReadFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponse(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. 
This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresEntityTypesTestIamPermissions(resource: string, opts: ProjectsLocationsFeaturestoresEntityTypesTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Writes Feature values of one or more entities of an EntityType. The * Feature values are merged into existing entities if any. The Feature values * to be written must have timestamp within the online storage retention. * * @param entityType Required. The resource name of the EntityType for the entities being written. Value format: `projects/{project}/locations/{location}/featurestores/ {featurestore}/entityTypes/{entityType}`. For example, for a machine learning model predicting user clicks on a website, an EntityType ID could be `user`. */ async projectsLocationsFeaturestoresEntityTypesWriteFeatureValues(entityType: string, req: GoogleCloudAiplatformV1WriteFeatureValuesRequest): Promise { req = serializeGoogleCloudAiplatformV1WriteFeatureValuesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ entityType }:writeFeatureValues`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1WriteFeatureValuesResponse; } /** * Gets details of a single Featurestore. * * @param name Required. The name of the Featurestore resource. */ async projectsLocationsFeaturestoresGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Featurestore; } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresGetIamPolicy(resource: string, opts: ProjectsLocationsFeaturestoresGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists Featurestores in a given project and location. * * @param parent Required. The resource name of the Location to list Featurestores. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeaturestoresList(parent: string, opts: ProjectsLocationsFeaturestoresListOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/featurestores`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListFeaturestoresResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsFeaturestoresOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsFeaturestoresOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsFeaturestoresOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
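 *
 * @example
 * A minimal pagination sketch (not upstream documentation); the parent
 * resource name and page size are placeholders.
 *
 * ```ts
 * import { AIplatform } from "https://googleapis.deno.dev/v1/aiplatform:v1.ts";
 *
 * const client = new AIplatform(); // pass a CredentialsClient in real code
 * const parent =
 *   "projects/my-project/locations/us-central1/featurestores/my_store";
 * let pageToken: string | undefined;
 * do {
 *   const page = await client.projectsLocationsFeaturestoresOperationsList(parent, {
 *     pageSize: 100,
 *     pageToken,
 *   });
 *   for (const op of page.operations ?? []) console.log(op.name, op.done);
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```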
*/ async projectsLocationsFeaturestoresOperationsList(name: string, opts: ProjectsLocationsFeaturestoresOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsFeaturestoresOperationsWait(name: string, opts: ProjectsLocationsFeaturestoresOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates the parameters of a single Featurestore. * * @param name Output only. Name of the Featurestore. Format: `projects/{project}/locations/{location}/featurestores/{featurestore}` */ async projectsLocationsFeaturestoresPatch(name: string, req: GoogleCloudAiplatformV1Featurestore, opts: ProjectsLocationsFeaturestoresPatchOptions = {}): Promise { opts = serializeProjectsLocationsFeaturestoresPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Searches Features matching a query in a given project. * * @param location Required. The resource name of the Location to search Features. Format: `projects/{project}/locations/{location}` */ async projectsLocationsFeaturestoresSearchFeatures(location: string, opts: ProjectsLocationsFeaturestoresSearchFeaturesOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ location }/featurestores:searchFeatures`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.query !== undefined) { url.searchParams.append("query", String(opts.query)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1SearchFeaturesResponse; } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. 
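 *
 * Illustrative sketch (placeholder resource and member; the role shown is only
 * an example binding, and the supplied policy fully replaces the current one;
 * `ai` is an authenticated `AIplatform` client):
 *
 *     await ai.projectsLocationsFeaturestoresSetIamPolicy(
 *       "projects/my-project/locations/us-central1/featurestores/my_featurestore",
 *       {
 *         policy: {
 *           bindings: [
 *             { role: "roles/aiplatform.user", members: ["user:alice@example.com"] },
 *           ],
 *         },
 *       },
 *     );
 *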
Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsFeaturestoresTestIamPermissions(resource: string, opts: ProjectsLocationsFeaturestoresTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Gets information about a location. * * @param name Resource name for the location. */ async projectsLocationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudLocationLocation; } /** * Cancels a HyperparameterTuningJob. Starts asynchronous cancellation on the * HyperparameterTuningJob. The server makes a best effort to cancel the job, * but success is not guaranteed. Clients can use * JobService.GetHyperparameterTuningJob or other methods to check whether the * cancellation succeeded or whether the job completed despite cancellation. * On successful cancellation, the HyperparameterTuningJob is not deleted; * instead it becomes a job with a HyperparameterTuningJob.error value with a * google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * HyperparameterTuningJob.state is set to `CANCELLED`. * * @param name Required. The name of the HyperparameterTuningJob to cancel. Format: `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` */ async projectsLocationsHyperparameterTuningJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelHyperparameterTuningJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a HyperparameterTuningJob * * @param parent Required. The resource name of the Location to create the HyperparameterTuningJob in. 
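 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client and `tuningJob` is a
 * GoogleCloudAiplatformV1HyperparameterTuningJob assembled elsewhere with its
 * required study and trial configuration):
 *
 *     const job = await ai.projectsLocationsHyperparameterTuningJobsCreate(
 *       "projects/my-project/locations/us-central1",
 *       tuningJob,
 *     );
 *     // The created job starts out pending/running; poll it later with
 *     // projectsLocationsHyperparameterTuningJobsGet(job.name!).
 *     console.log(job.name, job.state);
 *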
Format: `projects/{project}/locations/{location}` */ async projectsLocationsHyperparameterTuningJobsCreate(parent: string, req: GoogleCloudAiplatformV1HyperparameterTuningJob): Promise { req = serializeGoogleCloudAiplatformV1HyperparameterTuningJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/hyperparameterTuningJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(data); } /** * Deletes a HyperparameterTuningJob. * * @param name Required. The name of the HyperparameterTuningJob resource to be deleted. Format: `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` */ async projectsLocationsHyperparameterTuningJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a HyperparameterTuningJob * * @param name Required. The name of the HyperparameterTuningJob resource. Format: `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` */ async projectsLocationsHyperparameterTuningJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(data); } /** * Lists HyperparameterTuningJobs in a Location. * * @param parent Required. The resource name of the Location to list the HyperparameterTuningJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsHyperparameterTuningJobsList(parent: string, opts: ProjectsLocationsHyperparameterTuningJobsListOptions = {}): Promise { opts = serializeProjectsLocationsHyperparameterTuningJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/hyperparameterTuningJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
*/ async projectsLocationsHyperparameterTuningJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsHyperparameterTuningJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsHyperparameterTuningJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsHyperparameterTuningJobsOperationsList(name: string, opts: ProjectsLocationsHyperparameterTuningJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsHyperparameterTuningJobsOperationsWait(name: string, opts: ProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Creates an IndexEndpoint. * * @param parent Required. The resource name of the Location to create the IndexEndpoint in. 
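 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client; creation is asynchronous, so the returned Operation
 * should be polled until `done`):
 *
 *     const op = await ai.projectsLocationsIndexEndpointsCreate(
 *       "projects/my-project/locations/us-central1",
 *       { displayName: "my-index-endpoint" },
 *     );
 *     console.log(op.name, op.done ?? false);
 *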
Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexEndpointsCreate(parent: string, req: GoogleCloudAiplatformV1IndexEndpoint): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/indexEndpoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes an IndexEndpoint. * * @param name Required. The name of the IndexEndpoint resource to be deleted. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Deploys an Index into this IndexEndpoint, creating a DeployedIndex within * it. * * @param indexEndpoint Required. The name of the IndexEndpoint resource into which to deploy an Index. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsDeployIndex(indexEndpoint: string, req: GoogleCloudAiplatformV1DeployIndexRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:deployIndex`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Finds the nearest neighbors of each vector within the request. * * @param indexEndpoint Required. The name of the index endpoint. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsFindNeighbors(indexEndpoint: string, req: GoogleCloudAiplatformV1FindNeighborsRequest): Promise { req = serializeGoogleCloudAiplatformV1FindNeighborsRequest(req); const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:findNeighbors`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1FindNeighborsResponse(data); } /** * Gets an IndexEndpoint. * * @param name Required. The name of the IndexEndpoint resource. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1IndexEndpoint; } /** * Lists IndexEndpoints in a Location. * * @param parent Required. The resource name of the Location from which to list the IndexEndpoints. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexEndpointsList(parent: string, opts: ProjectsLocationsIndexEndpointsListOptions = {}): Promise { opts = serializeProjectsLocationsIndexEndpointsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/indexEndpoints`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListIndexEndpointsResponse; } /** * Update an existing DeployedIndex under an IndexEndpoint. * * @param indexEndpoint Required. The name of the IndexEndpoint resource into which to deploy an Index. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsMutateDeployedIndex(indexEndpoint: string, req: GoogleCloudAiplatformV1DeployedIndex): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:mutateDeployedIndex`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsIndexEndpointsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsIndexEndpointsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsIndexEndpointsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsIndexEndpointsOperationsList(name: string, opts: ProjectsLocationsIndexEndpointsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsIndexEndpointsOperationsWait(name: string, opts: ProjectsLocationsIndexEndpointsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsIndexEndpointsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an IndexEndpoint. * * @param name Output only. The resource name of the IndexEndpoint. */ async projectsLocationsIndexEndpointsPatch(name: string, req: GoogleCloudAiplatformV1IndexEndpoint, opts: ProjectsLocationsIndexEndpointsPatchOptions = {}): Promise { opts = serializeProjectsLocationsIndexEndpointsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1IndexEndpoint; } /** * Reads the datapoints/vectors of the given IDs. A maximum of 1000 * datapoints can be retrieved in a batch. * * @param indexEndpoint Required. The name of the index endpoint. Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsReadIndexDatapoints(indexEndpoint: string, req: GoogleCloudAiplatformV1ReadIndexDatapointsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:readIndexDatapoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ReadIndexDatapointsResponse(data); } /** * Undeploys an Index from an IndexEndpoint, removing a DeployedIndex from * it, and freeing all resources it's using. * * @param indexEndpoint Required. The name of the IndexEndpoint resource from which to undeploy an Index. 
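 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client and `deployedIndexId` must match the id used when the
 * Index was deployed):
 *
 *     const op = await ai.projectsLocationsIndexEndpointsUndeployIndex(
 *       "projects/my-project/locations/us-central1/indexEndpoints/1234567890",
 *       { deployedIndexId: "my_deployed_index" },
 *     );
 *     // Undeployment is asynchronous; `op` is a long-running Operation.
 *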
Format: `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ async projectsLocationsIndexEndpointsUndeployIndex(indexEndpoint: string, req: GoogleCloudAiplatformV1UndeployIndexRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ indexEndpoint }:undeployIndex`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates an Index. * * @param parent Required. The resource name of the Location to create the Index in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexesCreate(parent: string, req: GoogleCloudAiplatformV1Index): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/indexes`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes an Index. An Index can only be deleted when all its * DeployedIndexes had been undeployed. * * @param name Required. The name of the Index resource to be deleted. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets an Index. * * @param name Required. The name of the Index resource. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Index; } /** * Lists Indexes in a Location. * * @param parent Required. The resource name of the Location from which to list the Indexes. Format: `projects/{project}/locations/{location}` */ async projectsLocationsIndexesList(parent: string, opts: ProjectsLocationsIndexesListOptions = {}): Promise { opts = serializeProjectsLocationsIndexesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/indexes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListIndexesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
*/ async projectsLocationsIndexesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsIndexesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsIndexesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsIndexesOperationsList(name: string, opts: ProjectsLocationsIndexesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsIndexesOperationsWait(name: string, opts: ProjectsLocationsIndexesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsIndexesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an Index. * * @param name Output only. The resource name of the Index. 
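 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client; only the fields named in `updateMask` are changed):
 *
 *     const op = await ai.projectsLocationsIndexesPatch(
 *       "projects/my-project/locations/us-central1/indexes/1234567890",
 *       { displayName: "renamed-index" },
 *       { updateMask: "displayName" },
 *     );
 *     // The update itself completes through the returned long-running Operation.
 *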
*/ async projectsLocationsIndexesPatch(name: string, req: GoogleCloudAiplatformV1Index, opts: ProjectsLocationsIndexesPatchOptions = {}): Promise { opts = serializeProjectsLocationsIndexesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Remove Datapoints from an Index. * * @param index Required. The name of the Index resource to be updated. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesRemoveDatapoints(index: string, req: GoogleCloudAiplatformV1RemoveDatapointsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ index }:removeDatapoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1RemoveDatapointsResponse; } /** * Add/update Datapoints into an Index. * * @param index Required. The name of the Index resource to be updated. Format: `projects/{project}/locations/{location}/indexes/{index}` */ async projectsLocationsIndexesUpsertDatapoints(index: string, req: GoogleCloudAiplatformV1UpsertDatapointsRequest): Promise { req = serializeGoogleCloudAiplatformV1UpsertDatapointsRequest(req); const url = new URL(`${this.#baseUrl}v1/${ index }:upsertDatapoints`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1UpsertDatapointsResponse; } /** * Lists information about the supported locations for this service. * * @param name The resource that owns the locations collection, if applicable. */ async projectsLocationsList(name: string, opts: ProjectsLocationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/locations`); if (opts.extraLocationTypes !== undefined) { url.searchParams.append("extraLocationTypes", String(opts.extraLocationTypes)); } if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudLocationListLocationsResponse; } /** * Creates an Artifact associated with a MetadataStore. * * @param parent Required. The resource name of the MetadataStore where the Artifact should be created. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresArtifactsCreate(parent: string, req: GoogleCloudAiplatformV1Artifact, opts: ProjectsLocationsMetadataStoresArtifactsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/artifacts`); if (opts.artifactId !== undefined) { url.searchParams.append("artifactId", String(opts.artifactId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Artifact; } /** * Deletes an Artifact. * * @param name Required. The resource name of the Artifact to delete. 
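 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client; passing the previously read `etag` guards against
 * deleting an Artifact that changed in the meantime):
 *
 *     const artifact = await ai.projectsLocationsMetadataStoresArtifactsGet(
 *       "projects/my-project/locations/us-central1/metadataStores/default/artifacts/my-artifact",
 *     );
 *     const op = await ai.projectsLocationsMetadataStoresArtifactsDelete(
 *       artifact.name!,
 *       { etag: artifact.etag },
 *     );
 *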
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` */ async projectsLocationsMetadataStoresArtifactsDelete(name: string, opts: ProjectsLocationsMetadataStoresArtifactsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.etag !== undefined) { url.searchParams.append("etag", String(opts.etag)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Retrieves a specific Artifact. * * @param name Required. The resource name of the Artifact to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` */ async projectsLocationsMetadataStoresArtifactsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Artifact; } /** * Lists Artifacts in the MetadataStore. * * @param parent Required. The MetadataStore whose Artifacts should be listed. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresArtifactsList(parent: string, opts: ProjectsLocationsMetadataStoresArtifactsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/artifacts`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListArtifactsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMetadataStoresArtifactsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresArtifactsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. 
Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresArtifactsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsMetadataStoresArtifactsOperationsList(name: string, opts: ProjectsLocationsMetadataStoresArtifactsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresArtifactsOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a stored Artifact. * * @param name Output only. The resource name of the Artifact. */ async projectsLocationsMetadataStoresArtifactsPatch(name: string, req: GoogleCloudAiplatformV1Artifact, opts: ProjectsLocationsMetadataStoresArtifactsPatchOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresArtifactsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.allowMissing !== undefined) { url.searchParams.append("allowMissing", String(opts.allowMissing)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Artifact; } /** * Purges Artifacts. * * @param parent Required. The metadata store to purge Artifacts from. 
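 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client; the filter is only an example, and `force: false`
 * previews what would be purged without deleting anything):
 *
 *     const op = await ai.projectsLocationsMetadataStoresArtifactsPurge(
 *       "projects/my-project/locations/us-central1/metadataStores/default",
 *       { filter: 'create_time<"2023-01-01T00:00:00Z"', force: false },
 *     );
 *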
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresArtifactsPurge(parent: string, req: GoogleCloudAiplatformV1PurgeArtifactsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/artifacts:purge`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Retrieves lineage of an Artifact represented through Artifacts and * Executions connected by Event edges and returned as a LineageSubgraph. * * @param artifact Required. The resource name of the Artifact whose Lineage needs to be retrieved as a LineageSubgraph. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the number of Events that would be returned for the Context exceeds 1000. */ async projectsLocationsMetadataStoresArtifactsQueryArtifactLineageSubgraph(artifact: string, opts: ProjectsLocationsMetadataStoresArtifactsQueryArtifactLineageSubgraphOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ artifact }:queryArtifactLineageSubgraph`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.maxHops !== undefined) { url.searchParams.append("maxHops", String(opts.maxHops)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1LineageSubgraph; } /** * Adds a set of Artifacts and Executions to a Context. If any of the * Artifacts or Executions have already been added to a Context, they are * simply skipped. * * @param context Required. The resource name of the Context that the Artifacts and Executions belong to. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsAddContextArtifactsAndExecutions(context: string, req: GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:addContextArtifactsAndExecutions`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse; } /** * Adds a set of Contexts as children to a parent Context. If any of the * child Contexts have already been added to the parent Context, they are * simply skipped. If this call would create a cycle or cause any Context to * have more than 10 parents, the request will fail with an INVALID_ARGUMENT * error. * * @param context Required. The resource name of the parent Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsAddContextChildren(context: string, req: GoogleCloudAiplatformV1AddContextChildrenRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:addContextChildren`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1AddContextChildrenResponse; } /** * Creates a Context associated with a MetadataStore. * * @param parent Required. The resource name of the MetadataStore where the Context should be created. 
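 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client and `contextId` becomes the final component of the
 * Context's resource name):
 *
 *     const context = await ai.projectsLocationsMetadataStoresContextsCreate(
 *       "projects/my-project/locations/us-central1/metadataStores/default",
 *       { displayName: "experiment-42" },
 *       { contextId: "experiment-42" },
 *     );
 *     console.log(context.name);
 *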
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresContextsCreate(parent: string, req: GoogleCloudAiplatformV1Context, opts: ProjectsLocationsMetadataStoresContextsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/contexts`); if (opts.contextId !== undefined) { url.searchParams.append("contextId", String(opts.contextId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Context; } /** * Deletes a stored Context. * * @param name Required. The resource name of the Context to delete. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsDelete(name: string, opts: ProjectsLocationsMetadataStoresContextsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.etag !== undefined) { url.searchParams.append("etag", String(opts.etag)); } if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Retrieves a specific Context. * * @param name Required. The resource name of the Context to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Context; } /** * Lists Contexts on the MetadataStore. * * @param parent Required. The MetadataStore whose Contexts should be listed. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresContextsList(parent: string, opts: ProjectsLocationsMetadataStoresContextsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/contexts`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListContextsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
*/ async projectsLocationsMetadataStoresContextsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresContextsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresContextsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsMetadataStoresContextsOperationsList(name: string, opts: ProjectsLocationsMetadataStoresContextsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresContextsOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresContextsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresContextsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a stored Context. * * @param name Immutable. The resource name of the Context. 
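 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client; only the masked fields change, and `allowMissing`
 * controls whether a missing Context is created instead):
 *
 *     const context = await ai.projectsLocationsMetadataStoresContextsPatch(
 *       "projects/my-project/locations/us-central1/metadataStores/default/contexts/experiment-42",
 *       { description: "baseline run" },
 *       { updateMask: "description", allowMissing: false },
 *     );
 *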
*/ async projectsLocationsMetadataStoresContextsPatch(name: string, req: GoogleCloudAiplatformV1Context, opts: ProjectsLocationsMetadataStoresContextsPatchOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresContextsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.allowMissing !== undefined) { url.searchParams.append("allowMissing", String(opts.allowMissing)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Context; } /** * Purges Contexts. * * @param parent Required. The metadata store to purge Contexts from. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresContextsPurge(parent: string, req: GoogleCloudAiplatformV1PurgeContextsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/contexts:purge`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Retrieves Artifacts and Executions within the specified Context, connected * by Event edges and returned as a LineageSubgraph. * * @param context Required. The resource name of the Context whose Artifacts and Executions should be retrieved as a LineageSubgraph. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` The request may error with FAILED_PRECONDITION if the number of Artifacts, the number of Executions, or the number of Events that would be returned for the Context exceeds 1000. */ async projectsLocationsMetadataStoresContextsQueryContextLineageSubgraph(context: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:queryContextLineageSubgraph`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1LineageSubgraph; } /** * Remove a set of children contexts from a parent Context. If any of the * child Contexts were NOT added to the parent Context, they are simply * skipped. * * @param context Required. The resource name of the parent Context. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}` */ async projectsLocationsMetadataStoresContextsRemoveContextChildren(context: string, req: GoogleCloudAiplatformV1RemoveContextChildrenRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ context }:removeContextChildren`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1RemoveContextChildrenResponse; } /** * Initializes a MetadataStore, including allocation of resources. * * @param parent Required. The resource name of the Location where the MetadataStore should be created. 
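 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client; an empty MetadataStore body is used here as a minimal
 * request, and provisioning completes through the returned long-running
 * Operation):
 *
 *     const op = await ai.projectsLocationsMetadataStoresCreate(
 *       "projects/my-project/locations/us-central1",
 *       {},
 *       { metadataStoreId: "default" },
 *     );
 *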
Format: `projects/{project}/locations/{location}/` */ async projectsLocationsMetadataStoresCreate(parent: string, req: GoogleCloudAiplatformV1MetadataStore, opts: ProjectsLocationsMetadataStoresCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataStores`); if (opts.metadataStoreId !== undefined) { url.searchParams.append("metadataStoreId", String(opts.metadataStoreId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a single MetadataStore and all its child resources (Artifacts, * Executions, and Contexts). * * @param name Required. The resource name of the MetadataStore to delete. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresDelete(name: string, opts: ProjectsLocationsMetadataStoresDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Adds Events to the specified Execution. An Event indicates whether an * Artifact was used as an input or output for an Execution. If an Event * already exists between the Execution and the Artifact, the Event is * skipped. * * @param execution Required. The resource name of the Execution that the Events connect Artifacts with. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsAddExecutionEvents(execution: string, req: GoogleCloudAiplatformV1AddExecutionEventsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ execution }:addExecutionEvents`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1AddExecutionEventsResponse; } /** * Creates an Execution associated with a MetadataStore. * * @param parent Required. The resource name of the MetadataStore where the Execution should be created. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresExecutionsCreate(parent: string, req: GoogleCloudAiplatformV1Execution, opts: ProjectsLocationsMetadataStoresExecutionsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/executions`); if (opts.executionId !== undefined) { url.searchParams.append("executionId", String(opts.executionId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Execution; } /** * Deletes an Execution. * * @param name Required. The resource name of the Execution to delete. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsDelete(name: string, opts: ProjectsLocationsMetadataStoresExecutionsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.etag !== undefined) { url.searchParams.append("etag", String(opts.etag)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Retrieves a specific Execution. 
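 *
 * Illustrative sketch (placeholder names; `ai` is an authenticated
 * `AIplatform` client):
 *
 *     const execution = await ai.projectsLocationsMetadataStoresExecutionsGet(
 *       "projects/my-project/locations/us-central1/metadataStores/default/executions/my-execution",
 *     );
 *     console.log(execution.displayName, execution.state);
 *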
* * @param name Required. The resource name of the Execution to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Execution; } /** * Lists Executions in the MetadataStore. * * @param parent Required. The MetadataStore whose Executions should be listed. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresExecutionsList(parent: string, opts: ProjectsLocationsMetadataStoresExecutionsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/executions`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListExecutionsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMetadataStoresExecutionsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresExecutionsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresExecutionsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. 
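   *
   * Illustrative usage sketch (hypothetical parent resource name and `client`
   * instance; the page size is chosen arbitrarily):
   *
   * ```ts
   * const ai = new AIplatform(client);
   * const page = await ai.projectsLocationsMetadataStoresExecutionsOperationsList(
   *   "projects/my-project/locations/us-central1/metadataStores/default/executions/my-execution",
   *   { pageSize: 50 },
   * );
   * ```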
* * @param name The name of the operation's parent resource. */ async projectsLocationsMetadataStoresExecutionsOperationsList(name: string, opts: ProjectsLocationsMetadataStoresExecutionsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresExecutionsOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a stored Execution. * * @param name Output only. The resource name of the Execution. */ async projectsLocationsMetadataStoresExecutionsPatch(name: string, req: GoogleCloudAiplatformV1Execution, opts: ProjectsLocationsMetadataStoresExecutionsPatchOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresExecutionsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.allowMissing !== undefined) { url.searchParams.append("allowMissing", String(opts.allowMissing)); } if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1Execution; } /** * Purges Executions. * * @param parent Required. The metadata store to purge Executions from. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresExecutionsPurge(parent: string, req: GoogleCloudAiplatformV1PurgeExecutionsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/executions:purge`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Obtains the set of input and output Artifacts for this Execution, in the * form of LineageSubgraph that also contains the Execution and connecting * Events. * * @param execution Required. 
The resource name of the Execution whose input and output Artifacts should be retrieved as a LineageSubgraph. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ async projectsLocationsMetadataStoresExecutionsQueryExecutionInputsAndOutputs(execution: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ execution }:queryExecutionInputsAndOutputs`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1LineageSubgraph; } /** * Retrieves a specific MetadataStore. * * @param name Required. The resource name of the MetadataStore to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1MetadataStore; } /** * Lists MetadataStores for a Location. * * @param parent Required. The Location whose MetadataStores should be listed. Format: `projects/{project}/locations/{location}` */ async projectsLocationsMetadataStoresList(parent: string, opts: ProjectsLocationsMetadataStoresListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataStores`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListMetadataStoresResponse; } /** * Creates a MetadataSchema. * * @param parent Required. The resource name of the MetadataStore where the MetadataSchema should be created. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresMetadataSchemasCreate(parent: string, req: GoogleCloudAiplatformV1MetadataSchema, opts: ProjectsLocationsMetadataStoresMetadataSchemasCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataSchemas`); if (opts.metadataSchemaId !== undefined) { url.searchParams.append("metadataSchemaId", String(opts.metadataSchemaId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1MetadataSchema; } /** * Retrieves a specific MetadataSchema. * * @param name Required. The resource name of the MetadataSchema to retrieve. Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}` */ async projectsLocationsMetadataStoresMetadataSchemasGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1MetadataSchema; } /** * Lists MetadataSchemas. * * @param parent Required. The MetadataStore whose MetadataSchemas should be listed. 
Format: `projects/{project}/locations/{location}/metadataStores/{metadatastore}` */ async projectsLocationsMetadataStoresMetadataSchemasList(parent: string, opts: ProjectsLocationsMetadataStoresMetadataSchemasListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/metadataSchemas`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListMetadataSchemasResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMetadataStoresOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMetadataStoresOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMetadataStoresOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
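   *
   * Illustrative usage sketch (hypothetical parent resource name and `client`
   * instance):
   *
   * ```ts
   * const ai = new AIplatform(client);
   * const page = await ai.projectsLocationsMetadataStoresOperationsList(
   *   "projects/my-project/locations/us-central1/metadataStores/default",
   *   { pageSize: 50 },
   * );
   * // If page.nextPageToken is set, pass it back as `pageToken` to fetch more.
   * ```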
*/ async projectsLocationsMetadataStoresOperationsList(name: string, opts: ProjectsLocationsMetadataStoresOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMetadataStoresOperationsWait(name: string, opts: ProjectsLocationsMetadataStoresOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMetadataStoresOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Batch migrates resources from ml.googleapis.com, automl.googleapis.com, * and datalabeling.googleapis.com to Vertex AI. * * @param parent Required. The location of the migrated resource will live in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsMigratableResourcesBatchMigrate(parent: string, req: GoogleCloudAiplatformV1BatchMigrateResourcesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/migratableResources:batchMigrate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsMigratableResourcesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. 
It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsMigratableResourcesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsMigratableResourcesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsMigratableResourcesOperationsList(name: string, opts: ProjectsLocationsMigratableResourcesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsMigratableResourcesOperationsWait(name: string, opts: ProjectsLocationsMigratableResourcesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsMigratableResourcesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Searches all of the resources in automl.googleapis.com, * datalabeling.googleapis.com and ml.googleapis.com that can be migrated to * Vertex AI's given location. * * @param parent Required. The location that the migratable resources should be searched from. It's the Vertex AI location that the resources can be migrated to, not the resources' original location. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsMigratableResourcesSearch(parent: string, req: GoogleCloudAiplatformV1SearchMigratableResourcesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/migratableResources:search`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1SearchMigratableResourcesResponse; } /** * Creates a ModelDeploymentMonitoringJob. It will run periodically on a * configured interval. * * @param parent Required. The parent of the ModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelDeploymentMonitoringJobsCreate(parent: string, req: GoogleCloudAiplatformV1ModelDeploymentMonitoringJob): Promise { req = serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/modelDeploymentMonitoringJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data); } /** * Deletes a ModelDeploymentMonitoringJob. * * @param name Required. The resource name of the model monitoring job to delete. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a ModelDeploymentMonitoringJob. * * @param name Required. The resource name of the ModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data); } /** * Lists ModelDeploymentMonitoringJobs in a Location. * * @param parent Required. The parent of the ModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelDeploymentMonitoringJobsList(parent: string, opts: ProjectsLocationsModelDeploymentMonitoringJobsListOptions = {}): Promise { opts = serializeProjectsLocationsModelDeploymentMonitoringJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/modelDeploymentMonitoringJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. 
Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsModelDeploymentMonitoringJobsOperationsList(name: string, opts: ProjectsLocationsModelDeploymentMonitoringJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
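   *
   * Illustrative usage sketch (hypothetical operation name and `client`
   * instance; the optional `timeout` wait option is omitted here):
   *
   * ```ts
   * const ai = new AIplatform(client);
   * const op = await ai.projectsLocationsModelDeploymentMonitoringJobsOperationsWait(
   *   "projects/my-project/locations/us-central1/modelDeploymentMonitoringJobs/123/operations/456",
   * );
   * if (op.done) {
   *   // The operation finished (successfully or with op.error) before the wait returned.
   * }
   * ```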
*/ async projectsLocationsModelDeploymentMonitoringJobsOperationsWait(name: string, opts: ProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a ModelDeploymentMonitoringJob. * * @param name Output only. Resource name of a ModelDeploymentMonitoringJob. */ async projectsLocationsModelDeploymentMonitoringJobsPatch(name: string, req: GoogleCloudAiplatformV1ModelDeploymentMonitoringJob, opts: ProjectsLocationsModelDeploymentMonitoringJobsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(req); opts = serializeProjectsLocationsModelDeploymentMonitoringJobsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Pauses a ModelDeploymentMonitoringJob. If the job is running, the server * makes a best effort to cancel the job. Will mark * ModelDeploymentMonitoringJob.state to 'PAUSED'. * * @param name Required. The resource name of the ModelDeploymentMonitoringJob to pause. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsPause(name: string, req: GoogleCloudAiplatformV1PauseModelDeploymentMonitoringJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:pause`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Resumes a paused ModelDeploymentMonitoringJob. It will start to run from * next scheduled time. A deleted ModelDeploymentMonitoringJob can't be * resumed. * * @param name Required. The resource name of the ModelDeploymentMonitoringJob to resume. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsResume(name: string, req: GoogleCloudAiplatformV1ResumeModelDeploymentMonitoringJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:resume`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Searches Model Monitoring Statistics generated within a given time window. * * @param modelDeploymentMonitoringJob Required. ModelDeploymentMonitoring Job resource name. 
Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ async projectsLocationsModelDeploymentMonitoringJobsSearchModelDeploymentMonitoringStatsAnomalies(modelDeploymentMonitoringJob: string, req: GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest): Promise { req = serializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ modelDeploymentMonitoringJob }:searchModelDeploymentMonitoringStatsAnomalies`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse(data); } /** * Copies an already existing Vertex AI Model into the specified Location. * The source Model must exist in the same Project. When copying custom * Models, the users themselves are responsible for Model.metadata content to * be region-agnostic, as well as making sure that any resources (e.g. files) * it depends on remain accessible. * * @param parent Required. The resource name of the Location into which to copy the Model. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelsCopy(parent: string, req: GoogleCloudAiplatformV1CopyModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/models:copy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a Model. A model cannot be deleted if any Endpoint resource has a * DeployedModel based on the model in its deployed_models field. * * @param name Required. The name of the Model resource to be deleted. Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Deletes a Model version. Model version can only be deleted if there are no * DeployedModels created from it. Deleting the only version in the Model is * not allowed. Use DeleteModel for deleting the Model instead. * * @param name Required. The name of the model version to be deleted, with a version ID explicitly included. Example: `projects/{project}/locations/{location}/models/{model}@1234` */ async projectsLocationsModelsDeleteVersion(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:deleteVersion`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a ModelEvaluation. * * @param name Required. The name of the ModelEvaluation resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}` */ async projectsLocationsModelsEvaluationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ModelEvaluation; } /** * Imports an externally generated ModelEvaluation. * * @param parent Required. The name of the parent model resource. 
Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsEvaluationsImport(parent: string, req: GoogleCloudAiplatformV1ImportModelEvaluationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/evaluations:import`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1ModelEvaluation; } /** * Lists ModelEvaluations in a Model. * * @param parent Required. The resource name of the Model to list the ModelEvaluations from. Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsEvaluationsList(parent: string, opts: ProjectsLocationsModelsEvaluationsListOptions = {}): Promise { opts = serializeProjectsLocationsModelsEvaluationsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/evaluations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListModelEvaluationsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsModelsEvaluationsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsModelsEvaluationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsModelsEvaluationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsModelsEvaluationsOperationsList(name: string, opts: ProjectsLocationsModelsEvaluationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsModelsEvaluationsOperationsWait(name: string, opts: ProjectsLocationsModelsEvaluationsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsModelsEvaluationsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Imports a list of externally generated EvaluatedAnnotations. * * @param parent Required. The name of the parent ModelEvaluationSlice resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}` */ async projectsLocationsModelsEvaluationsSlicesBatchImport(parent: string, req: GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }:batchImport`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse; } /** * Gets a ModelEvaluationSlice. * * @param name Required. The name of the ModelEvaluationSlice resource. Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}` */ async projectsLocationsModelsEvaluationsSlicesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ModelEvaluationSlice; } /** * Lists ModelEvaluationSlices in a ModelEvaluation. * * @param parent Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices from. 
Format: `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}` */ async projectsLocationsModelsEvaluationsSlicesList(parent: string, opts: ProjectsLocationsModelsEvaluationsSlicesListOptions = {}): Promise { opts = serializeProjectsLocationsModelsEvaluationsSlicesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/slices`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListModelEvaluationSlicesResponse; } /** * Exports a trained, exportable Model to a location specified by the user. A * Model is considered to be exportable if it has at least one supported * export format. * * @param name Required. The resource name of the Model to export. The resource name may contain version id or version alias to specify the version, if no version is specified, the default version will be exported. */ async projectsLocationsModelsExport(name: string, req: GoogleCloudAiplatformV1ExportModelRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:export`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Gets a Model. * * @param name Required. The name of the Model resource. Format: `projects/{project}/locations/{location}/models/{model}` In order to retrieve a specific version of the model, also provide the version ID or version alias. Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the "default" version will be returned. The "default" version alias is created for the first version of the model, and can be moved to other versions later on. There will be exactly one default version. */ async projectsLocationsModelsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Model(data); } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsModelsGetIamPolicy(resource: string, opts: ProjectsLocationsModelsGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists Models in a Location. * * @param parent Required. The resource name of the Location to list the Models from. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelsList(parent: string, opts: ProjectsLocationsModelsListOptions = {}): Promise { opts = serializeProjectsLocationsModelsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/models`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelsResponse(data); } /** * Lists checkpoints of the specified model version. * * @param name Required. The name of the model version to list checkpoints for. `projects/{project}/locations/{location}/models/{model}@{version}` Example: `projects/{project}/locations/{location}/models/{model}@2` or `projects/{project}/locations/{location}/models/{model}@golden` If no version ID or alias is specified, the latest version will be used. */ async projectsLocationsModelsListCheckpoints(name: string, opts: ProjectsLocationsModelsListCheckpointsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:listCheckpoints`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelVersionCheckpointsResponse(data); } /** * Lists versions of the specified model. * * @param name Required. The name of the model to list versions for. */ async projectsLocationsModelsListVersions(name: string, opts: ProjectsLocationsModelsListVersionsOptions = {}): Promise { opts = serializeProjectsLocationsModelsListVersionsOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:listVersions`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListModelVersionsResponse(data); } /** * Merges a set of aliases for a Model version. * * @param name Required. The name of the model version to merge aliases, with a version ID explicitly included. 
Example: `projects/{project}/locations/{location}/models/{model}@1234` */ async projectsLocationsModelsMergeVersionAliases(name: string, req: GoogleCloudAiplatformV1MergeVersionAliasesRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:mergeVersionAliases`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Model(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsModelsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsModelsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsModelsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsModelsOperationsList(name: string, opts: ProjectsLocationsModelsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. 
If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsModelsOperationsWait(name: string, opts: ProjectsLocationsModelsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsModelsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a Model. * * @param name The resource name of the Model. */ async projectsLocationsModelsPatch(name: string, req: GoogleCloudAiplatformV1Model, opts: ProjectsLocationsModelsPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Model(req); opts = serializeProjectsLocationsModelsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1Model(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsModelsSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsModelsTestIamPermissions(resource: string, opts: ProjectsLocationsModelsTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Incrementally update the dataset used for an examples model. * * @param model Required. The resource name of the Model to update. 
Format: `projects/{project}/locations/{location}/models/{model}` */ async projectsLocationsModelsUpdateExplanationDataset(model: string, req: GoogleCloudAiplatformV1UpdateExplanationDatasetRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ model }:updateExplanationDataset`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Uploads a Model artifact into Vertex AI. * * @param parent Required. The resource name of the Location into which to upload the Model. Format: `projects/{project}/locations/{location}` */ async projectsLocationsModelsUpload(parent: string, req: GoogleCloudAiplatformV1UploadModelRequest): Promise { req = serializeGoogleCloudAiplatformV1UploadModelRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/models:upload`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Cancels a NasJob. Starts asynchronous cancellation on the NasJob. The * server makes a best effort to cancel the job, but success is not * guaranteed. Clients can use JobService.GetNasJob or other methods to check * whether the cancellation succeeded or whether the job completed despite * cancellation. On successful cancellation, the NasJob is not deleted; * instead it becomes a job with a NasJob.error value with a * google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * NasJob.state is set to `CANCELLED`. * * @param name Required. The name of the NasJob to cancel. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelNasJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a NasJob * * @param parent Required. The resource name of the Location to create the NasJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNasJobsCreate(parent: string, req: GoogleCloudAiplatformV1NasJob): Promise { req = serializeGoogleCloudAiplatformV1NasJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/nasJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1NasJob(data); } /** * Deletes a NasJob. * * @param name Required. The name of the NasJob resource to be deleted. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NasJob * * @param name Required. The name of the NasJob resource. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1NasJob(data); } /** * Lists NasJobs in a Location. * * @param parent Required. The resource name of the Location to list the NasJobs from. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsNasJobsList(parent: string, opts: ProjectsLocationsNasJobsListOptions = {}): Promise { opts = serializeProjectsLocationsNasJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/nasJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListNasJobsResponse(data); } /** * Gets a NasTrialDetail. * * @param name Required. The name of the NasTrialDetail resource. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}/nasTrialDetails/{nas_trial_detail}` */ async projectsLocationsNasJobsNasTrialDetailsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1NasTrialDetail; } /** * List top NasTrialDetails of a NasJob. * * @param parent Required. The name of the NasJob resource. Format: `projects/{project}/locations/{location}/nasJobs/{nas_job}` */ async projectsLocationsNasJobsNasTrialDetailsList(parent: string, opts: ProjectsLocationsNasJobsNasTrialDetailsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/nasTrialDetails`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListNasTrialDetailsResponse; } /** * Creates a NotebookExecutionJob. * * @param parent Required. The resource name of the Location to create the NotebookExecutionJob. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookExecutionJobsCreate(parent: string, req: GoogleCloudAiplatformV1NotebookExecutionJob, opts: ProjectsLocationsNotebookExecutionJobsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1NotebookExecutionJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookExecutionJobs`); if (opts.notebookExecutionJobId !== undefined) { url.searchParams.append("notebookExecutionJobId", String(opts.notebookExecutionJobId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a NotebookExecutionJob. * * @param name Required. The name of the NotebookExecutionJob resource to be deleted. */ async projectsLocationsNotebookExecutionJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NotebookExecutionJob. * * @param name Required. The name of the NotebookExecutionJob resource. 
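 *
 * Usage sketch (hypothetical project and job IDs; assumes `ai` is an
 * already-constructed, authenticated instance of this client):
 *
 * ```ts
 * const job = await ai.projectsLocationsNotebookExecutionJobsGet(
 *   "projects/my-project/locations/us-central1/notebookExecutionJobs/123", // hypothetical
 * );
 * ```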
*/ async projectsLocationsNotebookExecutionJobsGet(name: string, opts: ProjectsLocationsNotebookExecutionJobsGetOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.view !== undefined) { url.searchParams.append("view", String(opts.view)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1NotebookExecutionJob(data); } /** * Lists NotebookExecutionJobs in a Location. * * @param parent Required. The resource name of the Location from which to list the NotebookExecutionJobs. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookExecutionJobsList(parent: string, opts: ProjectsLocationsNotebookExecutionJobsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookExecutionJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.view !== undefined) { url.searchParams.append("view", String(opts.view)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListNotebookExecutionJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsNotebookExecutionJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsNotebookExecutionJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. 
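 *
 * Polling sketch (hypothetical operation name; assumes `ai` is an
 * authenticated instance of this client, and that the returned operation
 * follows the standard google.longrunning.Operation shape with `name`,
 * `done`, and `error` fields):
 *
 * ```ts
 * let op = await ai.projectsLocationsNotebookExecutionJobsOperationsGet(
 *   "projects/my-project/locations/us-central1/notebookExecutionJobs/123/operations/456", // hypothetical
 * );
 * while (!op.done) {
 *   await new Promise((r) => setTimeout(r, 5_000)); // back off between polls
 *   op = await ai.projectsLocationsNotebookExecutionJobsOperationsGet(op.name!);
 * }
 * if (op.error) throw new Error(op.error.message);
 * ```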
*/ async projectsLocationsNotebookExecutionJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsNotebookExecutionJobsOperationsList(name: string, opts: ProjectsLocationsNotebookExecutionJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsNotebookExecutionJobsOperationsWait(name: string, opts: ProjectsLocationsNotebookExecutionJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsNotebookExecutionJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Assigns a NotebookRuntime to a user for a particular Notebook file. This * method will either return an existing assignment or generate a new one. * * @param parent Required. The resource name of the Location to get the NotebookRuntime assignment. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimesAssign(parent: string, req: GoogleCloudAiplatformV1AssignNotebookRuntimeRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimes:assign`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be deleted. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner.
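 *
 * Usage sketch (hypothetical runtime name; assumes `ai` is an authenticated
 * instance of this client). Deletion is long-running, so an Operation is
 * returned rather than an immediate result:
 *
 * ```ts
 * const op = await ai.projectsLocationsNotebookRuntimesDelete(
 *   "projects/my-project/locations/us-central1/notebookRuntimes/my-runtime", // hypothetical
 * );
 * console.log("delete started:", op.name);
 * ```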
*/ async projectsLocationsNotebookRuntimesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. */ async projectsLocationsNotebookRuntimesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1NotebookRuntime; } /** * Lists NotebookRuntimes in a Location. * * @param parent Required. The resource name of the Location from which to list the NotebookRuntimes. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimesList(parent: string, opts: ProjectsLocationsNotebookRuntimesListOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimes`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListNotebookRuntimesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsNotebookRuntimesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsNotebookRuntimesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. 
Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsNotebookRuntimesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsNotebookRuntimesOperationsList(name: string, opts: ProjectsLocationsNotebookRuntimesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsNotebookRuntimesOperationsWait(name: string, opts: ProjectsLocationsNotebookRuntimesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Starts a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be started. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. */ async projectsLocationsNotebookRuntimesStart(name: string, req: GoogleCloudAiplatformV1StartNotebookRuntimeRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:start`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Stops a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be stopped. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. 
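 *
 * Usage sketch (hypothetical runtime name; assumes `ai` is an authenticated
 * instance of this client; the request body is passed through as-is and is
 * shown here as an empty object placeholder):
 *
 * ```ts
 * const op = await ai.projectsLocationsNotebookRuntimesStop(
 *   "projects/my-project/locations/us-central1/notebookRuntimes/my-runtime", // hypothetical
 *   {},
 * );
 * ```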
*/ async projectsLocationsNotebookRuntimesStop(name: string, req: GoogleCloudAiplatformV1StopNotebookRuntimeRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:stop`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Upgrades a NotebookRuntime. * * @param name Required. The name of the NotebookRuntime resource to be upgraded. Instead of checking whether the name is in valid NotebookRuntime resource name format, directly throw NotFound exception if there is no such NotebookRuntime in spanner. */ async projectsLocationsNotebookRuntimesUpgrade(name: string, req: GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:upgrade`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Creates a NotebookRuntimeTemplate. * * @param parent Required. The resource name of the Location to create the NotebookRuntimeTemplate. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimeTemplatesCreate(parent: string, req: GoogleCloudAiplatformV1NotebookRuntimeTemplate, opts: ProjectsLocationsNotebookRuntimeTemplatesCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimeTemplates`); if (opts.notebookRuntimeTemplateId !== undefined) { url.searchParams.append("notebookRuntimeTemplateId", String(opts.notebookRuntimeTemplateId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a NotebookRuntimeTemplate. * * @param name Required. The name of the NotebookRuntimeTemplate resource to be deleted. Format: `projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}` */ async projectsLocationsNotebookRuntimeTemplatesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a NotebookRuntimeTemplate. * * @param name Required. The name of the NotebookRuntimeTemplate resource. Format: `projects/{project}/locations/{location}/notebookRuntimeTemplates/{notebook_runtime_template}` */ async projectsLocationsNotebookRuntimeTemplatesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data); } /** * Gets the access control policy for a resource. Returns an empty policy if * the resource exists and does not have a policy set. * * @param resource REQUIRED: The resource for which the policy is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field.
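 *
 * Usage sketch (hypothetical template name; assumes `ai` is an authenticated
 * instance of this client; policy version 3 is requested so that conditional
 * role bindings, if any, are returned in full):
 *
 * ```ts
 * const policy = await ai.projectsLocationsNotebookRuntimeTemplatesGetIamPolicy(
 *   "projects/my-project/locations/us-central1/notebookRuntimeTemplates/my-template", // hypothetical
 *   { "options.requestedPolicyVersion": 3 },
 * );
 * ```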
*/ async projectsLocationsNotebookRuntimeTemplatesGetIamPolicy(resource: string, opts: ProjectsLocationsNotebookRuntimeTemplatesGetIamPolicyOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:getIamPolicy`); if (opts["options.requestedPolicyVersion"] !== undefined) { url.searchParams.append("options.requestedPolicyVersion", String(opts["options.requestedPolicyVersion"])); } const data = await request(url.href, { client: this.#client, method: "POST", }); return deserializeGoogleIamV1Policy(data); } /** * Lists NotebookRuntimeTemplates in a Location. * * @param parent Required. The resource name of the Location from which to list the NotebookRuntimeTemplates. Format: `projects/{project}/locations/{location}` */ async projectsLocationsNotebookRuntimeTemplatesList(parent: string, opts: ProjectsLocationsNotebookRuntimeTemplatesListOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimeTemplatesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/notebookRuntimeTemplates`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsNotebookRuntimeTemplatesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsNotebookRuntimeTemplatesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. 
*/ async projectsLocationsNotebookRuntimeTemplatesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsNotebookRuntimeTemplatesOperationsList(name: string, opts: ProjectsLocationsNotebookRuntimeTemplatesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsNotebookRuntimeTemplatesOperationsWait(name: string, opts: ProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a NotebookRuntimeTemplate. * * @param name The resource name of the NotebookRuntimeTemplate. */ async projectsLocationsNotebookRuntimeTemplatesPatch(name: string, req: GoogleCloudAiplatformV1NotebookRuntimeTemplate, opts: ProjectsLocationsNotebookRuntimeTemplatesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(req); opts = serializeProjectsLocationsNotebookRuntimeTemplatesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data); } /** * Sets the access control policy on the specified resource. Replaces any * existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and * `PERMISSION_DENIED` errors. * * @param resource REQUIRED: The resource for which the policy is being specified. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. 
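 *
 * Usage sketch (hypothetical resource and principal; assumes `ai` is an
 * authenticated instance of this client and that the request follows the
 * standard GoogleIamV1SetIamPolicyRequest / GoogleIamV1Policy shape):
 *
 * ```ts
 * await ai.projectsLocationsNotebookRuntimeTemplatesSetIamPolicy(
 *   "projects/my-project/locations/us-central1/notebookRuntimeTemplates/my-template", // hypothetical
 *   {
 *     policy: {
 *       bindings: [
 *         { role: "roles/viewer", members: ["user:someone@example.com"] }, // hypothetical
 *       ],
 *     },
 *   },
 * );
 * // In practice, read the current policy first and carry its etag forward
 * // to avoid overwriting concurrent changes.
 * ```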
*/ async projectsLocationsNotebookRuntimeTemplatesSetIamPolicy(resource: string, req: GoogleIamV1SetIamPolicyRequest): Promise { req = serializeGoogleIamV1SetIamPolicyRequest(req); const url = new URL(`${this.#baseUrl}v1/${ resource }:setIamPolicy`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleIamV1Policy(data); } /** * Returns permissions that a caller has on the specified resource. If the * resource does not exist, this will return an empty set of permissions, not * a `NOT_FOUND` error. Note: This operation is designed to be used for * building permission-aware UIs and command-line tools, not for authorization * checking. This operation may "fail open" without warning. * * @param resource REQUIRED: The resource for which the policy detail is being requested. See [Resource names](https://cloud.google.com/apis/design/resource_names) for the appropriate value for this field. */ async projectsLocationsNotebookRuntimeTemplatesTestIamPermissions(resource: string, opts: ProjectsLocationsNotebookRuntimeTemplatesTestIamPermissionsOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ resource }:testIamPermissions`); if (opts.permissions !== undefined) { url.searchParams.append("permissions", String(opts.permissions)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleIamV1TestIamPermissionsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsOperationsList(name: string, opts: ProjectsLocationsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsOperationsWait(name: string, opts: ProjectsLocationsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Creates a PersistentResource. * * @param parent Required. The resource name of the Location to create the PersistentResource in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPersistentResourcesCreate(parent: string, req: GoogleCloudAiplatformV1PersistentResource, opts: ProjectsLocationsPersistentResourcesCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1PersistentResource(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/persistentResources`); if (opts.persistentResourceId !== undefined) { url.searchParams.append("persistentResourceId", String(opts.persistentResourceId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a PersistentResource. * * @param name Required. The name of the PersistentResource to be deleted. Format: `projects/{project}/locations/{location}/persistentResources/{persistent_resource}` */ async projectsLocationsPersistentResourcesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a PersistentResource. * * @param name Required. The name of the PersistentResource resource. 
Format: `projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}` */ async projectsLocationsPersistentResourcesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1PersistentResource(data); } /** * Lists PersistentResources in a Location. * * @param parent Required. The resource name of the Location to list the PersistentResources from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPersistentResourcesList(parent: string, opts: ProjectsLocationsPersistentResourcesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/persistentResources`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListPersistentResourcesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsPersistentResourcesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsPersistentResourcesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsPersistentResourcesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsPersistentResourcesOperationsList(name: string, opts: ProjectsLocationsPersistentResourcesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsPersistentResourcesOperationsWait(name: string, opts: ProjectsLocationsPersistentResourcesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsPersistentResourcesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a PersistentResource. * * @param name Immutable. Resource name of a PersistentResource. */ async projectsLocationsPersistentResourcesPatch(name: string, req: GoogleCloudAiplatformV1PersistentResource, opts: ProjectsLocationsPersistentResourcesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1PersistentResource(req); opts = serializeProjectsLocationsPersistentResourcesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Reboots a PersistentResource. * * @param name Required. The name of the PersistentResource resource. Format: `projects/{project_id_or_number}/locations/{location_id}/persistentResources/{persistent_resource_id}` */ async projectsLocationsPersistentResourcesReboot(name: string, req: GoogleCloudAiplatformV1RebootPersistentResourceRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:reboot`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Batch cancel PipelineJobs. Firstly the server will check if all the jobs * are in non-terminal states, and skip the jobs that are already terminated. * If the operation failed, none of the pipeline jobs are cancelled. The * server will poll the states of all the pipeline jobs periodically to check * the cancellation status. This operation will return an LRO. 
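 *
 * Usage sketch (hypothetical project and job IDs; assumes `ai` is an
 * authenticated instance of this client, and that the request carries the
 * PipelineJob names in a `names` field, which is an assumption here):
 *
 * ```ts
 * const op = await ai.projectsLocationsPipelineJobsBatchCancel(
 *   "projects/my-project/locations/us-central1", // hypothetical
 *   {
 *     names: [
 *       "projects/my-project/locations/us-central1/pipelineJobs/job-1", // hypothetical
 *       "projects/my-project/locations/us-central1/pipelineJobs/job-2", // hypothetical
 *     ],
 *   },
 * );
 * ```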
* * @param parent Required. The name of the PipelineJobs' parent resource. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsBatchCancel(parent: string, req: GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs:batchCancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Batch deletes PipelineJobs. The Operation is atomic. If it fails, none of * the PipelineJobs are deleted. If it succeeds, all of the PipelineJobs are * deleted. * * @param parent Required. The name of the PipelineJobs' parent resource. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsBatchDelete(parent: string, req: GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs:batchDelete`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Cancels a PipelineJob. Starts asynchronous cancellation on the * PipelineJob. The server makes a best effort to cancel the pipeline, but * success is not guaranteed. Clients can use PipelineService.GetPipelineJob * or other methods to check whether the cancellation succeeded or whether the * pipeline completed despite cancellation. On successful cancellation, the * PipelineJob is not deleted; instead it becomes a pipeline with a * PipelineJob.error value with a google.rpc.Status.code of 1, corresponding * to `Code.CANCELLED`, and PipelineJob.state is set to `CANCELLED`. * * @param name Required. The name of the PipelineJob to cancel. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}` */ async projectsLocationsPipelineJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelPipelineJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a PipelineJob. A PipelineJob will run immediately when created. * * @param parent Required. The resource name of the Location to create the PipelineJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsCreate(parent: string, req: GoogleCloudAiplatformV1PipelineJob, opts: ProjectsLocationsPipelineJobsCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1PipelineJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs`); if (opts.pipelineJobId !== undefined) { url.searchParams.append("pipelineJobId", String(opts.pipelineJobId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1PipelineJob(data); } /** * Deletes a PipelineJob. * * @param name Required. The name of the PipelineJob resource to be deleted. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}` */ async projectsLocationsPipelineJobsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a PipelineJob. * * @param name Required.
The name of the PipelineJob resource. Format: `projects/{project}/locations/{location}/pipelineJobs/{pipeline_job}` */ async projectsLocationsPipelineJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1PipelineJob(data); } /** * Lists PipelineJobs in a Location. * * @param parent Required. The resource name of the Location to list the PipelineJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsPipelineJobsList(parent: string, opts: ProjectsLocationsPipelineJobsListOptions = {}): Promise { opts = serializeProjectsLocationsPipelineJobsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/pipelineJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListPipelineJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsPipelineJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsPipelineJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsPipelineJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. 
If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsPipelineJobsOperationsList(name: string, opts: ProjectsLocationsPipelineJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsPipelineJobsOperationsWait(name: string, opts: ProjectsLocationsPipelineJobsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsPipelineJobsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. */ async projectsLocationsPublishersModelsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Fetch an asynchronous online prediction operation. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` or `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` */ async projectsLocationsPublishersModelsFetchPredictOperation(endpoint: string, req: GoogleCloudAiplatformV1FetchPredictOperationRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:fetchPredictOperation`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform an online prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsPredict(endpoint: string, req: GoogleCloudAiplatformV1PredictRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1PredictResponse; } async projectsLocationsPublishersModelsPredictLongRunning(endpoint: string, req: GoogleCloudAiplatformV1PredictLongRunningRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predictLongRunning`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Perform an online prediction with an arbitrary HTTP payload. The response * includes the following HTTP headers: * `X-Vertex-AI-Endpoint-Id`: ID of the * Endpoint that served this prediction. * `X-Vertex-AI-Deployed-Model-Id`: ID * of the Endpoint's DeployedModel that served this prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsRawPredict(endpoint: string, req: GoogleCloudAiplatformV1RawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1RawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:rawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Perform a server-side streaming online prediction request for Vertex LLM * streaming. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. 
Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsServerStreamingPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamingPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamingPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:serverStreamingPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1StreamingPredictResponse(data); } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Perform a streaming online prediction with an arbitrary HTTP payload. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async projectsLocationsPublishersModelsStreamRawPredict(endpoint: string, req: GoogleCloudAiplatformV1StreamRawPredictRequest): Promise { req = serializeGoogleCloudAiplatformV1StreamRawPredictRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:streamRawPredict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Creates a RagCorpus. * * @param parent Required. The resource name of the Location to create the RagCorpus in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsRagCorporaCreate(parent: string, req: GoogleCloudAiplatformV1RagCorpus): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/ragCorpora`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a RagCorpus. * * @param name Required. The name of the RagCorpus resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaDelete(name: string, opts: ProjectsLocationsRagCorporaDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a RagCorpus. * * @param name Required. The name of the RagCorpus resource. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1RagCorpus; } /** * Lists RagCorpora in a Location. * * @param parent Required. The resource name of the Location from which to list the RagCorpora. Format: `projects/{project}/locations/{location}` */ async projectsLocationsRagCorporaList(parent: string, opts: ProjectsLocationsRagCorporaListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/ragCorpora`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListRagCorporaResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsRagCorporaOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsRagCorporaOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsRagCorporaOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
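 *
 * A minimal usage sketch (the parent resource name and page size below are
 * hypothetical placeholders; credentials, if needed, are assumed to be
 * supplied via the client passed to the constructor):
 *
 * ```ts
 * const ai = new AIplatform();
 * const res = await ai.projectsLocationsRagCorporaOperationsList(
 *   "projects/my-project/locations/us-central1/ragCorpora/123",
 *   { pageSize: 50 },
 * );
 * ```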
*/ async projectsLocationsRagCorporaOperationsList(name: string, opts: ProjectsLocationsRagCorporaOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsRagCorporaOperationsWait(name: string, opts: ProjectsLocationsRagCorporaOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsRagCorporaOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a RagCorpus. * * @param name Output only. The resource name of the RagCorpus. */ async projectsLocationsRagCorporaPatch(name: string, req: GoogleCloudAiplatformV1RagCorpus): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a RagFile. * * @param name Required. The name of the RagFile resource to be deleted. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` */ async projectsLocationsRagCorporaRagFilesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a RagFile. * * @param name Required. The name of the RagFile resource. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}/ragFiles/{rag_file}` */ async projectsLocationsRagCorporaRagFilesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1RagFile(data); } /** * Import files from Google Cloud Storage or Google Drive into a RagCorpus. * * @param parent Required. The name of the RagCorpus resource into which to import files. 
Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaRagFilesImport(parent: string, req: GoogleCloudAiplatformV1ImportRagFilesRequest): Promise { req = serializeGoogleCloudAiplatformV1ImportRagFilesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/ragFiles:import`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Lists RagFiles in a RagCorpus. * * @param parent Required. The resource name of the RagCorpus from which to list the RagFiles. Format: `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ async projectsLocationsRagCorporaRagFilesList(parent: string, opts: ProjectsLocationsRagCorporaRagFilesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/ragFiles`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListRagFilesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsRagCorporaRagFilesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsRagCorporaRagFilesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsRagCorporaRagFilesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
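 *
 * Sketch of paging through results (assumes an `AIplatform` instance `ai`
 * and the standard `operations`/`nextPageToken` fields on the list
 * response; the parent name is a placeholder):
 *
 * ```ts
 * let pageToken: string | undefined = undefined;
 * do {
 *   const page = await ai.projectsLocationsRagCorporaRagFilesOperationsList(
 *     "projects/my-project/locations/us-central1/ragCorpora/123",
 *     { pageSize: 100, pageToken },
 *   );
 *   for (const op of page.operations ?? []) console.log(op.name);
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```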
*/ async projectsLocationsRagCorporaRagFilesOperationsList(name: string, opts: ProjectsLocationsRagCorporaRagFilesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsRagCorporaRagFilesOperationsWait(name: string, opts: ProjectsLocationsRagCorporaRagFilesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsRagCorporaRagFilesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsRagEngineConfigOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsRagEngineConfigOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. 
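 *
 * For example, a simple polling loop might look like this (sketch only;
 * `ai` is an `AIplatform` instance, `opName` is a placeholder operation
 * resource name, and the standard `done` flag on the operation is assumed):
 *
 * ```ts
 * let op = await ai.projectsLocationsRagEngineConfigOperationsGet(opName);
 * while (!op.done) {
 *   // back off a few seconds between polls
 *   await new Promise((r) => setTimeout(r, 5_000));
 *   op = await ai.projectsLocationsRagEngineConfigOperationsGet(opName);
 * }
 * ```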
* * @param name The name of the operation resource. */ async projectsLocationsRagEngineConfigOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsRagEngineConfigOperationsList(name: string, opts: ProjectsLocationsRagEngineConfigOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsRagEngineConfigOperationsWait(name: string, opts: ProjectsLocationsRagEngineConfigOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsRagEngineConfigOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Creates a reasoning engine. * * @param parent Required. The resource name of the Location to create the ReasoningEngine in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsReasoningEnginesCreate(parent: string, req: GoogleCloudAiplatformV1ReasoningEngine): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/reasoningEngines`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a reasoning engine. * * @param name Required. The name of the ReasoningEngine resource to be deleted. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesDelete(name: string, opts: ProjectsLocationsReasoningEnginesDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a reasoning engine. * * @param name Required. 
The name of the ReasoningEngine resource. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ReasoningEngine; } /** * Lists reasoning engines in a location. * * @param parent Required. The resource name of the Location to list the ReasoningEngines from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsReasoningEnginesList(parent: string, opts: ProjectsLocationsReasoningEnginesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/reasoningEngines`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListReasoningEnginesResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsReasoningEnginesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsReasoningEnginesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsReasoningEnginesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
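 *
 * For example (the parent name is a hypothetical placeholder; `ai` is an
 * `AIplatform` instance created elsewhere, and the standard `operations`
 * field on the response is assumed):
 *
 * ```ts
 * const res = await ai.projectsLocationsReasoningEnginesOperationsList(
 *   "projects/my-project/locations/us-central1/reasoningEngines/456",
 * );
 * console.log(res.operations?.length ?? 0, "operations");
 * ```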
*/ async projectsLocationsReasoningEnginesOperationsList(name: string, opts: ProjectsLocationsReasoningEnginesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsReasoningEnginesOperationsWait(name: string, opts: ProjectsLocationsReasoningEnginesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsReasoningEnginesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a reasoning engine. * * @param name Identifier. The resource name of the ReasoningEngine. */ async projectsLocationsReasoningEnginesPatch(name: string, req: GoogleCloudAiplatformV1ReasoningEngine, opts: ProjectsLocationsReasoningEnginesPatchOptions = {}): Promise { opts = serializeProjectsLocationsReasoningEnginesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Queries using a reasoning engine. * * @param name Required. The name of the ReasoningEngine resource to use. Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesQuery(name: string, req: GoogleCloudAiplatformV1QueryReasoningEngineRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:query`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1QueryReasoningEngineResponse; } /** * Streams queries using a reasoning engine. * * @param name Required. The name of the ReasoningEngine resource to use. 
Format: `projects/{project}/locations/{location}/reasoningEngines/{reasoning_engine}` */ async projectsLocationsReasoningEnginesStreamQuery(name: string, req: GoogleCloudAiplatformV1StreamQueryReasoningEngineRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:streamQuery`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleApiHttpBody(data); } /** * Retrieves relevant contexts for a query. * * @param parent Required. The resource name of the Location from which to retrieve RagContexts. The users must have permission to make a call in the project. Format: `projects/{project}/locations/{location}`. */ async projectsLocationsRetrieveContexts(parent: string, req: GoogleCloudAiplatformV1RetrieveContextsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }:retrieveContexts`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1RetrieveContextsResponse; } /** * Creates a Schedule. * * @param parent Required. The resource name of the Location to create the Schedule in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsSchedulesCreate(parent: string, req: GoogleCloudAiplatformV1Schedule): Promise { req = serializeGoogleCloudAiplatformV1Schedule(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/schedules`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Schedule(data); } /** * Deletes a Schedule. * * @param name Required. The name of the Schedule resource to be deleted. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a Schedule. * * @param name Required. The name of the Schedule resource. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Schedule(data); } /** * Lists Schedules in a Location. * * @param parent Required. The resource name of the Location to list the Schedules from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsSchedulesList(parent: string, opts: ProjectsLocationsSchedulesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/schedules`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListSchedulesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. 
* If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsSchedulesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsSchedulesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsSchedulesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsSchedulesOperationsList(name: string, opts: ProjectsLocationsSchedulesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. 
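 *
 * A sketch of blocking on a schedule operation (the operation name is a
 * placeholder; an optional `timeout` can also be supplied through
 * ProjectsLocationsSchedulesOperationsWaitOptions):
 *
 * ```ts
 * const op = await ai.projectsLocationsSchedulesOperationsWait(
 *   "projects/my-project/locations/us-central1/schedules/789/operations/42",
 * );
 * if (op.done) console.log("operation finished (possibly with an error)");
 * ```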
*/ async projectsLocationsSchedulesOperationsWait(name: string, opts: ProjectsLocationsSchedulesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsSchedulesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates an active or paused Schedule. When the Schedule is updated, new * runs will be scheduled starting from the updated next execution time after * the update time based on the time_specification in the updated Schedule. * All unstarted runs before the update time will be skipped while already * created runs will NOT be paused or canceled. * * @param name Immutable. The resource name of the Schedule. */ async projectsLocationsSchedulesPatch(name: string, req: GoogleCloudAiplatformV1Schedule, opts: ProjectsLocationsSchedulesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1Schedule(req); opts = serializeProjectsLocationsSchedulesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1Schedule(data); } /** * Pauses a Schedule. Will mark Schedule.state to 'PAUSED'. If the schedule * is paused, no new runs will be created. Already created runs will NOT be * paused or canceled. * * @param name Required. The name of the Schedule resource to be paused. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesPause(name: string, req: GoogleCloudAiplatformV1PauseScheduleRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:pause`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Resumes a paused Schedule to start scheduling new runs. Will mark * Schedule.state to 'ACTIVE'. Only paused Schedule can be resumed. When the * Schedule is resumed, new runs will be scheduled starting from the next * execution time after the current time based on the time_specification in * the Schedule. If Schedule.catch_up is set up true, all missed runs will be * scheduled for backfill first. * * @param name Required. The name of the Schedule resource to be resumed. Format: `projects/{project}/locations/{location}/schedules/{schedule}` */ async projectsLocationsSchedulesResume(name: string, req: GoogleCloudAiplatformV1ResumeScheduleRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:resume`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a SpecialistPool. * * @param parent Required. The parent Project name for the new SpecialistPool. The form is `projects/{project}/locations/{location}`. 
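 *
 * Example sketch: the request sets only a display name for illustration;
 * consult GoogleCloudAiplatformV1SpecialistPool for the full set of fields,
 * and treat the parent and pool name below as placeholders:
 *
 * ```ts
 * const op = await ai.projectsLocationsSpecialistPoolsCreate(
 *   "projects/my-project/locations/us-central1",
 *   { displayName: "my-labeling-pool" },
 * );
 * console.log("create started:", op.name);
 * ```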
*/ async projectsLocationsSpecialistPoolsCreate(parent: string, req: GoogleCloudAiplatformV1SpecialistPool): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/specialistPools`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a SpecialistPool as well as all Specialists in the pool. * * @param name Required. The resource name of the SpecialistPool to delete. Format: `projects/{project}/locations/{location}/specialistPools/{specialist_pool}` */ async projectsLocationsSpecialistPoolsDelete(name: string, opts: ProjectsLocationsSpecialistPoolsDeleteOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.force !== undefined) { url.searchParams.append("force", String(opts.force)); } const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a SpecialistPool. * * @param name Required. The name of the SpecialistPool resource. The form is `projects/{project}/locations/{location}/specialistPools/{specialist_pool}`. */ async projectsLocationsSpecialistPoolsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1SpecialistPool; } /** * Lists SpecialistPools in a Location. * * @param parent Required. The name of the SpecialistPool's parent resource. Format: `projects/{project}/locations/{location}` */ async projectsLocationsSpecialistPoolsList(parent: string, opts: ProjectsLocationsSpecialistPoolsListOptions = {}): Promise { opts = serializeProjectsLocationsSpecialistPoolsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/specialistPools`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListSpecialistPoolsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsSpecialistPoolsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. 
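 *
 * For example, once an operation's result has been consumed it can be
 * dropped (sketch; the operation name is a placeholder):
 *
 * ```ts
 * await ai.projectsLocationsSpecialistPoolsOperationsDelete(
 *   "projects/my-project/locations/us-central1/specialistPools/1/operations/2",
 * );
 * ```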
* * @param name The name of the operation resource to be deleted. */ async projectsLocationsSpecialistPoolsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsSpecialistPoolsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsSpecialistPoolsOperationsList(name: string, opts: ProjectsLocationsSpecialistPoolsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsSpecialistPoolsOperationsWait(name: string, opts: ProjectsLocationsSpecialistPoolsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsSpecialistPoolsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a SpecialistPool. * * @param name Required. The resource name of the SpecialistPool. */ async projectsLocationsSpecialistPoolsPatch(name: string, req: GoogleCloudAiplatformV1SpecialistPool, opts: ProjectsLocationsSpecialistPoolsPatchOptions = {}): Promise { opts = serializeProjectsLocationsSpecialistPoolsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Creates a Study. 
A resource name will be generated after creation of the * Study. * * @param parent Required. The resource name of the Location to create the CustomJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsStudiesCreate(parent: string, req: GoogleCloudAiplatformV1Study): Promise { req = serializeGoogleCloudAiplatformV1Study(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/studies`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Study(data); } /** * Deletes a Study. * * @param name Required. The name of the Study resource to be deleted. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets a Study by name. * * @param name Required. The name of the Study resource. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1Study(data); } /** * Lists all the studies in a region for an associated project. * * @param parent Required. The resource name of the Location to list the Study from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsStudiesList(parent: string, opts: ProjectsLocationsStudiesListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/studies`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListStudiesResponse(data); } /** * Looks a study up using the user-defined display_name field instead of the * fully qualified resource name. * * @param parent Required. The resource name of the Location to get the Study from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsStudiesLookup(parent: string, req: GoogleCloudAiplatformV1LookupStudyRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/studies:lookup`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1Study(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. 
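 *
 * Sketch of requesting cancellation and then confirming the final state
 * (resource names are placeholders; `ai` is an `AIplatform` instance and
 * the standard `done`/`error` fields of the operation are assumed):
 *
 * ```ts
 * const opName =
 *   "projects/my-project/locations/us-central1/studies/3/operations/4";
 * await ai.projectsLocationsStudiesOperationsCancel(opName);
 * const op = await ai.projectsLocationsStudiesOperationsGet(opName);
 * console.log("done:", op.done, "error:", op.error);
 * ```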
*/ async projectsLocationsStudiesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsStudiesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsStudiesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsStudiesOperationsList(name: string, opts: ProjectsLocationsStudiesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsStudiesOperationsWait(name: string, opts: ProjectsLocationsStudiesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsStudiesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Adds a measurement of the objective metrics to a Trial. This measurement * is assumed to have been taken before the Trial is complete. * * @param trialName Required. The name of the trial to add measurement. 
Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsAddTrialMeasurement(trialName: string, req: GoogleCloudAiplatformV1AddTrialMeasurementRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ trialName }:addTrialMeasurement`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Checks whether a Trial should stop or not. Returns a long-running * operation. When the operation is successful, it will contain a * CheckTrialEarlyStoppingStateResponse. * * @param trialName Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsCheckTrialEarlyStoppingState(trialName: string, req: GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ trialName }:checkTrialEarlyStoppingState`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Marks a Trial as complete. * * @param name Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsComplete(name: string, req: GoogleCloudAiplatformV1CompleteTrialRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:complete`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Adds a user provided Trial to a Study. * * @param parent Required. The resource name of the Study to create the Trial in. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesTrialsCreate(parent: string, req: GoogleCloudAiplatformV1Trial): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Deletes a Trial. * * @param name Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets a Trial. * * @param name Required. The name of the Trial resource. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Trial; } /** * Lists the Trials associated with a Study. * * @param parent Required. The resource name of the Study to list the Trial from. 
Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesTrialsList(parent: string, opts: ProjectsLocationsStudiesTrialsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials`); if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTrialsResponse; } /** * Lists the pareto-optimal Trials for multi-objective Study or the optimal * Trials for single-objective Study. The definition of pareto-optimal can be * checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency * * @param parent Required. The name of the Study that the optimal Trial belongs to. */ async projectsLocationsStudiesTrialsListOptimalTrials(parent: string, req: GoogleCloudAiplatformV1ListOptimalTrialsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials:listOptimalTrials`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1ListOptimalTrialsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsStudiesTrialsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsStudiesTrialsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsStudiesTrialsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
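 *
 * For example (the parent name is a placeholder; whether the parent is the
 * study or the trial follows the resource hierarchy described above and is
 * assumed here):
 *
 * ```ts
 * const res = await ai.projectsLocationsStudiesTrialsOperationsList(
 *   "projects/my-project/locations/us-central1/studies/3/trials/7",
 *   { pageSize: 10 },
 * );
 * ```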
*/ async projectsLocationsStudiesTrialsOperationsList(name: string, opts: ProjectsLocationsStudiesTrialsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsStudiesTrialsOperationsWait(name: string, opts: ProjectsLocationsStudiesTrialsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsStudiesTrialsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Stops a Trial. * * @param name Required. The Trial's name. Format: `projects/{project}/locations/{location}/studies/{study}/trials/{trial}` */ async projectsLocationsStudiesTrialsStop(name: string, req: GoogleCloudAiplatformV1StopTrialRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:stop`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1Trial; } /** * Adds one or more Trials to a Study, with parameter values suggested by * Vertex AI Vizier. Returns a long-running operation associated with the * generation of Trial suggestions. When this long-running operation succeeds, * it will contain a SuggestTrialsResponse. * * @param parent Required. The project and location that the Study belongs to. Format: `projects/{project}/locations/{location}/studies/{study}` */ async projectsLocationsStudiesTrialsSuggest(parent: string, req: GoogleCloudAiplatformV1SuggestTrialsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/trials:suggest`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Reads multiple TensorboardTimeSeries' data. The data point number limit is * 1000 for scalars, 100 for tensors and blob references. If the number of * data points stored is less than the limit, all data is returned. Otherwise, * the number limit of data points is randomly selected from this time series * and returned. * * @param tensorboard Required. The resource name of the Tensorboard containing TensorboardTimeSeries to read data from. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}`. The TensorboardTimeSeries referenced by time_series must be sub resources of this Tensorboard. */ async projectsLocationsTensorboardsBatchRead(tensorboard: string, opts: ProjectsLocationsTensorboardsBatchReadOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboard }:batchRead`); if (opts.timeSeries !== undefined) { url.searchParams.append("timeSeries", String(opts.timeSeries)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse(data); } /** * Creates a Tensorboard. * * @param parent Required. The resource name of the Location to create the Tensorboard in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTensorboardsCreate(parent: string, req: GoogleCloudAiplatformV1Tensorboard): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/tensorboards`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Deletes a Tensorboard. * * @param name Required. The name of the Tensorboard to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Batch create TensorboardTimeSeries that belong to a TensorboardExperiment. * * @param parent Required. The resource name of the TensorboardExperiment to create the TensorboardTimeSeries in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` The TensorboardRuns referenced by the parent fields in the CreateTensorboardTimeSeriesRequest messages must be sub resources of this TensorboardExperiment. */ async projectsLocationsTensorboardsExperimentsBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest): Promise { req = serializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse(data); } /** * Creates a TensorboardExperiment. * * @param parent Required. The resource name of the Tensorboard to create the TensorboardExperiment in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsExperimentsCreate(parent: string, req: GoogleCloudAiplatformV1TensorboardExperiment, opts: ProjectsLocationsTensorboardsExperimentsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/experiments`); if (opts.tensorboardExperimentId !== undefined) { url.searchParams.append("tensorboardExperimentId", String(opts.tensorboardExperimentId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1TensorboardExperiment; } /** * Deletes a TensorboardExperiment. * * @param name Required. The name of the TensorboardExperiment to be deleted. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a TensorboardExperiment. * * @param name Required. The name of the TensorboardExperiment resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1TensorboardExperiment; } /** * Lists TensorboardExperiments in a Location. * * @param parent Required. The resource name of the Tensorboard to list TensorboardExperiments. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsExperimentsList(parent: string, opts: ProjectsLocationsTensorboardsExperimentsListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/experiments`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTensorboardExperimentsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsExperimentsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. 
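 *
 * A minimal sketch, assuming an authenticated `client`; the operation path
 * below is only illustrative of the nested `.../operations/{operation}`
 * naming:
 *
 * ```ts
 * const ai = new AIplatform(client);
 * // Discard the server-side record of a finished operation.
 * await ai.projectsLocationsTensorboardsExperimentsOperationsDelete(
 *   "projects/my-project/locations/us-central1/tensorboards/111/experiments/exp1/operations/789",
 * );
 * ```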
*/ async projectsLocationsTensorboardsExperimentsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsExperimentsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. */ async projectsLocationsTensorboardsExperimentsOperationsList(name: string, opts: ProjectsLocationsTensorboardsExperimentsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsExperimentsOperationsWait(name: string, opts: ProjectsLocationsTensorboardsExperimentsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a TensorboardExperiment. * * @param name Output only. Name of the TensorboardExperiment. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsPatch(name: string, req: GoogleCloudAiplatformV1TensorboardExperiment, opts: ProjectsLocationsTensorboardsExperimentsPatchOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1TensorboardExperiment; } /** * Batch create TensorboardRuns. * * @param parent Required. The resource name of the TensorboardExperiment to create the TensorboardRuns in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` The parent field in the CreateTensorboardRunRequest messages must match this field. */ async projectsLocationsTensorboardsExperimentsRunsBatchCreate(parent: string, req: GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/runs:batchCreate`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse; } /** * Creates a TensorboardRun. * * @param parent Required. The resource name of the TensorboardExperiment to create the TensorboardRun in. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsRunsCreate(parent: string, req: GoogleCloudAiplatformV1TensorboardRun, opts: ProjectsLocationsTensorboardsExperimentsRunsCreateOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/runs`); if (opts.tensorboardRunId !== undefined) { url.searchParams.append("tensorboardRunId", String(opts.tensorboardRunId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1TensorboardRun; } /** * Deletes a TensorboardRun. * * @param name Required. The name of the TensorboardRun to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a TensorboardRun. * * @param name Required. The name of the TensorboardRun resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1TensorboardRun; } /** * Lists TensorboardRuns in a Location. * * @param parent Required. The resource name of the TensorboardExperiment to list TensorboardRuns. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsRunsList(parent: string, opts: ProjectsLocationsTensorboardsExperimentsRunsListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/runs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTensorboardRunsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsExperimentsRunsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsExperimentsRunsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsExperimentsRunsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
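 *
 * A pagination sketch using the `pageSize`/`pageToken` options, assuming an
 * authenticated `client` and placeholder resource IDs:
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const run =
 *   "projects/my-project/locations/us-central1/tensorboards/111/experiments/exp1/runs/run1";
 * let pageToken: string | undefined;
 * do {
 *   const page = await ai.projectsLocationsTensorboardsExperimentsRunsOperationsList(
 *     run,
 *     { pageSize: 100, pageToken },
 *   );
 *   console.log(page.operations?.length ?? 0);
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```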
*/ async projectsLocationsTensorboardsExperimentsRunsOperationsList(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsExperimentsRunsOperationsWait(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a TensorboardRun. * * @param name Output only. Name of the TensorboardRun. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsPatch(name: string, req: GoogleCloudAiplatformV1TensorboardRun, opts: ProjectsLocationsTensorboardsExperimentsRunsPatchOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleCloudAiplatformV1TensorboardRun; } /** * Creates a TensorboardTimeSeries. * * @param parent Required. The resource name of the TensorboardRun to create the TensorboardTimeSeries in. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesCreate(parent: string, req: GoogleCloudAiplatformV1TensorboardTimeSeries, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesCreateOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1TensorboardTimeSeries(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/timeSeries`); if (opts.tensorboardTimeSeriesId !== undefined) { url.searchParams.append("tensorboardTimeSeriesId", String(opts.tensorboardTimeSeriesId)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data); } /** * Deletes a TensorboardTimeSeries. * * @param name Required. The name of the TensorboardTimeSeries to be deleted. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Exports a TensorboardTimeSeries' data. Data is returned in paginated * responses. * * @param tensorboardTimeSeries Required. The resource name of the TensorboardTimeSeries to export data from. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesExportTensorboardTimeSeries(tensorboardTimeSeries: string, req: GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboardTimeSeries }:exportTensorboardTimeSeries`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse(data); } /** * Gets a TensorboardTimeSeries. * * @param name Required. The name of the TensorboardTimeSeries resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data); } /** * Lists TensorboardTimeSeries in a Location. * * @param parent Required. The resource name of the TensorboardRun to list TensorboardTimeSeries. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesList(parent: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/timeSeries`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
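 *
 * A sketch that lists operations and then blocks on the first unfinished one
 * via the `...OperationsWait` method defined below, assuming an authenticated
 * `client` and placeholder IDs (the optional `timeout` is left at its server
 * default):
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const series =
 *   "projects/my-project/locations/us-central1/tensorboards/111/experiments/exp1/runs/run1/timeSeries/222";
 * const page = await ai
 *   .projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsList(series);
 * const pending = (page.operations ?? []).find((op) => !op.done);
 * if (pending?.name) {
 *   const latest = await ai
 *     .projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWait(pending.name);
 *   console.log(latest.done);
 * }
 * ```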
*/ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsList(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWait(name: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a TensorboardTimeSeries. * * @param name Output only. Name of the TensorboardTimeSeries. */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesPatch(name: string, req: GoogleCloudAiplatformV1TensorboardTimeSeries, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions = {}): Promise { req = serializeGoogleCloudAiplatformV1TensorboardTimeSeries(req); opts = serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data); } /** * Reads a TensorboardTimeSeries' data. By default, if the number of data * points stored is less than 1000, all data is returned. Otherwise, 1000 data * points is randomly selected from this time series and returned. This value * can be changed by changing max_data_points, which can't be greater than * 10k. * * @param tensorboardTimeSeries Required. The resource name of the TensorboardTimeSeries to read data from. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesRead(tensorboardTimeSeries: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesReadOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboardTimeSeries }:read`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.maxDataPoints !== undefined) { url.searchParams.append("maxDataPoints", String(opts.maxDataPoints)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse(data); } /** * Gets bytes of TensorboardBlobs. This is to allow reading blob data stored * in consumer project's Cloud Storage bucket without users having to obtain * Cloud Storage access permission. * * @param timeSeries Required. The resource name of the TensorboardTimeSeries to list Blobs. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ async projectsLocationsTensorboardsExperimentsRunsTimeSeriesReadBlobData(timeSeries: string, opts: ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesReadBlobDataOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ timeSeries }:readBlobData`); if (opts.blobIds !== undefined) { url.searchParams.append("blobIds", String(opts.blobIds)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardBlobDataResponse(data); } /** * Write time series data points into multiple TensorboardTimeSeries under a * TensorboardRun. If any data fail to be ingested, an error is returned. * * @param tensorboardRun Required. The resource name of the TensorboardRun to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ async projectsLocationsTensorboardsExperimentsRunsWrite(tensorboardRun: string, req: GoogleCloudAiplatformV1WriteTensorboardRunDataRequest): Promise { req = serializeGoogleCloudAiplatformV1WriteTensorboardRunDataRequest(req); const url = new URL(`${this.#baseUrl}v1/${ tensorboardRun }:write`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1WriteTensorboardRunDataResponse; } /** * Write time series data points of multiple TensorboardTimeSeries in * multiple TensorboardRun's. If any data fail to be ingested, an error is * returned. * * @param tensorboardExperiment Required. The resource name of the TensorboardExperiment to write data to. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ async projectsLocationsTensorboardsExperimentsWrite(tensorboardExperiment: string, req: GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest): Promise { req = serializeGoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest(req); const url = new URL(`${this.#baseUrl}v1/${ tensorboardExperiment }:write`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse; } /** * Gets a Tensorboard. * * @param name Required. The name of the Tensorboard resource. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1Tensorboard; } /** * Lists Tensorboards in a Location. * * @param parent Required. The resource name of the Location to list Tensorboards. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTensorboardsList(parent: string, opts: ProjectsLocationsTensorboardsListOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/tensorboards`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.orderBy !== undefined) { url.searchParams.append("orderBy", String(opts.orderBy)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleCloudAiplatformV1ListTensorboardsResponse; } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTensorboardsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTensorboardsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTensorboardsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. 
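 *
 * A sketch combining list with the best-effort
 * projectsLocationsTensorboardsOperationsCancel defined above, assuming an
 * authenticated `client` and placeholder IDs:
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const tb = "projects/my-project/locations/us-central1/tensorboards/111";
 * const page = await ai.projectsLocationsTensorboardsOperationsList(tb);
 * for (const op of page.operations ?? []) {
 *   if (!op.done && op.name) {
 *     // Cancellation is best effort; re-check the operation to confirm.
 *     await ai.projectsLocationsTensorboardsOperationsCancel(op.name);
 *   }
 * }
 * ```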
* * @param name The name of the operation's parent resource. */ async projectsLocationsTensorboardsOperationsList(name: string, opts: ProjectsLocationsTensorboardsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTensorboardsOperationsWait(name: string, opts: ProjectsLocationsTensorboardsOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Updates a Tensorboard. * * @param name Output only. Name of the Tensorboard. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsPatch(name: string, req: GoogleCloudAiplatformV1Tensorboard, opts: ProjectsLocationsTensorboardsPatchOptions = {}): Promise { opts = serializeProjectsLocationsTensorboardsPatchOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.updateMask !== undefined) { url.searchParams.append("updateMask", String(opts.updateMask)); } const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Returns the storage size for a given TensorBoard instance. * * @param tensorboard Required. The name of the Tensorboard resource. Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsReadSize(tensorboard: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboard }:readSize`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardSizeResponse(data); } /** * Returns a list of monthly active users for a given TensorBoard instance. * * @param tensorboard Required. The name of the Tensorboard resource. 
Format: `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ async projectsLocationsTensorboardsReadUsage(tensorboard: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ tensorboard }:readUsage`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponse(data); } /** * Cancels a TrainingPipeline. Starts asynchronous cancellation on the * TrainingPipeline. The server makes a best effort to cancel the pipeline, * but success is not guaranteed. Clients can use * PipelineService.GetTrainingPipeline or other methods to check whether the * cancellation succeeded or whether the pipeline completed despite * cancellation. On successful cancellation, the TrainingPipeline is not * deleted; instead it becomes a pipeline with a TrainingPipeline.error value * with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * TrainingPipeline.state is set to `CANCELLED`. * * @param name Required. The name of the TrainingPipeline to cancel. Format: `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` */ async projectsLocationsTrainingPipelinesCancel(name: string, req: GoogleCloudAiplatformV1CancelTrainingPipelineRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a TrainingPipeline. A created TrainingPipeline right away will be * attempted to be run. * * @param parent Required. The resource name of the Location to create the TrainingPipeline in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTrainingPipelinesCreate(parent: string, req: GoogleCloudAiplatformV1TrainingPipeline): Promise { req = serializeGoogleCloudAiplatformV1TrainingPipeline(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/trainingPipelines`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1TrainingPipeline(data); } /** * Deletes a TrainingPipeline. * * @param name Required. The name of the TrainingPipeline resource to be deleted. Format: `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` */ async projectsLocationsTrainingPipelinesDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleLongrunningOperation; } /** * Gets a TrainingPipeline. * * @param name Required. The name of the TrainingPipeline resource. Format: `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` */ async projectsLocationsTrainingPipelinesGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1TrainingPipeline(data); } /** * Lists TrainingPipelines in a Location. * * @param parent Required. The resource name of the Location to list the TrainingPipelines from. 
Format: `projects/{project}/locations/{location}` */ async projectsLocationsTrainingPipelinesList(parent: string, opts: ProjectsLocationsTrainingPipelinesListOptions = {}): Promise { opts = serializeProjectsLocationsTrainingPipelinesListOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ parent }/trainingPipelines`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } if (opts.readMask !== undefined) { url.searchParams.append("readMask", String(opts.readMask)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListTrainingPipelinesResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTrainingPipelinesOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTrainingPipelinesOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTrainingPipelinesOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsTrainingPipelinesOperationsList(name: string, opts: ProjectsLocationsTrainingPipelinesOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Waits until the specified long-running operation is done or reaches at * most a specified timeout, returning the latest state. If the operation is * already done, the latest state is immediately returned. If the timeout * specified is greater than the default HTTP/RPC timeout, the HTTP/RPC * timeout is used. If the server does not support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Note that this method is on a best-effort * basis. It may return the latest state before the specified timeout * (including immediately), meaning even an immediate response is no guarantee * that the operation is done. * * @param name The name of the operation resource to wait on. */ async projectsLocationsTrainingPipelinesOperationsWait(name: string, opts: ProjectsLocationsTrainingPipelinesOperationsWaitOptions = {}): Promise { opts = serializeProjectsLocationsTrainingPipelinesOperationsWaitOptions(opts); const url = new URL(`${this.#baseUrl}v1/${ name }:wait`); if (opts.timeout !== undefined) { url.searchParams.append("timeout", String(opts.timeout)); } const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleLongrunningOperation; } /** * Cancels a TuningJob. Starts asynchronous cancellation on the TuningJob. * The server makes a best effort to cancel the job, but success is not * guaranteed. Clients can use GenAiTuningService.GetTuningJob or other * methods to check whether the cancellation succeeded or whether the job * completed despite cancellation. On successful cancellation, the TuningJob * is not deleted; instead it becomes a job with a TuningJob.error value with * a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`, and * TuningJob.state is set to `CANCELLED`. * * @param name Required. The name of the TuningJob to cancel. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */ async projectsLocationsTuningJobsCancel(name: string, req: GoogleCloudAiplatformV1CancelTuningJobRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleProtobufEmpty; } /** * Creates a TuningJob. A created TuningJob right away will be attempted to * be run. * * @param parent Required. The resource name of the Location to create the TuningJob in. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTuningJobsCreate(parent: string, req: GoogleCloudAiplatformV1TuningJob): Promise { req = serializeGoogleCloudAiplatformV1TuningJob(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/tuningJobs`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1TuningJob(data); } /** * Gets a TuningJob. 
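 *
 * A minimal sketch, assuming an authenticated `client` and a placeholder job
 * ID:
 *
 * ```ts
 * const ai = new AIplatform(client);
 * const job = await ai.projectsLocationsTuningJobsGet(
 *   "projects/my-project/locations/us-central1/tuningJobs/333",
 * );
 * // TuningJob.state reports progress (e.g. `CANCELLED` after a cancel call).
 * console.log(job.state);
 * ```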
* * @param name Required. The name of the TuningJob resource. Format: `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */ async projectsLocationsTuningJobsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1TuningJob(data); } /** * Lists TuningJobs in a Location. * * @param parent Required. The resource name of the Location to list the TuningJobs from. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTuningJobsList(parent: string, opts: ProjectsLocationsTuningJobsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ parent }/tuningJobs`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1ListTuningJobsResponse(data); } /** * Starts asynchronous cancellation on a long-running operation. The server * makes a best effort to cancel the operation, but success is not guaranteed. * If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or * other methods to check whether the cancellation succeeded or whether the * operation completed despite cancellation. On successful cancellation, the * operation is not deleted; instead, it becomes an operation with an * Operation.error value with a google.rpc.Status.code of `1`, corresponding * to `Code.CANCELLED`. * * @param name The name of the operation resource to be cancelled. */ async projectsLocationsTuningJobsOperationsCancel(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }:cancel`); const data = await request(url.href, { client: this.#client, method: "POST", }); return data as GoogleProtobufEmpty; } /** * Deletes a long-running operation. This method indicates that the client is * no longer interested in the operation result. It does not cancel the * operation. If the server doesn't support this method, it returns * `google.rpc.Code.UNIMPLEMENTED`. * * @param name The name of the operation resource to be deleted. */ async projectsLocationsTuningJobsOperationsDelete(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "DELETE", }); return data as GoogleProtobufEmpty; } /** * Gets the latest state of a long-running operation. Clients can use this * method to poll the operation result at intervals as recommended by the API * service. * * @param name The name of the operation resource. */ async projectsLocationsTuningJobsOperationsGet(name: string): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningOperation; } /** * Lists operations that match the specified filter in the request. If the * server doesn't support this method, it returns `UNIMPLEMENTED`. * * @param name The name of the operation's parent resource. 
*/ async projectsLocationsTuningJobsOperationsList(name: string, opts: ProjectsLocationsTuningJobsOperationsListOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }/operations`); if (opts.filter !== undefined) { url.searchParams.append("filter", String(opts.filter)); } if (opts.pageSize !== undefined) { url.searchParams.append("pageSize", String(opts.pageSize)); } if (opts.pageToken !== undefined) { url.searchParams.append("pageToken", String(opts.pageToken)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return data as GoogleLongrunningListOperationsResponse; } /** * Rebase a TunedModel. * * @param parent Required. The resource name of the Location into which to rebase the Model. Format: `projects/{project}/locations/{location}` */ async projectsLocationsTuningJobsRebaseTunedModel(parent: string, req: GoogleCloudAiplatformV1RebaseTunedModelRequest): Promise { req = serializeGoogleCloudAiplatformV1RebaseTunedModelRequest(req); const url = new URL(`${this.#baseUrl}v1/${ parent }/tuningJobs:rebaseTunedModel`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleLongrunningOperation; } /** * Updates a cache config. * * @param name Identifier. Name of the cache config. Format: - `projects/{project}/cacheConfig`. */ async projectsUpdateCacheConfig(name: string, req: GoogleCloudAiplatformV1CacheConfig): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "PATCH", body, }); return data as GoogleLongrunningOperation; } /** * Return a list of tokens based on the input text. * * @param endpoint Required. The name of the Endpoint requested to get lists of tokens and token ids. */ async publishersModelsComputeTokens(endpoint: string, req: GoogleCloudAiplatformV1ComputeTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1ComputeTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:computeTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data); } /** * Perform a token counting. * * @param endpoint Required. The name of the Endpoint requested to perform token counting. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsCountTokens(endpoint: string, req: GoogleCloudAiplatformV1CountTokensRequest): Promise { req = serializeGoogleCloudAiplatformV1CountTokensRequest(req); const url = new URL(`${this.#baseUrl}v1/${ endpoint }:countTokens`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1CountTokensResponse; } /** * Generate content with multimodal inputs. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. 
Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:generateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } /** * Gets a Model Garden publisher model. * * @param name Required. The name of the PublisherModel resource. Format: `publishers/{publisher}/models/{publisher_model}` */ async publishersModelsGet(name: string, opts: PublishersModelsGetOptions = {}): Promise { const url = new URL(`${this.#baseUrl}v1/${ name }`); if (opts.huggingFaceToken !== undefined) { url.searchParams.append("huggingFaceToken", String(opts.huggingFaceToken)); } if (opts.isHuggingFaceModel !== undefined) { url.searchParams.append("isHuggingFaceModel", String(opts.isHuggingFaceModel)); } if (opts.languageCode !== undefined) { url.searchParams.append("languageCode", String(opts.languageCode)); } if (opts.view !== undefined) { url.searchParams.append("view", String(opts.view)); } const data = await request(url.href, { client: this.#client, method: "GET", }); return deserializeGoogleCloudAiplatformV1PublisherModel(data); } /** * Perform an online prediction. * * @param endpoint Required. The name of the Endpoint requested to serve the prediction. Format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsPredict(endpoint: string, req: GoogleCloudAiplatformV1PredictRequest): Promise { const url = new URL(`${this.#baseUrl}v1/${ endpoint }:predict`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1PredictResponse; } /** * Generate content with multimodal inputs with streaming support. * * @param model Required. The fully qualified name of the publisher model or tuned model endpoint to use. Publisher model format: `projects/{project}/locations/{location}/publishers/*/models/*` Tuned model endpoint format: `projects/{project}/locations/{location}/endpoints/{endpoint}` */ async publishersModelsStreamGenerateContent(model: string, req: GoogleCloudAiplatformV1GenerateContentRequest): Promise { req = serializeGoogleCloudAiplatformV1GenerateContentRequest(req); const url = new URL(`${this.#baseUrl}v1/${ model }:streamGenerateContent`); const body = JSON.stringify(req); const data = await request(url.href, { client: this.#client, method: "POST", body, }); return data as GoogleCloudAiplatformV1GenerateContentResponse; } } /** * Additional options for AIplatform#batchPredictionJobsCreate. */ export interface BatchPredictionJobsCreateOptions { /** * Required. The resource name of the Location to create the * BatchPredictionJob in. Format: `projects/{project}/locations/{location}` */ parent?: string; } /** * Additional options for AIplatform#batchPredictionJobsList. */ export interface BatchPredictionJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `model_display_name` supports `=`, * `!=` comparisons. * `state` supports `=`, `!=` comparisons. 
* `create_time` * supports `=`, `!=`,`<`, `<=`,`>`, `>=` comparisons. `create_time` must be * in RFC 3339 format. * `labels` supports general map functions that is: * `labels.key=value` - key:value equality `labels.key:*` - key existence Some * examples of using the filter are: * `state="JOB_STATE_SUCCEEDED" AND * display_name:"my_job_*"` * `state!="JOB_STATE_FAILED" OR * display_name="my_job"` * `NOT display_name="my_job"` * * `create_time>"2021-05-18T00:00:00Z"` * `labels.keyA=valueA` * * `labels.keyB:*` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListBatchPredictionJobsResponse.next_page_token of the previous * JobService.ListBatchPredictionJobs call. */ pageToken?: string; /** * Required. The resource name of the Location to list the * BatchPredictionJobs from. Format: `projects/{project}/locations/{location}` */ parent?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeBatchPredictionJobsListOptions(data: any): BatchPredictionJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeBatchPredictionJobsListOptions(data: any): BatchPredictionJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Generate video response. */ export interface CloudAiLargeModelsVisionGenerateVideoResponse { /** * The generated samples. */ generatedSamples?: CloudAiLargeModelsVisionMedia[]; /** * The number of videos filtered due to RAI policies, if any. */ raiMediaFilteredCount?: number; /** * Returns RAI failure reasons, if any. */ raiMediaFilteredReasons?: string[]; /** * List of videos, used to align naming with the external response. */ videos?: CloudAiLargeModelsVisionGenerateVideoResponseVideo[]; } function serializeCloudAiLargeModelsVisionGenerateVideoResponse(data: any): CloudAiLargeModelsVisionGenerateVideoResponse { return { ...data, generatedSamples: data["generatedSamples"] !== undefined ? data["generatedSamples"].map((item: any) => (serializeCloudAiLargeModelsVisionMedia(item))) : undefined, }; } function deserializeCloudAiLargeModelsVisionGenerateVideoResponse(data: any): CloudAiLargeModelsVisionGenerateVideoResponse { return { ...data, generatedSamples: data["generatedSamples"] !== undefined ? data["generatedSamples"].map((item: any) => (deserializeCloudAiLargeModelsVisionMedia(item))) : undefined, }; } export interface CloudAiLargeModelsVisionGenerateVideoResponseVideo { /** * Base64 encoded bytes string representing the video. */ bytesBase64Encoded?: string; /** * Cloud Storage URI where the generated video is written. */ gcsUri?: string; /** * The MIME type of the content of the video. - video/mp4 */ mimeType?: string; } /** * Image. */ export interface CloudAiLargeModelsVisionImage { /** * Image encoding, encoded as "image/png" or "image/jpg". */ encoding?: string; /** * Generation seed for the sampled image. This parameter is exposed to the * user only if one of the following is true: 1. The user specified * per-example seeds in the request. 2. The user doesn't specify the * generation seed in the request. */ generationSeed?: number; /** * Raw bytes. */ image?: Uint8Array; /** * RAI scores for generated image. */ imageRaiScores?: CloudAiLargeModelsVisionImageRAIScores; /** * Image size. The size of the image. Can be self reported, or computed from * the image bytes. 
*/ imageSize?: CloudAiLargeModelsVisionImageImageSize; /** * RAI info for image. */ raiInfo?: CloudAiLargeModelsVisionRaiInfo; /** * Semantic filter info for image. */ semanticFilterResponse?: CloudAiLargeModelsVisionSemanticFilterResponse; /** * Text/Expanded text input for imagen. */ text?: string; /** * Path to another storage (typically Google Cloud Storage). */ uri?: string; } function serializeCloudAiLargeModelsVisionImage(data: any): CloudAiLargeModelsVisionImage { return { ...data, image: data["image"] !== undefined ? encodeBase64(data["image"]) : undefined, }; } function deserializeCloudAiLargeModelsVisionImage(data: any): CloudAiLargeModelsVisionImage { return { ...data, image: data["image"] !== undefined ? decodeBase64(data["image"] as string) : undefined, }; } /** * Image size. */ export interface CloudAiLargeModelsVisionImageImageSize { channels?: number; height?: number; width?: number; } /** * RAI scores for generated image returned. */ export interface CloudAiLargeModelsVisionImageRAIScores { /** * Agile watermark score for image. */ agileWatermarkDetectionScore?: number; } /** * Media. */ export interface CloudAiLargeModelsVisionMedia { /** * Image. */ image?: CloudAiLargeModelsVisionImage; /** * Video */ video?: CloudAiLargeModelsVisionVideo; } function serializeCloudAiLargeModelsVisionMedia(data: any): CloudAiLargeModelsVisionMedia { return { ...data, image: data["image"] !== undefined ? serializeCloudAiLargeModelsVisionImage(data["image"]) : undefined, video: data["video"] !== undefined ? serializeCloudAiLargeModelsVisionVideo(data["video"]) : undefined, }; } function deserializeCloudAiLargeModelsVisionMedia(data: any): CloudAiLargeModelsVisionMedia { return { ...data, image: data["image"] !== undefined ? deserializeCloudAiLargeModelsVisionImage(data["image"]) : undefined, video: data["video"] !== undefined ? deserializeCloudAiLargeModelsVisionVideo(data["video"]) : undefined, }; } export interface CloudAiLargeModelsVisionNamedBoundingBox { classes?: string[]; entities?: string[]; scores?: number[]; x1?: number; x2?: number; y1?: number; y2?: number; } /** * Next ID: 6 */ export interface CloudAiLargeModelsVisionRaiInfo { /** * List of blocked entities from the blocklist if any are detected. */ blockedEntities?: string[]; /** * The list of detected labels for different rai categories. */ detectedLabels?: CloudAiLargeModelsVisionRaiInfoDetectedLabels[]; /** * The model name used to index into the RaiFilterConfig map. Would either * be one of imagegeneration@002-006, imagen-3.0-... api endpoint names, or * internal names used for mapping to different filter configs (genselfie, * ai_watermark) than its api endpoint. */ modelName?: string; /** * List of rai categories' information to return. */ raiCategories?: string[]; /** * List of rai scores mapping to the rai categories. Rounded to 1 decimal * place. */ scores?: number[]; } /** * Filters returning a list of detected labels, scores, and bounding boxes. */ export interface CloudAiLargeModelsVisionRaiInfoDetectedLabels { /** * The list of detected entities for the rai signal. */ entities?: CloudAiLargeModelsVisionRaiInfoDetectedLabelsEntity[]; /** * The RAI category for the detected labels. */ raiCategory?: string; } /** * An integer bounding box of original pixels of the image for the detected * labels. */ export interface CloudAiLargeModelsVisionRaiInfoDetectedLabelsBoundingBox { /** * The X coordinate of the top-left corner, in pixels. */ x1?: number; /** * The X coordinate of the bottom-right corner, in pixels. 
*/ x2?: number; /** * The Y coordinate of the top-left corner, in pixels. */ y1?: number; /** * The Y coordinate of the bottom-right corner, in pixels. */ y2?: number; } /** * The properties for a detected entity from the rai signal. */ export interface CloudAiLargeModelsVisionRaiInfoDetectedLabelsEntity { /** * Bounding box of the label */ boundingBox?: CloudAiLargeModelsVisionRaiInfoDetectedLabelsBoundingBox; /** * Description of the label */ description?: string; /** * The intersection ratio between the detection bounding box and the mask. */ iouScore?: number; /** * MID of the label */ mid?: string; /** * Confidence score of the label */ score?: number; } export interface CloudAiLargeModelsVisionSemanticFilterResponse { /** * Class labels of the bounding boxes that failed the semantic filtering. * Bounding box coordinates. */ namedBoundingBoxes?: CloudAiLargeModelsVisionNamedBoundingBox[]; /** * This response is added when semantic filter config is turned on in * EditConfig. It reports if this image is passed semantic filter response. If * passed_semantic_filter is false, the bounding box information will be * populated for user to check what caused the semantic filter to fail. */ passedSemanticFilter?: boolean; } /** * Video */ export interface CloudAiLargeModelsVisionVideo { /** * Base 64 encoded video bytes. */ encodedVideo?: string; /** * Video encoding, for example "video/mp4". */ encoding?: string; /** * Text/Expanded text input for Help Me Write. */ text?: string; /** * Path to another storage (typically Google Cloud Storage). */ uri?: string; /** * Raw bytes. */ video?: Uint8Array; } function serializeCloudAiLargeModelsVisionVideo(data: any): CloudAiLargeModelsVisionVideo { return { ...data, video: data["video"] !== undefined ? encodeBase64(data["video"]) : undefined, }; } function deserializeCloudAiLargeModelsVisionVideo(data: any): CloudAiLargeModelsVisionVideo { return { ...data, video: data["video"] !== undefined ? decodeBase64(data["video"] as string) : undefined, }; } /** * Create API error message for Vertex Pipeline. */ export interface CloudAiPlatformCommonCreatePipelineJobApiErrorDetail { /** * The error root cause returned by CreatePipelineJob API. */ errorCause?: | "ERROR_CAUSE_UNSPECIFIED" | "INVALID_PIPELINE_SPEC_FORMAT" | "INVALID_PIPELINE_SPEC" | "INVALID_DEPLOYMENT_CONFIG" | "INVALID_DEPLOYMENT_SPEC" | "INVALID_INSTANCE_SCHEMA" | "INVALID_CUSTOM_JOB" | "INVALID_CONTAINER_SPEC" | "INVALID_NOTIFICATION_EMAIL_SETUP" | "INVALID_SERVICE_ACCOUNT_SETUP" | "INVALID_KMS_SETUP" | "INVALID_NETWORK_SETUP" | "INVALID_PIPELINE_TASK_SPEC" | "INVALID_PIPELINE_TASK_ARTIFACT" | "INVALID_IMPORTER_SPEC" | "INVALID_RESOLVER_SPEC" | "INVALID_RUNTIME_PARAMETERS" | "CLOUD_API_NOT_ENABLED" | "INVALID_GCS_INPUT_URI" | "INVALID_GCS_OUTPUT_URI" | "INVALID_COMPONENT_SPEC" | "INVALID_DAG_OUTPUTS_SPEC" | "INVALID_DAG_SPEC" | "INSUFFICIENT_QUOTA" | "INTERNAL"; /** * Public messages contains actionable items for the error cause. */ publicMessage?: string; } /** * Additional options for AIplatform#datasetsCreate. */ export interface DatasetsCreateOptions { /** * Required. The resource name of the Location to create the Dataset in. * Format: `projects/{project}/locations/{location}` */ parent?: string; } /** * Additional options for AIplatform#datasetsDatasetVersionsGet. */ export interface DatasetsDatasetVersionsGetOptions { /** * Mask specifying which fields to read. 
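 * For example, a read mask of `"name,displayName"` would limit the returned
 * DatasetVersion to just those fields; the mask is a standard comma-separated
 * `google.protobuf.FieldMask`, and the specific field names here are only
 * illustrative.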
*/ readMask?: string /* FieldMask */; } function serializeDatasetsDatasetVersionsGetOptions(data: any): DatasetsDatasetVersionsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsDatasetVersionsGetOptions(data: any): DatasetsDatasetVersionsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsDatasetVersionsList. */ export interface DatasetsDatasetVersionsListOptions { /** * Optional. The standard list filter. */ filter?: string; /** * Optional. A comma-separated list of fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. */ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeDatasetsDatasetVersionsListOptions(data: any): DatasetsDatasetVersionsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsDatasetVersionsListOptions(data: any): DatasetsDatasetVersionsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsDatasetVersionsPatch. */ export interface DatasetsDatasetVersionsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Updatable fields: * * `display_name` */ updateMask?: string /* FieldMask */; } function serializeDatasetsDatasetVersionsPatchOptions(data: any): DatasetsDatasetVersionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeDatasetsDatasetVersionsPatchOptions(data: any): DatasetsDatasetVersionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsGet. */ export interface DatasetsGetOptions { /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeDatasetsGetOptions(data: any): DatasetsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsGetOptions(data: any): DatasetsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsList. */ export interface DatasetsListOptions { /** * An expression for filtering the results of the request. For field names * both snake_case and camelCase are supported. * `display_name`: supports = * and != * `metadata_schema_uri`: supports = and != * `labels` supports * general map functions that is: * `labels.key=value` - key:value equality * * `labels.key:* or labels:key - key existence * A key including a space must * be quoted. `labels."a key"`. Some examples: * `displayName="myDisplayName"` * * `labels.myKey="myValue"` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `display_name` * `create_time` * `update_time` */ orderBy?: string; /** * The standard list page size. 
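 *
 * Page size is normally combined with `pageToken` to walk the full result
 * set. A rough paging sketch (here `client` is an `AIplatform` instance, and
 * the `datasetsList` call shape plus the `datasets`/`nextPageToken` response
 * fields are assumptions based on this options type, not verified in this
 * file):
 *
 * ```ts
 * let pageToken: string | undefined;
 * do {
 *   const page = await client.datasetsList({
 *     parent: "projects/my-project/locations/us-central1", // illustrative parent
 *     pageSize: 50,
 *     pageToken,
 *   });
 *   for (const dataset of page.datasets ?? []) {
 *     console.log(dataset.displayName);
 *   }
 *   pageToken = page.nextPageToken;
 * } while (pageToken);
 * ```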
*/ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Required. The name of the Dataset's parent resource. Format: * `projects/{project}/locations/{location}` */ parent?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeDatasetsListOptions(data: any): DatasetsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeDatasetsListOptions(data: any): DatasetsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#datasetsPatch. */ export interface DatasetsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Updatable fields: * * `display_name` * `description` * `labels` */ updateMask?: string /* FieldMask */; } function serializeDatasetsPatchOptions(data: any): DatasetsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeDatasetsPatchOptions(data: any): DatasetsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Message that represents an arbitrary HTTP body. It should only be used for * payload formats that can't be represented as JSON, such as raw binary or an * HTML page. This message can be used both in streaming and non-streaming API * methods in the request as well as the response. It can be used as a top-level * request field, which is convenient if one wants to extract parameters from * either the URL or HTTP template into the request fields and also want access * to the raw HTTP body. Example: message GetResourceRequest { // A unique * request id. string request_id = 1; // The raw HTTP body is bound to this * field. google.api.HttpBody http_body = 2; } service ResourceService { rpc * GetResource(GetResourceRequest) returns (google.api.HttpBody); rpc * UpdateResource(google.api.HttpBody) returns (google.protobuf.Empty); } * Example with streaming methods: service CaldavService { rpc * GetCalendar(stream google.api.HttpBody) returns (stream google.api.HttpBody); * rpc UpdateCalendar(stream google.api.HttpBody) returns (stream * google.api.HttpBody); } Use of this type only changes how the request and * response bodies are handled, all other features will continue to work * unchanged. */ export interface GoogleApiHttpBody { /** * The HTTP Content-Type header value specifying the content type of the * body. */ contentType?: string; /** * The HTTP request/response body as raw binary. */ data?: Uint8Array; /** * Application specific response metadata. Must be set in the first response * for streaming APIs. */ extensions?: { [key: string]: any }[]; } function serializeGoogleApiHttpBody(data: any): GoogleApiHttpBody { return { ...data, data: data["data"] !== undefined ? encodeBase64(data["data"]) : undefined, }; } function deserializeGoogleApiHttpBody(data: any): GoogleApiHttpBody { return { ...data, data: data["data"] !== undefined ? decodeBase64(data["data"] as string) : undefined, }; } /** * Parameters that configure the active learning pipeline. Active learning will * label the data incrementally by several iterations. For every iteration, it * will select a batch of data based on the sampling strategy. 
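 *
 * A minimal illustrative value (the number is arbitrary, not a recommended
 * default; only one of the two "max" budget fields would normally be set):
 *
 * ```ts
 * const activeLearningConfig: GoogleCloudAiplatformV1ActiveLearningConfig = {
 *   maxDataItemPercentage: 20, // let humans label at most 20% of all DataItems
 * };
 * ```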
*/ export interface GoogleCloudAiplatformV1ActiveLearningConfig { /** * Max number of human labeled DataItems. */ maxDataItemCount?: bigint; /** * Max percent of total DataItems for human labeling. */ maxDataItemPercentage?: number; /** * Active learning data sampling config. For every active learning labeling * iteration, it will select a batch of data based on the sampling strategy. */ sampleConfig?: GoogleCloudAiplatformV1SampleConfig; /** * CMLE training config. For every active learning labeling iteration, system * will train a machine learning model on CMLE. The trained model will be used * by data sampling algorithm to select DataItems. */ trainingConfig?: GoogleCloudAiplatformV1TrainingConfig; } function serializeGoogleCloudAiplatformV1ActiveLearningConfig(data: any): GoogleCloudAiplatformV1ActiveLearningConfig { return { ...data, maxDataItemCount: data["maxDataItemCount"] !== undefined ? String(data["maxDataItemCount"]) : undefined, trainingConfig: data["trainingConfig"] !== undefined ? serializeGoogleCloudAiplatformV1TrainingConfig(data["trainingConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ActiveLearningConfig(data: any): GoogleCloudAiplatformV1ActiveLearningConfig { return { ...data, maxDataItemCount: data["maxDataItemCount"] !== undefined ? BigInt(data["maxDataItemCount"]) : undefined, trainingConfig: data["trainingConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1TrainingConfig(data["trainingConfig"]) : undefined, }; } /** * Request message for MetadataService.AddContextArtifactsAndExecutions. */ export interface GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsRequest { /** * The resource names of the Artifacts to attribute to the Context. Format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` */ artifacts?: string[]; /** * The resource names of the Executions to associate with the Context. * Format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` */ executions?: string[]; } /** * Response message for MetadataService.AddContextArtifactsAndExecutions. */ export interface GoogleCloudAiplatformV1AddContextArtifactsAndExecutionsResponse { } /** * Request message for MetadataService.AddContextChildren. */ export interface GoogleCloudAiplatformV1AddContextChildrenRequest { /** * The resource names of the child Contexts. */ childContexts?: string[]; } /** * Response message for MetadataService.AddContextChildren. */ export interface GoogleCloudAiplatformV1AddContextChildrenResponse { } /** * Request message for MetadataService.AddExecutionEvents. */ export interface GoogleCloudAiplatformV1AddExecutionEventsRequest { /** * The Events to create and add. */ events?: GoogleCloudAiplatformV1Event[]; } /** * Response message for MetadataService.AddExecutionEvents. */ export interface GoogleCloudAiplatformV1AddExecutionEventsResponse { } /** * Request message for VizierService.AddTrialMeasurement. */ export interface GoogleCloudAiplatformV1AddTrialMeasurementRequest { /** * Required. The measurement to be added to a Trial. */ measurement?: GoogleCloudAiplatformV1Measurement; } /** * Used to assign specific AnnotationSpec to a particular area of a DataItem or * the whole part of the DataItem. */ export interface GoogleCloudAiplatformV1Annotation { /** * Output only. The source of the Annotation. */ readonly annotationSource?: GoogleCloudAiplatformV1UserActionReference; /** * Output only. Timestamp when this Annotation was created. 
*/ readonly createTime?: Date; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * Annotations. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * No more than 64 user labels can be associated with one Annotation (System * labels are excluded). See https://goo.gl/xmQnxf for more information and * examples of labels. System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. The following system labels * exist for each Annotation: * * "aiplatform.googleapis.com/annotation_set_name": optional, name of the UI's * annotation set this Annotation belongs to. If not set, the Annotation is * not visible in the UI. * "aiplatform.googleapis.com/payload_schema": output * only, its value is the payload_schema's title. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the Annotation. */ readonly name?: string; /** * Required. The schema of the payload can be found in payload_schema. */ payload?: any; /** * Required. Google Cloud Storage URI that points to a YAML file describing * the payload. The schema is defined as an [OpenAPI 3.0.2 Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * The schema files that can be used here are found in * gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the * chosen schema must be consistent with the parent Dataset's metadata. */ payloadSchemaUri?: string; /** * Output only. Timestamp when this Annotation was last updated. */ readonly updateTime?: Date; } /** * Identifies a concept with which DataItems may be annotated. */ export interface GoogleCloudAiplatformV1AnnotationSpec { /** * Output only. Timestamp when this AnnotationSpec was created. */ readonly createTime?: Date; /** * Required. The user-defined name of the AnnotationSpec. The name can be up * to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Output only. Resource name of the AnnotationSpec. */ readonly name?: string; /** * Output only. Timestamp when AnnotationSpec was last updated. */ readonly updateTime?: Date; } /** * The generic reusable api auth config. Deprecated. Please use AuthConfig * (google/cloud/aiplatform/master/auth.proto) instead. */ export interface GoogleCloudAiplatformV1ApiAuth { /** * The API secret. */ apiKeyConfig?: GoogleCloudAiplatformV1ApiAuthApiKeyConfig; } /** * The API secret. */ export interface GoogleCloudAiplatformV1ApiAuthApiKeyConfig { /** * Required. The SecretManager secret version resource name storing API key. * e.g. projects/{project}/secrets/{secret}/versions/{version} */ apiKeySecretVersion?: string; } /** * Instance of a general artifact. */ export interface GoogleCloudAiplatformV1Artifact { /** * Output only. Timestamp when this Artifact was created. */ readonly createTime?: Date; /** * Description of the Artifact */ description?: string; /** * User provided display name of the Artifact. May be up to 128 Unicode * characters. */ displayName?: string; /** * An eTag used to perform consistent read-modify-write updates. 
If not set, * a blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Artifacts. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Artifact (System labels are excluded). */ labels?: { [key: string]: string }; /** * Properties of the Artifact. Top level metadata keys' heading and trailing * spaces will be trimmed. The size of this field should not exceed 200KB. */ metadata?: { [key: string]: any }; /** * Output only. The resource name of the Artifact. */ readonly name?: string; /** * The title of the schema describing the metadata. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaTitle?: string; /** * The version of the schema in schema_name to use. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaVersion?: string; /** * The state of this Artifact. This is a property of the Artifact, and does * not imply or capture any ongoing process. This property is managed by * clients (such as Vertex AI Pipelines), and the system does not prescribe or * check the validity of state transitions. */ state?: | "STATE_UNSPECIFIED" | "PENDING" | "LIVE"; /** * Output only. Timestamp when this Artifact was last updated. */ readonly updateTime?: Date; /** * The uniform resource identifier of the artifact file. May be empty if * there is no actual artifact file. */ uri?: string; } /** * Metadata information for NotebookService.AssignNotebookRuntime. */ export interface GoogleCloudAiplatformV1AssignNotebookRuntimeOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * A human-readable message that shows the intermediate progress details of * NotebookRuntime. */ progressMessage?: string; } /** * Request message for NotebookService.AssignNotebookRuntime. */ export interface GoogleCloudAiplatformV1AssignNotebookRuntimeRequest { /** * Required. Provide runtime specific information (e.g. runtime owner, * notebook id) used for NotebookRuntime assignment. */ notebookRuntime?: GoogleCloudAiplatformV1NotebookRuntime; /** * Optional. User specified ID for the notebook runtime. */ notebookRuntimeId?: string; /** * Required. The resource name of the NotebookRuntimeTemplate based on which * a NotebookRuntime will be assigned (reuse or create a new one). */ notebookRuntimeTemplate?: string; } /** * Attribution that explains a particular prediction output. */ export interface GoogleCloudAiplatformV1Attribution { /** * Output only. Error of feature_attributions caused by approximation used in * the explanation method. Lower value means more precise attributions. * For * Sampled Shapley attribution, increasing path_count might reduce the error. * * For Integrated Gradients attribution, increasing step_count might reduce * the error. * For XRAI attribution, increasing step_count might reduce the * error. See [this introduction](/vertex-ai/docs/explainable-ai/overview) for * more information. */ readonly approximationError?: number; /** * Output only. 
Model predicted output if the input instance is constructed * from the baselines of all the features defined in * ExplanationMetadata.inputs. The field name of the output is determined by * the key in ExplanationMetadata.outputs. If the Model's predicted output has * multiple dimensions (rank > 1), this is the value in the output located by * output_index. If there are multiple baselines, their output values are * averaged. */ readonly baselineOutputValue?: number; /** * Output only. Attributions of each explained feature. Features are * extracted from the prediction instances according to explanation metadata * for inputs. The value is a struct, whose keys are the name of the feature. * The values are how much the feature in the instance contributed to the * predicted result. The format of the value is determined by the feature's * input format: * If the feature is a scalar value, the attribution value is * a floating number. * If the feature is an array of scalar values, the * attribution value is an array. * If the feature is a struct, the * attribution value is a struct. The keys in the attribution value struct are * the same as the keys in the feature struct. The formats of the values in * the attribution struct are determined by the formats of the values in the * feature struct. The ExplanationMetadata.feature_attributions_schema_uri * field, pointed to by the ExplanationSpec field of the * Endpoint.deployed_models object, points to the schema file that describes * the features and their attribution values (if it is populated). */ readonly featureAttributions?: any; /** * Output only. Model predicted output on the corresponding explanation * instance. The field name of the output is determined by the key in * ExplanationMetadata.outputs. If the Model predicted output has multiple * dimensions, this is the value in the output located by output_index. */ readonly instanceOutputValue?: number; /** * Output only. The display name of the output identified by output_index. * For example, the predicted class name by a multi-classification Model. This * field is only populated iff the Model predicts display names as a separate * field along with the explained output. The predicted display name must have * the same shape as the explained output, and can be located using * output_index. */ readonly outputDisplayName?: string; /** * Output only. The index that locates the explained prediction output. If * the prediction output is a scalar value, output_index is not populated. If * the prediction output has multiple dimensions, the length of the * output_index list is the same as the number of dimensions of the output. * The i-th element in output_index is the element index of the i-th dimension * of the output vector. Indices start from 0. */ readonly outputIndex?: number[]; /** * Output only. Name of the explain output. Specified as the key in * ExplanationMetadata.outputs. */ readonly outputName?: string; } /** * Request message for AugmentPrompt. */ export interface GoogleCloudAiplatformV1AugmentPromptRequest { /** * Optional. Input content to augment, only text format is supported for now. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. Metadata of the backend deployed model. */ model?: GoogleCloudAiplatformV1AugmentPromptRequestModel; /** * Optional. Retrieves contexts from the Vertex RagStore. 
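 *
 * For orientation, a rough request shape (a sketch only; the nested
 * `Content`/`Part` field names and the model resource name are assumptions
 * drawn from the wider Vertex AI surface, not from this file):
 *
 * ```ts
 * const req: GoogleCloudAiplatformV1AugmentPromptRequest = {
 *   contents: [{ role: "user", parts: [{ text: "What does the warranty cover?" }] }],
 *   model: { model: "publishers/google/models/gemini-1.5-flash" }, // illustrative model name
 *   // vertexRagStore: point this at the RAG corpus to retrieve contexts from
 * };
 * ```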
*/ vertexRagStore?: GoogleCloudAiplatformV1VertexRagStore; } function serializeGoogleCloudAiplatformV1AugmentPromptRequest(data: any): GoogleCloudAiplatformV1AugmentPromptRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1AugmentPromptRequest(data: any): GoogleCloudAiplatformV1AugmentPromptRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * Metadata of the backend deployed model. */ export interface GoogleCloudAiplatformV1AugmentPromptRequestModel { /** * Optional. The model that the user will send the augmented prompt for * content generation. */ model?: string; /** * Optional. The model version of the backend deployed model. */ modelVersion?: string; } /** * Response message for AugmentPrompt. */ export interface GoogleCloudAiplatformV1AugmentPromptResponse { /** * Augmented prompt, only text format is supported for now. */ augmentedPrompt?: GoogleCloudAiplatformV1Content[]; /** * Retrieved facts from RAG data sources. */ facts?: GoogleCloudAiplatformV1Fact[]; } function serializeGoogleCloudAiplatformV1AugmentPromptResponse(data: any): GoogleCloudAiplatformV1AugmentPromptResponse { return { ...data, augmentedPrompt: data["augmentedPrompt"] !== undefined ? data["augmentedPrompt"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1AugmentPromptResponse(data: any): GoogleCloudAiplatformV1AugmentPromptResponse { return { ...data, augmentedPrompt: data["augmentedPrompt"] !== undefined ? data["augmentedPrompt"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * A description of resources that to a large degree are decided by Vertex AI, * and require only a modest additional configuration. Each Model supporting * these resources documents its specific guidelines. */ export interface GoogleCloudAiplatformV1AutomaticResources { /** * Immutable. The maximum number of replicas that may be deployed on when the * traffic against it increases. If the requested value is too large, the * deployment will error, but if deployment succeeds then the ability to scale * to that many replicas is guaranteed (barring service outages). If traffic * increases beyond what its replicas at maximum may handle, a portion of the * traffic will be dropped. If this value is not provided, no upper bound * for scaling under heavy traffic will be assumed, though Vertex AI may be * unable to scale beyond a certain replica number. */ maxReplicaCount?: number; /** * Immutable. The minimum number of replicas that will always be deployed on. * If traffic against it increases, it may dynamically be deployed onto more * replicas up to max_replica_count, and as traffic decreases, some of these * extra replicas may be freed. If the requested value is too large, the * deployment will error. */ minReplicaCount?: number; } /** * The configs for autorater. This is applicable to both EvaluateInstances and * EvaluateDataset. */ export interface GoogleCloudAiplatformV1AutoraterConfig { /** * Optional. The fully qualified name of the publisher model or tuned * autorater endpoint to use. 
Publisher model format: * `projects/{project}/locations/{location}/publishers/*\/models/*` Tuned * model endpoint format: * `projects/{project}/locations/{location}/endpoints/{endpoint}` */ autoraterModel?: string; /** * Optional. Default is true. Whether to flip the candidate and baseline * responses. This is only applicable to the pairwise metric. If enabled, also * provide PairwiseMetricSpec.candidate_response_field_name and * PairwiseMetricSpec.baseline_response_field_name. When rendering * PairwiseMetricSpec.metric_prompt_template, the candidate and baseline * fields will be flipped for half of the samples to reduce bias. */ flipEnabled?: boolean; /** * Optional. Number of samples for each instance in the dataset. If not * specified, the default is 4. Minimum value is 1, maximum value is 32. */ samplingCount?: number; } /** * The metric specification that defines the target resource utilization (CPU * utilization, accelerator's duty cycle, and so on) for calculating the desired * replica count. */ export interface GoogleCloudAiplatformV1AutoscalingMetricSpec { /** * Required. The resource metric name. Supported metrics: * For Online * Prediction: * * `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` * * `aiplatform.googleapis.com/prediction/online/cpu/utilization` */ metricName?: string; /** * The target resource utilization in percentage (1% - 100%) for the given * metric; once the real usage deviates from the target by a certain * percentage, the machine replicas change. The default value is 60 * (representing 60%) if not provided. */ target?: number; } /** * The storage details for Avro input content. */ export interface GoogleCloudAiplatformV1AvroSource { /** * Required. Google Cloud Storage location. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * Request message for PipelineService.BatchCancelPipelineJobs. */ export interface GoogleCloudAiplatformV1BatchCancelPipelineJobsRequest { /** * Required. The names of the PipelineJobs to cancel. A maximum of 32 * PipelineJobs can be cancelled in a batch. Format: * `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}` */ names?: string[]; } /** * Details of operations that perform batch create Features. */ export interface GoogleCloudAiplatformV1BatchCreateFeaturesOperationMetadata { /** * Operation metadata for Feature. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.BatchCreateFeatures. Request message * for FeatureRegistryService.BatchCreateFeatures. */ export interface GoogleCloudAiplatformV1BatchCreateFeaturesRequest { /** * Required. The request message specifying the Features to create. All * Features must be created under the same parent EntityType / FeatureGroup. * The `parent` field in each child request message can be omitted. If * `parent` is set in a child request, then the value must match the `parent` * value in this request message. */ requests?: GoogleCloudAiplatformV1CreateFeatureRequest[]; } /** * Response message for FeaturestoreService.BatchCreateFeatures. */ export interface GoogleCloudAiplatformV1BatchCreateFeaturesResponse { /** * The Features created. */ features?: GoogleCloudAiplatformV1Feature[]; } /** * Request message for TensorboardService.BatchCreateTensorboardRuns. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardRunsRequest { /** * Required. The request message specifying the TensorboardRuns to create. A * maximum of 1000 TensorboardRuns can be created in a batch. 
*/ requests?: GoogleCloudAiplatformV1CreateTensorboardRunRequest[]; } /** * Response message for TensorboardService.BatchCreateTensorboardRuns. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardRunsResponse { /** * The created TensorboardRuns. */ tensorboardRuns?: GoogleCloudAiplatformV1TensorboardRun[]; } /** * Request message for TensorboardService.BatchCreateTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { /** * Required. The request message specifying the TensorboardTimeSeries to * create. A maximum of 1000 TensorboardTimeSeries can be created in a batch. */ requests?: GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest[]; } function serializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { return { ...data, requests: data["requests"] !== undefined ? data["requests"].map((item: any) => (serializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesRequest { return { ...data, requests: data["requests"] !== undefined ? data["requests"].map((item: any) => (deserializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(item))) : undefined, }; } /** * Response message for TensorboardService.BatchCreateTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { /** * The created TensorboardTimeSeries. */ tensorboardTimeSeries?: GoogleCloudAiplatformV1TensorboardTimeSeries[]; } function serializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (serializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1BatchCreateTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } /** * A description of resources that are used for performing batch operations, * are dedicated to a Model, and need manual configuration. */ export interface GoogleCloudAiplatformV1BatchDedicatedResources { /** * Required. Immutable. The specification of a single machine. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Immutable. The maximum number of machine replicas the batch operation may * be scaled to. The default value is 10. */ maxReplicaCount?: number; /** * Immutable. The number of machine replicas used at the start of the batch * operation. If not set, Vertex AI decides starting number, not greater than * max_replica_count */ startingReplicaCount?: number; } /** * Request message for PipelineService.BatchDeletePipelineJobs. */ export interface GoogleCloudAiplatformV1BatchDeletePipelineJobsRequest { /** * Required. The names of the PipelineJobs to delete. A maximum of 32 * PipelineJobs can be deleted in a batch. 
Format: * `projects/{project}/locations/{location}/pipelineJobs/{pipelineJob}` */ names?: string[]; } /** * Request message for ModelService.BatchImportEvaluatedAnnotations */ export interface GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsRequest { /** * Required. Evaluated annotations resource to be imported. */ evaluatedAnnotations?: GoogleCloudAiplatformV1EvaluatedAnnotation[]; } /** * Response message for ModelService.BatchImportEvaluatedAnnotations */ export interface GoogleCloudAiplatformV1BatchImportEvaluatedAnnotationsResponse { /** * Output only. Number of EvaluatedAnnotations imported. */ readonly importedEvaluatedAnnotationsCount?: number; } /** * Request message for ModelService.BatchImportModelEvaluationSlices */ export interface GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesRequest { /** * Required. Model evaluation slice resource to be imported. */ modelEvaluationSlices?: GoogleCloudAiplatformV1ModelEvaluationSlice[]; } /** * Response message for ModelService.BatchImportModelEvaluationSlices */ export interface GoogleCloudAiplatformV1BatchImportModelEvaluationSlicesResponse { /** * Output only. List of imported ModelEvaluationSlice.name. */ readonly importedModelEvaluationSlices?: string[]; } /** * Runtime operation information for MigrationService.BatchMigrateResources. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Partial results that reflect the latest migration operation progress. */ partialResults?: GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult[]; } /** * Represents a partial result in batch migration operation for one * MigrateResourceRequest. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesOperationMetadataPartialResult { /** * Migrated dataset resource name. */ dataset?: string; /** * The error result of the migration request in case of failure. */ error?: GoogleRpcStatus; /** * Migrated model resource name. */ model?: string; /** * It's the same as the value in * BatchMigrateResourcesRequest.migrate_resource_requests. */ request?: GoogleCloudAiplatformV1MigrateResourceRequest; } /** * Request message for MigrationService.BatchMigrateResources. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesRequest { /** * Required. The request messages specifying the resources to migrate. They * must be in the same location as the destination. Up to 50 resources can be * migrated in one batch. */ migrateResourceRequests?: GoogleCloudAiplatformV1MigrateResourceRequest[]; } /** * Response message for MigrationService.BatchMigrateResources. */ export interface GoogleCloudAiplatformV1BatchMigrateResourcesResponse { /** * Successfully migrated resources. */ migrateResourceResponses?: GoogleCloudAiplatformV1MigrateResourceResponse[]; } /** * A job that uses a Model to produce predictions on multiple input instances. * If predictions for significant portion of the instances fail, the job may * finish without attempting predictions for all remaining instances. */ export interface GoogleCloudAiplatformV1BatchPredictionJob { /** * Output only. Statistics on completed and failed prediction instances. */ readonly completionStats?: GoogleCloudAiplatformV1CompletionStats; /** * Output only. Time when the BatchPredictionJob was created. */ readonly createTime?: Date; /** * The config of resources used by the Model during the batch prediction. 
If * the Model supports DEDICATED_RESOURCES this config may be provided (and the * job will use these resources), if the Model doesn't support * AUTOMATIC_RESOURCES, this config must be provided. */ dedicatedResources?: GoogleCloudAiplatformV1BatchDedicatedResources; /** * For custom-trained Models and AutoML Tabular Models, the container of the * DeployedModel instances will send `stderr` and `stdout` streams to Cloud * Logging by default. Please note that the logs incur cost, which are subject * to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User * can disable container logging by setting this flag to true. */ disableContainerLogging?: boolean; /** * Required. The user-defined name of this BatchPredictionJob. */ displayName?: string; /** * Customer-managed encryption key options for a BatchPredictionJob. If this * is set, then all resources created by the BatchPredictionJob will be * encrypted with the provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the BatchPredictionJob entered any of the following * states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when the job's state is JOB_STATE_FAILED or * JOB_STATE_CANCELLED. */ readonly error?: GoogleRpcStatus; /** * Explanation configuration for this BatchPredictionJob. Can be specified * only if generate_explanation is set to `true`. This value overrides the * value of Model.explanation_spec. All fields of explanation_spec are * optional in the request. If a field of the explanation_spec object is not * populated, the corresponding field of the Model.explanation_spec object is * inherited. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * Generate explanation with the batch prediction results. When set to * `true`, the batch prediction output changes based on the * `predictions_format` field of the BatchPredictionJob.output_config object: * * `bigquery`: output includes a column named `explanation`. The value is a * struct that conforms to the Explanation object. * `jsonl`: The JSON objects * on each line include an additional entry keyed `explanation`. The value of * the entry is a JSON object that conforms to the Explanation object. * * `csv`: Generating explanations for CSV format is not supported. If this * field is set to true, either the Model.explanation_spec or explanation_spec * must be populated. */ generateExplanation?: boolean; /** * Required. Input configuration of the instances on which predictions are * performed. The schema of any single instance may be specified via the * Model's PredictSchemata's instance_schema_uri. */ inputConfig?: GoogleCloudAiplatformV1BatchPredictionJobInputConfig; /** * Configuration for how to convert batch prediction input instances to the * prediction instances that are sent to the Model. */ instanceConfig?: GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig; /** * The labels with user-defined metadata to organize BatchPredictionJobs. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. */ labels?: { [key: string]: string }; /** * Immutable. Parameters configuring the batch behavior. 
Currently only * applicable when dedicated_resources are used (in other cases Vertex AI does * the tuning itself). */ manualBatchTuningParameters?: GoogleCloudAiplatformV1ManualBatchTuningParameters; /** * The name of the Model resource that produces the predictions via this job, * must share the same ancestor Location. Starting this job has no impact on * any existing deployments of the Model and their resources. Exactly one of * model and unmanaged_container_model must be set. The model resource name * may contain version id or version alias to specify the version. Example: * `projects/{project}/locations/{location}/models/{model}@2` or * `projects/{project}/locations/{location}/models/{model}@golden` if no * version is specified, the default version will be deployed. The model * resource could also be a publisher model. Example: * `publishers/{publisher}/models/{model}` or * `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` */ model?: string; /** * The parameters that govern the predictions. The schema of the parameters * may be specified via the Model's PredictSchemata's parameters_schema_uri. */ modelParameters?: any; /** * Output only. The version ID of the Model that produces the predictions via * this job. */ readonly modelVersionId?: string; /** * Output only. Resource name of the BatchPredictionJob. */ readonly name?: string; /** * Required. The Configuration specifying where output predictions should be * written. The schema of any single prediction may be specified as a * concatenation of Model's PredictSchemata's instance_schema_uri and * prediction_schema_uri. */ outputConfig?: GoogleCloudAiplatformV1BatchPredictionJobOutputConfig; /** * Output only. Information further describing the output of this job. */ readonly outputInfo?: GoogleCloudAiplatformV1BatchPredictionJobOutputInfo; /** * Output only. Partial failures encountered. For example, single files that * can't be read. This field never exceeds 20 entries. Status details fields * contain standard Google Cloud error details. */ readonly partialFailures?: GoogleRpcStatus[]; /** * Output only. Information about resources that had been consumed by this * job. Provided in real time at best effort basis, as well as a final value * once the job completes. Note: This field currently may be not populated for * batch predictions that use AutoML Models. */ readonly resourcesConsumed?: GoogleCloudAiplatformV1ResourcesConsumed; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * The service account that the DeployedModel's container runs as. If not * specified, a system generated one will be used, which has minimal * permissions and the custom container, if used, may not have enough * permission to access other Google Cloud resources. Users deploying the * Model must have the `iam.serviceAccounts.actAs` permission on this service * account. */ serviceAccount?: string; /** * Output only. Time when the BatchPredictionJob for the first time entered * the `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. 
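 *
 * Callers typically poll the job until it reaches a terminal state. A
 * minimal polling sketch (the 30-second interval and the chosen terminal set
 * are assumptions rather than API requirements; `client` is an `AIplatform`
 * instance and `jobName` is the full BatchPredictionJob resource name):
 *
 * ```ts
 * const terminal = ["JOB_STATE_SUCCEEDED", "JOB_STATE_FAILED", "JOB_STATE_CANCELLED"];
 * let job = await client.batchPredictionJobsGet(jobName);
 * while (!terminal.includes(job.state ?? "")) {
 *   await new Promise((resolve) => setTimeout(resolve, 30_000));
 *   job = await client.batchPredictionJobsGet(jobName);
 * }
 * ```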
*/ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Contains model information necessary to perform batch prediction without * requiring uploading to model registry. Exactly one of model and * unmanaged_container_model must be set. */ unmanagedContainerModel?: GoogleCloudAiplatformV1UnmanagedContainerModel; /** * Output only. Time when the BatchPredictionJob was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1BatchPredictionJob(data: any): GoogleCloudAiplatformV1BatchPredictionJob { return { ...data, unmanagedContainerModel: data["unmanagedContainerModel"] !== undefined ? serializeGoogleCloudAiplatformV1UnmanagedContainerModel(data["unmanagedContainerModel"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchPredictionJob(data: any): GoogleCloudAiplatformV1BatchPredictionJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, unmanagedContainerModel: data["unmanagedContainerModel"] !== undefined ? deserializeGoogleCloudAiplatformV1UnmanagedContainerModel(data["unmanagedContainerModel"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Configures the input to BatchPredictionJob. See * Model.supported_input_storage_formats for Model's supported input formats, * and how instances should be expressed via any of them. */ export interface GoogleCloudAiplatformV1BatchPredictionJobInputConfig { /** * The BigQuery location of the input table. The schema of the table should * be in the format described by the given context OpenAPI Schema, if one is * provided. The table may contain additional columns that are not described * by the schema, and they will be ignored. */ bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * The Cloud Storage location for the input instances. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Required. The format in which instances are given, must be one of the * Model's supported_input_storage_formats. */ instancesFormat?: string; } /** * Configuration defining how to transform batch prediction input instances to * the instances that the Model accepts. */ export interface GoogleCloudAiplatformV1BatchPredictionJobInstanceConfig { /** * Fields that will be excluded in the prediction instance that is sent to * the Model. Excluded will be attached to the batch prediction output if * key_field is not specified. When excluded_fields is populated, * included_fields must be empty. The input must be JSONL with objects at each * line, BigQuery or TfRecord. */ excludedFields?: string[]; /** * Fields that will be included in the prediction instance that is sent to * the Model. If instance_type is `array`, the order of field names in * included_fields also determines the order of the values in the array. When * included_fields is populated, excluded_fields must be empty. The input must * be JSONL with objects at each line, BigQuery or TfRecord. */ includedFields?: string[]; /** * The format of the instance that the Model accepts. 
Vertex AI will convert * compatible batch prediction input instance formats to the specified format. * Supported values are: * `object`: Each input is converted to JSON object * format. * For `bigquery`, each row is converted to an object. * For * `jsonl`, each line of the JSONL input must be an object. * Does not apply * to `csv`, `file-list`, `tf-record`, or `tf-record-gzip`. * `array`: Each * input is converted to JSON array format. * For `bigquery`, each row is * converted to an array. The order of columns is determined by the BigQuery * column order, unless included_fields is populated. included_fields must be * populated for specifying field orders. * For `jsonl`, if each line of the * JSONL input is an object, included_fields must be populated for specifying * field orders. * Does not apply to `csv`, `file-list`, `tf-record`, or * `tf-record-gzip`. If not specified, Vertex AI converts the batch prediction * input as follows: * For `bigquery` and `csv`, the behavior is the same as * `array`. The order of columns is the same as defined in the file or table, * unless included_fields is populated. * For `jsonl`, the prediction instance * format is determined by each line of the input. * For * `tf-record`/`tf-record-gzip`, each record will be converted to an object in * the format of `{"b64": <value>}`, where `<value>` is the Base64-encoded string of the * content of the record. * For `file-list`, each file in the list will be * converted to an object in the format of `{"b64": <value>}`, where `<value>` is the * Base64-encoded string of the content of the file. */ instanceType?: string; /** * The name of the field that is considered as a key. The values identified * by the key field are not included in the transformed instances that are sent * to the Model. This is similar to specifying the name of this field in * excluded_fields. In addition, the batch prediction output will not include * the instances. Instead the output will only include the value of the key * field, in a field named `key` in the output: * For `jsonl` output format, * the output will have a `key` field instead of the `instance` field. * For * `csv`/`bigquery` output format, the output will have a `key` column * instead of the instance feature columns. The input must be JSONL with * objects at each line, CSV, BigQuery or TfRecord. */ keyField?: string; } /** * Configures the output of BatchPredictionJob. See * Model.supported_output_storage_formats for supported output formats, and how * predictions are expressed via any of them. */ export interface GoogleCloudAiplatformV1BatchPredictionJobOutputConfig { /** * The BigQuery project or dataset location where the output is to be written * to. If project is provided, a new dataset is created with name * `prediction_<model-display-name>_<job-create-time>` where `<model-display-name>` is made BigQuery-dataset-name compatible (for example, * most special characters become underscores), and timestamp is in * YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset two * tables will be created, `predictions`, and `errors`. If the Model has both * instance and prediction schemata defined then the tables have columns as * follows: The `predictions` table contains instances for which the * prediction succeeded, it has columns as per a concatenation of the Model's * instance and prediction schemata. 
The `errors` table contains rows for * which the prediction has failed; it has instance columns, as per the * instance schema, followed by a single "errors" column, which as values has * google.rpc.Status represented as a STRUCT, and containing only `code` and * `message`. */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * The Cloud Storage location of the directory where the output is to be * written to. In the given directory a new directory is created. Its name is * `prediction-<model-display-name>-<job-create-time>`, where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 * format. Inside of it files `predictions_0001.<extension>`, `predictions_0002.<extension>`, ..., * `predictions_N.<extension>` are created where `<extension>` depends on chosen predictions_format, * and N may equal 0001 and depends on the total number of successfully * predicted instances. If the Model has both instance and prediction schemata * defined then each such file contains predictions as per the * predictions_format. If prediction for any instance failed (partially or * completely), then additional `errors_0001.<extension>`, `errors_0002.<extension>`, ..., * `errors_N.<extension>` files are created (N depends on total number of failed * predictions). These files contain the failed instances, as per their * schema, followed by an additional `error` field which as value has * google.rpc.Status containing only `code` and `message` fields. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; /** * Required. The format in which Vertex AI gives the predictions, must be one * of the Model's supported_output_storage_formats. */ predictionsFormat?: string; } /** * Further describes this job's output. Supplements output_config. */ export interface GoogleCloudAiplatformV1BatchPredictionJobOutputInfo { /** * Output only. The path of the BigQuery dataset created, in * `bq://projectId.bqDatasetId` format, into which the prediction output is * written. */ readonly bigqueryOutputDataset?: string; /** * Output only. The name of the BigQuery table created, in * `predictions_<timestamp>` format, into which the prediction output is written. Can be used by UI to * generate the BigQuery output path, for example. */ readonly bigqueryOutputTable?: string; /** * Output only. The full path of the Cloud Storage directory created, into * which the prediction output is written. */ readonly gcsOutputDirectory?: string; } /** * Details of operations that batch read Feature values. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesOperationMetadata { /** * Operation metadata for Featurestore batch read Features values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.BatchReadFeatureValues. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { /** * Similar to csv_read_instances, but from BigQuery source. */ bigqueryReadInstances?: GoogleCloudAiplatformV1BigQuerySource; /** * Each read instance consists of exactly one read timestamp and one or more * entity IDs identifying entities of the corresponding EntityTypes whose * Features are requested. Each output instance contains Feature values of * requested entities concatenated together as of the read time. An example * read instance may be `foo_entity_id, bar_entity_id, * 2020-01-01T10:00:00.123Z`. An example output instance may be * `foo_entity_id, bar_entity_id, 2020-01-01T10:00:00.123Z, * foo_entity_feature1_value, bar_entity_feature2_value`. Timestamp in each * read instance must be millisecond-aligned. 
`csv_read_instances` are read * instances stored in a plain-text CSV file. The header should be: * [ENTITY_TYPE_ID1], [ENTITY_TYPE_ID2], ..., timestamp The columns can be in * any order. Values in the timestamp column must use the RFC 3339 format, * e.g. `2012-07-30T10:43:17.123Z`. */ csvReadInstances?: GoogleCloudAiplatformV1CsvSource; /** * Required. Specifies output location and format. */ destination?: GoogleCloudAiplatformV1FeatureValueDestination; /** * Required. Specifies EntityType grouping Features to read values of and * settings. */ entityTypeSpecs?: GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec[]; /** * When not empty, the specified fields in the *_read_instances source will * be joined as-is in the output, in addition to those fields from the * Featurestore Entity. For BigQuery source, the type of the pass-through * values will be automatically inferred. For CSV source, the pass-through * values will be passed as opaque bytes. */ passThroughFields?: GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField[]; /** * Optional. Excludes Feature values with feature generation timestamp before * this timestamp. If not set, retrieve oldest values kept in Feature Store. * Timestamp, if present, must not have higher than millisecond precision. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1BatchReadFeatureValuesRequest(data: any): GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { return { ...data, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchReadFeatureValuesRequest(data: any): GoogleCloudAiplatformV1BatchReadFeatureValuesRequest { return { ...data, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Selects Features of an EntityType to read values of and specifies read * settings. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesRequestEntityTypeSpec { /** * Required. ID of the EntityType to select Features. The EntityType id is * the entity_type_id specified during EntityType creation. */ entityTypeId?: string; /** * Required. Selectors choosing which Feature values to read from the * EntityType. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; /** * Per-Feature settings for the batch read. */ settings?: GoogleCloudAiplatformV1DestinationFeatureSetting[]; } /** * Describe pass-through fields in read_instance source. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesRequestPassThroughField { /** * Required. The name of the field in the CSV header or the name of the * column in BigQuery table. The naming restriction is the same as * Feature.name. */ fieldName?: string; } /** * Response message for FeaturestoreService.BatchReadFeatureValues. */ export interface GoogleCloudAiplatformV1BatchReadFeatureValuesResponse { } /** * Response message for TensorboardService.BatchReadTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { /** * The returned time series data. */ timeSeriesData?: GoogleCloudAiplatformV1TimeSeriesData[]; } function serializeGoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? 
data["timeSeriesData"].map((item: any) => (serializeGoogleCloudAiplatformV1TimeSeriesData(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1BatchReadTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? data["timeSeriesData"].map((item: any) => (deserializeGoogleCloudAiplatformV1TimeSeriesData(item))) : undefined, }; } /** * The BigQuery location for the output content. */ export interface GoogleCloudAiplatformV1BigQueryDestination { /** * Required. BigQuery URI to a project or table, up to 2000 characters long. * When only the project is specified, the Dataset and Table is created. When * the full table reference is specified, the Dataset must exist and table * must not exist. Accepted forms: * BigQuery path. For example: * `bq://projectId` or `bq://projectId.bqDatasetId` or * `bq://projectId.bqDatasetId.bqTableId`. */ outputUri?: string; } /** * The BigQuery location for the input content. */ export interface GoogleCloudAiplatformV1BigQuerySource { /** * Required. BigQuery URI to a table, up to 2000 characters long. Accepted * forms: * BigQuery path. For example: * `bq://projectId.bqDatasetId.bqTableId`. */ inputUri?: string; } /** * Input for bleu metric. */ export interface GoogleCloudAiplatformV1BleuInput { /** * Required. Repeated bleu instances. */ instances?: GoogleCloudAiplatformV1BleuInstance[]; /** * Required. Spec for bleu score metric. */ metricSpec?: GoogleCloudAiplatformV1BleuSpec; } /** * Spec for bleu instance. */ export interface GoogleCloudAiplatformV1BleuInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Bleu metric value for an instance. */ export interface GoogleCloudAiplatformV1BleuMetricValue { /** * Output only. Bleu score. */ readonly score?: number; } /** * Results for bleu metric. */ export interface GoogleCloudAiplatformV1BleuResults { /** * Output only. Bleu metric values. */ readonly bleuMetricValues?: GoogleCloudAiplatformV1BleuMetricValue[]; } /** * Spec for bleu score metric - calculates the precision of n-grams in the * prediction as compared to reference - returns a score ranging between 0 to 1. */ export interface GoogleCloudAiplatformV1BleuSpec { /** * Optional. Whether to use_effective_order to compute bleu score. */ useEffectiveOrder?: boolean; } /** * Content blob. */ export interface GoogleCloudAiplatformV1Blob { /** * Required. Raw bytes. */ data?: Uint8Array; /** * Optional. Display name of the blob. Used to provide a label or filename to * distinguish blobs. This field is only returned in PromptMessage for prompt * management. It is not currently used in the Gemini GenerateContent calls. */ displayName?: string; /** * Required. The IANA standard MIME type of the source data. */ mimeType?: string; } function serializeGoogleCloudAiplatformV1Blob(data: any): GoogleCloudAiplatformV1Blob { return { ...data, data: data["data"] !== undefined ? encodeBase64(data["data"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Blob(data: any): GoogleCloudAiplatformV1Blob { return { ...data, data: data["data"] !== undefined ? decodeBase64(data["data"] as string) : undefined, }; } /** * Config for blur baseline. When enabled, a linear path from the maximally * blurred image to the input image is created. 
Using a blurred baseline instead * of zero (black image) is motivated by the BlurIG approach explained here: * https://arxiv.org/abs/2004.03383 */ export interface GoogleCloudAiplatformV1BlurBaselineConfig { /** * The standard deviation of the blur kernel for the blurred baseline. The * same blurring parameter is used for both the height and the width * dimension. If not set, the method defaults to the zero (i.e. black for * images) baseline. */ maxBlurSigma?: number; } /** * A list of boolean values. */ export interface GoogleCloudAiplatformV1BoolArray { /** * A list of bool values. */ values?: boolean[]; } /** * Config of GenAI caching features. This is a singleton resource. */ export interface GoogleCloudAiplatformV1CacheConfig { /** * If set to true, disables GenAI caching. Otherwise caching is enabled. */ disableCache?: boolean; /** * Identifier. Name of the cache config. Format: - * `projects/{project}/cacheConfig`. */ name?: string; } /** * A resource used in LLM queries for users to explicitly specify what to cache * and how to cache. */ export interface GoogleCloudAiplatformV1CachedContent { /** * Optional. Input only. Immutable. The content to cache */ contents?: GoogleCloudAiplatformV1Content[]; /** * Output only. Creation time of the cache entry. */ readonly createTime?: Date; /** * Optional. Immutable. The user-generated meaningful display name of the * cached content. */ displayName?: string; /** * Timestamp of when this resource is considered expired. This is *always* * provided on output, regardless of what was sent on input. */ expireTime?: Date; /** * Immutable. The name of the `Model` to use for cached content. Currently, * only the published Gemini base models are supported, in form of * projects/{PROJECT}/locations/{LOCATION}/publishers/google/models/{MODEL} */ model?: string; /** * Immutable. Identifier. The server-generated resource name of the cached * content Format: * projects/{project}/locations/{location}/cachedContents/{cached_content} */ name?: string; /** * Optional. Input only. Immutable. Developer set system instruction. * Currently, text only */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Optional. Input only. Immutable. Tool config. This config is shared for * all tools */ toolConfig?: GoogleCloudAiplatformV1ToolConfig; /** * Optional. Input only. Immutable. A list of `Tools` the model may use to * generate the next response */ tools?: GoogleCloudAiplatformV1Tool[]; /** * Input only. The TTL for this resource. The expiration time is computed: * now + TTL. */ ttl?: number /* Duration */; /** * Output only. When the cache entry was last updated in UTC time. */ readonly updateTime?: Date; /** * Output only. Metadata on the usage of the cached content. */ readonly usageMetadata?: GoogleCloudAiplatformV1CachedContentUsageMetadata; } function serializeGoogleCloudAiplatformV1CachedContent(data: any): GoogleCloudAiplatformV1CachedContent { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, expireTime: data["expireTime"] !== undefined ? data["expireTime"].toISOString() : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, ttl: data["ttl"] !== undefined ? 
data["ttl"] : undefined, }; } function deserializeGoogleCloudAiplatformV1CachedContent(data: any): GoogleCloudAiplatformV1CachedContent { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, expireTime: data["expireTime"] !== undefined ? new Date(data["expireTime"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, ttl: data["ttl"] !== undefined ? data["ttl"] : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Metadata on the usage of the cached content. */ export interface GoogleCloudAiplatformV1CachedContentUsageMetadata { /** * Duration of audio in seconds. */ audioDurationSeconds?: number; /** * Number of images. */ imageCount?: number; /** * Number of text characters. */ textCount?: number; /** * Total number of tokens that the cached content consumes. */ totalTokenCount?: number; /** * Duration of video in seconds. */ videoDurationSeconds?: number; } /** * Request message for JobService.CancelBatchPredictionJob. */ export interface GoogleCloudAiplatformV1CancelBatchPredictionJobRequest { } /** * Request message for JobService.CancelCustomJob. */ export interface GoogleCloudAiplatformV1CancelCustomJobRequest { } /** * Request message for JobService.CancelDataLabelingJob. */ export interface GoogleCloudAiplatformV1CancelDataLabelingJobRequest { } /** * Request message for JobService.CancelHyperparameterTuningJob. */ export interface GoogleCloudAiplatformV1CancelHyperparameterTuningJobRequest { } /** * Request message for JobService.CancelNasJob. */ export interface GoogleCloudAiplatformV1CancelNasJobRequest { } /** * Request message for PipelineService.CancelPipelineJob. */ export interface GoogleCloudAiplatformV1CancelPipelineJobRequest { } /** * Request message for PipelineService.CancelTrainingPipeline. */ export interface GoogleCloudAiplatformV1CancelTrainingPipelineRequest { } /** * Request message for GenAiTuningService.CancelTuningJob. */ export interface GoogleCloudAiplatformV1CancelTuningJobRequest { } /** * A response candidate generated from the model. */ export interface GoogleCloudAiplatformV1Candidate { /** * Output only. Average log probability score of the candidate. */ readonly avgLogprobs?: number; /** * Output only. Source attribution of the generated content. */ readonly citationMetadata?: GoogleCloudAiplatformV1CitationMetadata; /** * Output only. Content parts of the candidate. */ readonly content?: GoogleCloudAiplatformV1Content; /** * Output only. Describes the reason the mode stopped generating tokens in * more detail. This is only filled when `finish_reason` is set. */ readonly finishMessage?: string; /** * Output only. The reason why the model stopped generating tokens. If empty, * the model has not stopped generating the tokens. */ readonly finishReason?: | "FINISH_REASON_UNSPECIFIED" | "STOP" | "MAX_TOKENS" | "SAFETY" | "RECITATION" | "OTHER" | "BLOCKLIST" | "PROHIBITED_CONTENT" | "SPII" | "MALFORMED_FUNCTION_CALL"; /** * Output only. Metadata specifies sources used to ground generated content. 
*/ readonly groundingMetadata?: GoogleCloudAiplatformV1GroundingMetadata; /** * Output only. Index of the candidate. */ readonly index?: number; /** * Output only. Log-likelihood scores for the response tokens and top tokens */ readonly logprobsResult?: GoogleCloudAiplatformV1LogprobsResult; /** * Output only. List of ratings for the safety of a response candidate. There * is at most one rating per category. */ readonly safetyRatings?: GoogleCloudAiplatformV1SafetyRating[]; } /** * This message will be placed in the metadata field of a * google.longrunning.Operation associated with a CheckTrialEarlyStoppingState * request. */ export interface GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateMetatdata { /** * Operation metadata for suggesting Trials. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * The name of the Study that the Trial belongs to. */ study?: string; /** * The Trial name. */ trial?: string; } /** * Request message for VizierService.CheckTrialEarlyStoppingState. */ export interface GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateRequest { } /** * Response message for VizierService.CheckTrialEarlyStoppingState. */ export interface GoogleCloudAiplatformV1CheckTrialEarlyStoppingStateResponse { /** * True if the Trial should stop. */ shouldStop?: boolean; } /** * Source attributions for content. */ export interface GoogleCloudAiplatformV1Citation { /** * Output only. End index into the content. */ readonly endIndex?: number; /** * Output only. License of the attribution. */ readonly license?: string; /** * Output only. Publication date of the attribution. */ readonly publicationDate?: GoogleTypeDate; /** * Output only. Start index into the content. */ readonly startIndex?: number; /** * Output only. Title of the attribution. */ readonly title?: string; /** * Output only. Url reference of the attribution. */ readonly uri?: string; } /** * A collection of source attributions for a piece of content. */ export interface GoogleCloudAiplatformV1CitationMetadata { /** * Output only. List of citations. */ readonly citations?: GoogleCloudAiplatformV1Citation[]; } /** * Claim that is extracted from the input text and facts that support it. */ export interface GoogleCloudAiplatformV1Claim { /** * Index in the input text where the claim ends (exclusive). */ endIndex?: number; /** * Indexes of the facts supporting this claim. */ factIndexes?: number[]; /** * Confidence score of this corroboration. */ score?: number; /** * Index in the input text where the claim starts (inclusive). */ startIndex?: number; } /** * Configurations (e.g. inference timeout) that are applied on your endpoints. */ export interface GoogleCloudAiplatformV1ClientConnectionConfig { /** * Customizable online prediction request timeout. */ inferenceTimeout?: number /* Duration */; } function serializeGoogleCloudAiplatformV1ClientConnectionConfig(data: any): GoogleCloudAiplatformV1ClientConnectionConfig { return { ...data, inferenceTimeout: data["inferenceTimeout"] !== undefined ? data["inferenceTimeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1ClientConnectionConfig(data: any): GoogleCloudAiplatformV1ClientConnectionConfig { return { ...data, inferenceTimeout: data["inferenceTimeout"] !== undefined ? data["inferenceTimeout"] : undefined, }; } /** * Result of executing the [ExecutableCode]. Always follows a `part` containing * the [ExecutableCode]. */ export interface GoogleCloudAiplatformV1CodeExecutionResult { /** * Required. Outcome of the code execution. 
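A minimal handling sketch (illustrative only; `logExecution` is a hypothetical helper, not part of this client):

  function logExecution(result: GoogleCloudAiplatformV1CodeExecutionResult): void {
    // On success `output` carries stdout; otherwise it carries stderr or another description.
    if (result.outcome === "OUTCOME_OK") {
      console.log(result.output ?? "");
    } else {
      console.error(`code execution did not succeed (${result.outcome ?? "unknown"}):`, result.output);
    }
  }
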
*/ outcome?: | "OUTCOME_UNSPECIFIED" | "OUTCOME_OK" | "OUTCOME_FAILED" | "OUTCOME_DEADLINE_EXCEEDED"; /** * Optional. Contains stdout when code execution is successful, stderr or * other description otherwise. */ output?: string; } /** * Input for coherence metric. */ export interface GoogleCloudAiplatformV1CoherenceInput { /** * Required. Coherence instance. */ instance?: GoogleCloudAiplatformV1CoherenceInstance; /** * Required. Spec for coherence score metric. */ metricSpec?: GoogleCloudAiplatformV1CoherenceSpec; } /** * Spec for coherence instance. */ export interface GoogleCloudAiplatformV1CoherenceInstance { /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for coherence result. */ export interface GoogleCloudAiplatformV1CoherenceResult { /** * Output only. Confidence for coherence score. */ readonly confidence?: number; /** * Output only. Explanation for coherence score. */ readonly explanation?: string; /** * Output only. Coherence score. */ readonly score?: number; } /** * Spec for coherence score metric. */ export interface GoogleCloudAiplatformV1CoherenceSpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for Comet metric. */ export interface GoogleCloudAiplatformV1CometInput { /** * Required. Comet instance. */ instance?: GoogleCloudAiplatformV1CometInstance; /** * Required. Spec for comet metric. */ metricSpec?: GoogleCloudAiplatformV1CometSpec; } /** * Spec for Comet instance - The fields used for evaluation are dependent on * the comet version. */ export interface GoogleCloudAiplatformV1CometInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; /** * Optional. Source text in original language. */ source?: string; } /** * Spec for Comet result - calculates the comet score for the given instance * using the version specified in the spec. */ export interface GoogleCloudAiplatformV1CometResult { /** * Output only. Comet score. Range depends on version. */ readonly score?: number; } /** * Spec for Comet metric. */ export interface GoogleCloudAiplatformV1CometSpec { /** * Optional. Source language in BCP-47 format. */ sourceLanguage?: string; /** * Optional. Target language in BCP-47 format. Covers both prediction and * reference. */ targetLanguage?: string; /** * Required. Which version to use for evaluation. */ version?: | "COMET_VERSION_UNSPECIFIED" | "COMET_22_SRC_REF"; } /** * Request message for VizierService.CompleteTrial. */ export interface GoogleCloudAiplatformV1CompleteTrialRequest { /** * Optional. If provided, it will be used as the completed Trial's * final_measurement; Otherwise, the service will auto-select a previously * reported measurement as the final-measurement */ finalMeasurement?: GoogleCloudAiplatformV1Measurement; /** * Optional. A human readable reason why the trial was infeasible. This * should only be provided if `trial_infeasible` is true. */ infeasibleReason?: string; /** * Optional. True if the Trial cannot be run with the given Parameter, and * final_measurement will be ignored. */ trialInfeasible?: boolean; } /** * Success and error statistics of processing multiple entities (for example, * DataItems or structured data rows) in batch. */ export interface GoogleCloudAiplatformV1CompletionStats { /** * Output only. The number of entities for which any error was encountered. */ readonly failedCount?: bigint; /** * Output only. 
In cases when enough errors are encountered a job, pipeline, * or operation may be failed as a whole. Below is the number of entities for * which the processing had not been finished (either in successful or failed * state). Set to -1 if the number is unknown (for example, the operation * failed before the total entity number could be collected). */ readonly incompleteCount?: bigint; /** * Output only. The number of entities that had been processed successfully. */ readonly successfulCount?: bigint; /** * Output only. The number of the successful forecast points that are * generated by the forecasting model. This is ONLY used by the forecasting * batch prediction. */ readonly successfulForecastPointCount?: bigint; } /** * Request message for ComputeTokens RPC call. */ export interface GoogleCloudAiplatformV1ComputeTokensRequest { /** * Optional. Input content. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. The instances that are the input to token computing API call. * Schema is identical to the prediction schema of the text model, even for * the non-text models, like chat models, or Codey models. */ instances?: any[]; /** * Optional. The name of the publisher model requested to serve the * prediction. Format: * projects/{project}/locations/{location}/publishers/*\/models/* */ model?: string; } function serializeGoogleCloudAiplatformV1ComputeTokensRequest(data: any): GoogleCloudAiplatformV1ComputeTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ComputeTokensRequest(data: any): GoogleCloudAiplatformV1ComputeTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * Response message for ComputeTokens RPC call. */ export interface GoogleCloudAiplatformV1ComputeTokensResponse { /** * Lists of tokens info from the input. A ComputeTokensRequest could have * multiple instances with a prompt in each instance. We also need to return * lists of tokens info for the request with multiple instances. */ tokensInfo?: GoogleCloudAiplatformV1TokensInfo[]; } function serializeGoogleCloudAiplatformV1ComputeTokensResponse(data: any): GoogleCloudAiplatformV1ComputeTokensResponse { return { ...data, tokensInfo: data["tokensInfo"] !== undefined ? data["tokensInfo"].map((item: any) => (serializeGoogleCloudAiplatformV1TokensInfo(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ComputeTokensResponse(data: any): GoogleCloudAiplatformV1ComputeTokensResponse { return { ...data, tokensInfo: data["tokensInfo"] !== undefined ? data["tokensInfo"].map((item: any) => (deserializeGoogleCloudAiplatformV1TokensInfo(item))) : undefined, }; } /** * The Container Registry location for the container image. */ export interface GoogleCloudAiplatformV1ContainerRegistryDestination { /** * Required. Container Registry URI of a container image. Only Google * Container Registry and Artifact Registry are supported now. Accepted forms: * * Google Container Registry path. For example: * `gcr.io/projectId/imageName:tag`. * Artifact Registry path. For example: * `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`. If a tag is * not specified, "latest" will be used as the default tag. */ outputUri?: string; } /** * The spec of a Container. 
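For illustration, a hypothetical worker container spec might look like the sketch below (the image path, command, arguments, and environment values are invented placeholders, and the env entries assume the `GoogleCloudAiplatformV1EnvVar` name/value shape):

  const exampleContainerSpec: GoogleCloudAiplatformV1ContainerSpec = {
    // Hypothetical Artifact Registry image; replace with a real training image.
    imageUri: "us-docker.pkg.dev/example-project/training/trainer:latest",
    // Overrides the image's entrypoint.
    command: ["python3", "-m", "trainer.task"],
    args: ["--epochs=3"],
    env: [{ name: "STAGING_BUCKET", value: "gs://example-bucket" }],
  };
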
*/ export interface GoogleCloudAiplatformV1ContainerSpec { /** * The arguments to be passed when starting the container. */ args?: string[]; /** * The command to be invoked when the container is started. It overrides the * entrypoint instruction in Dockerfile when provided. */ command?: string[]; /** * Environment variables to be passed to the container. Maximum limit is 100. */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Required. The URI of a container image in the Container Registry that is * to be run on each worker replica. */ imageUri?: string; } /** * The base structured datatype containing multi-part content of a message. A * `Content` includes a `role` field designating the producer of the `Content` * and a `parts` field containing multi-part data that contains the content of * the message turn. */ export interface GoogleCloudAiplatformV1Content { /** * Required. Ordered `Parts` that constitute a single message. Parts may have * different IANA MIME types. */ parts?: GoogleCloudAiplatformV1Part[]; /** * Optional. The producer of the content. Must be either 'user' or 'model'. * Useful to set for multi-turn conversations, otherwise can be left blank or * unset. */ role?: string; } function serializeGoogleCloudAiplatformV1Content(data: any): GoogleCloudAiplatformV1Content { return { ...data, parts: data["parts"] !== undefined ? data["parts"].map((item: any) => (serializeGoogleCloudAiplatformV1Part(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Content(data: any): GoogleCloudAiplatformV1Content { return { ...data, parts: data["parts"] !== undefined ? data["parts"].map((item: any) => (deserializeGoogleCloudAiplatformV1Part(item))) : undefined, }; } /** * Map of placeholder in metric prompt template to contents of model input. */ export interface GoogleCloudAiplatformV1ContentMap { /** * Optional. Map of placeholder to contents. */ values?: { [key: string]: GoogleCloudAiplatformV1ContentMapContents }; } function serializeGoogleCloudAiplatformV1ContentMap(data: any): GoogleCloudAiplatformV1ContentMap { return { ...data, values: data["values"] !== undefined ? Object.fromEntries(Object.entries(data["values"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1ContentMapContents(v)]))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ContentMap(data: any): GoogleCloudAiplatformV1ContentMap { return { ...data, values: data["values"] !== undefined ? Object.fromEntries(Object.entries(data["values"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1ContentMapContents(v)]))) : undefined, }; } /** * Repeated Content type. */ export interface GoogleCloudAiplatformV1ContentMapContents { /** * Optional. Repeated contents. */ contents?: GoogleCloudAiplatformV1Content[]; } function serializeGoogleCloudAiplatformV1ContentMapContents(data: any): GoogleCloudAiplatformV1ContentMapContents { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ContentMapContents(data: any): GoogleCloudAiplatformV1ContentMapContents { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, }; } /** * Instance of a general context. */ export interface GoogleCloudAiplatformV1Context { /** * Output only. Timestamp when this Context was created. 
*/ readonly createTime?: Date; /** * Description of the Context */ description?: string; /** * User provided display name of the Context. May be up to 128 Unicode * characters. */ displayName?: string; /** * An eTag used to perform consistent read-modify-write updates. If not set, * a blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Contexts. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Context (System labels are excluded). */ labels?: { [key: string]: string }; /** * Properties of the Context. Top level metadata keys' heading and trailing * spaces will be trimmed. The size of this field should not exceed 200KB. */ metadata?: { [key: string]: any }; /** * Immutable. The resource name of the Context. */ name?: string; /** * Output only. A list of resource names of Contexts that are parents of this * Context. A Context may have at most 10 parent_contexts. */ readonly parentContexts?: string[]; /** * The title of the schema describing the metadata. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaTitle?: string; /** * The version of the schema in schema_name to use. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaVersion?: string; /** * Output only. Timestamp when this Context was last updated. */ readonly updateTime?: Date; } /** * Details of ModelService.CopyModel operation. */ export interface GoogleCloudAiplatformV1CopyModelOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for ModelService.CopyModel. */ export interface GoogleCloudAiplatformV1CopyModelRequest { /** * Customer-managed encryption key options. If this is set, then the Model * copy will be encrypted with the provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Optional. Copy source_model into a new Model with this ID. The ID will * become the final component of the model resource name. This value may be up * to 63 characters, and valid characters are `[a-z0-9_-]`. The first * character cannot be a number or hyphen. */ modelId?: string; /** * Optional. Specify this field to copy source_model into this existing Model * as a new version. Format: * `projects/{project}/locations/{location}/models/{model}` */ parentModel?: string; /** * Required. The resource name of the Model to copy. That Model must be in * the same Project. Format: * `projects/{project}/locations/{location}/models/{model}` */ sourceModel?: string; } /** * Response message of ModelService.CopyModel operation. */ export interface GoogleCloudAiplatformV1CopyModelResponse { /** * The name of the copied Model resource. Format: * `projects/{project}/locations/{location}/models/{model}` */ model?: string; /** * Output only. The version ID of the model that is copied. */ readonly modelVersionId?: string; } /** * RagCorpus status. */ export interface GoogleCloudAiplatformV1CorpusStatus { /** * Output only. Only when the `state` field is ERROR. 
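A small illustrative check (sketch only; `corpusError` is a hypothetical helper, not part of this client):

  function corpusError(status: GoogleCloudAiplatformV1CorpusStatus): string | undefined {
    // errorStatus is only meaningful while the RagCorpus is in the ERROR state.
    return status.state === "ERROR" ? status.errorStatus : undefined;
  }
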
 */ readonly errorStatus?: string; /** * Output only. RagCorpus life state. */ readonly state?: | "UNKNOWN" | "INITIALIZED" | "ACTIVE" | "ERROR"; } /** * Request message for CorroborateContent. */ export interface GoogleCloudAiplatformV1CorroborateContentRequest { /** * Optional. Input content to corroborate, only text format is supported for * now. */ content?: GoogleCloudAiplatformV1Content; /** * Optional. Facts used to generate the text can also be used to corroborate * the text. */ facts?: GoogleCloudAiplatformV1Fact[]; /** * Optional. Parameters that can be set to override default settings per * request. */ parameters?: GoogleCloudAiplatformV1CorroborateContentRequestParameters; } function serializeGoogleCloudAiplatformV1CorroborateContentRequest(data: any): GoogleCloudAiplatformV1CorroborateContentRequest { return { ...data, content: data["content"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["content"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CorroborateContentRequest(data: any): GoogleCloudAiplatformV1CorroborateContentRequest { return { ...data, content: data["content"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["content"]) : undefined, }; } /** * Parameters that can be overridden per request. */ export interface GoogleCloudAiplatformV1CorroborateContentRequestParameters { /** * Optional. Only return claims with citation score larger than the * threshold. */ citationThreshold?: number; } /** * Response message for CorroborateContent. */ export interface GoogleCloudAiplatformV1CorroborateContentResponse { /** * Claims that are extracted from the input content and facts that support * the claims. */ claims?: GoogleCloudAiplatformV1Claim[]; /** * Confidence score of corroborating content. Value is [0,1], with 1 being the * most confident. */ corroborationScore?: number; } /** * Request message for PredictionService.CountTokens. */ export interface GoogleCloudAiplatformV1CountTokensRequest { /** * Optional. Input content. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. Generation config that the model will use to generate the * response. */ generationConfig?: GoogleCloudAiplatformV1GenerationConfig; /** * Optional. The instances that are the input to the token counting call. Schema * is identical to the prediction schema of the underlying model. */ instances?: any[]; /** * Optional. The name of the publisher model requested to serve the * prediction. Format: * `projects/{project}/locations/{location}/publishers/*\/models/*` */ model?: string; /** * Optional. The user provided system instructions for the model. Note: only * text should be used in parts and content in each part will be in a separate * paragraph. */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Optional. A list of `Tools` the model may use to generate the next * response. A `Tool` is a piece of code that enables the system to interact * with external systems to perform an action, or set of actions, outside of * knowledge and scope of the model. */ tools?: GoogleCloudAiplatformV1Tool[]; } function serializeGoogleCloudAiplatformV1CountTokensRequest(data: any): GoogleCloudAiplatformV1CountTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? 
serializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1CountTokensRequest(data: any): GoogleCloudAiplatformV1CountTokensRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } /** * Response message for PredictionService.CountTokens. */ export interface GoogleCloudAiplatformV1CountTokensResponse { /** * Output only. List of modalities that were processed in the request input. */ readonly promptTokensDetails?: GoogleCloudAiplatformV1ModalityTokenCount[]; /** * The total number of billable characters counted across all instances from * the request. */ totalBillableCharacters?: number; /** * The total number of tokens counted across all instances from the request. */ totalTokens?: number; } /** * Runtime operation information for DatasetService.CreateDataset. */ export interface GoogleCloudAiplatformV1CreateDatasetOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for DatasetService.CreateDatasetVersion. */ export interface GoogleCloudAiplatformV1CreateDatasetVersionOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for CreateDeploymentResourcePool method. */ export interface GoogleCloudAiplatformV1CreateDeploymentResourcePoolOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for CreateDeploymentResourcePool method. */ export interface GoogleCloudAiplatformV1CreateDeploymentResourcePoolRequest { /** * Required. The DeploymentResourcePool to create. */ deploymentResourcePool?: GoogleCloudAiplatformV1DeploymentResourcePool; /** * Required. The ID to use for the DeploymentResourcePool, which will become * the final component of the DeploymentResourcePool's resource name. The * maximum length is 63 characters, and valid characters are * `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. */ deploymentResourcePoolId?: string; } /** * Runtime operation information for EndpointService.CreateEndpoint. */ export interface GoogleCloudAiplatformV1CreateEndpointOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create EntityType. */ export interface GoogleCloudAiplatformV1CreateEntityTypeOperationMetadata { /** * Operation metadata for EntityType. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create FeatureGroup. 
*/ export interface GoogleCloudAiplatformV1CreateFeatureGroupOperationMetadata { /** * Operation metadata for FeatureGroup. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create FeatureOnlineStore. */ export interface GoogleCloudAiplatformV1CreateFeatureOnlineStoreOperationMetadata { /** * Operation metadata for FeatureOnlineStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create Feature. */ export interface GoogleCloudAiplatformV1CreateFeatureOperationMetadata { /** * Operation metadata for Feature. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.CreateFeature. Request message for * FeatureRegistryService.CreateFeature. */ export interface GoogleCloudAiplatformV1CreateFeatureRequest { /** * Required. The Feature to create. */ feature?: GoogleCloudAiplatformV1Feature; /** * Required. The ID to use for the Feature, which will become the final * component of the Feature's resource name. This value may be up to 128 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within an * EntityType/FeatureGroup. */ featureId?: string; /** * Required. The resource name of the EntityType or FeatureGroup to create a * Feature. Format for entity_type as parent: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` * Format for feature_group as parent: * `projects/{project}/locations/{location}/featureGroups/{feature_group}` */ parent?: string; } /** * Details of operations that perform create Featurestore. */ export interface GoogleCloudAiplatformV1CreateFeaturestoreOperationMetadata { /** * Operation metadata for Featurestore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create FeatureView. */ export interface GoogleCloudAiplatformV1CreateFeatureViewOperationMetadata { /** * Operation metadata for FeatureView Create. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for IndexEndpointService.CreateIndexEndpoint. */ export interface GoogleCloudAiplatformV1CreateIndexEndpointOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for IndexService.CreateIndex. */ export interface GoogleCloudAiplatformV1CreateIndexOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * The operation metadata with regard to Matching Engine Index operation. */ nearestNeighborSearchOperationMetadata?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; } function serializeGoogleCloudAiplatformV1CreateIndexOperationMetadata(data: any): GoogleCloudAiplatformV1CreateIndexOperationMetadata { return { ...data, nearestNeighborSearchOperationMetadata: data["nearestNeighborSearchOperationMetadata"] !== undefined ? 
serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data["nearestNeighborSearchOperationMetadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreateIndexOperationMetadata(data: any): GoogleCloudAiplatformV1CreateIndexOperationMetadata { return { ...data, nearestNeighborSearchOperationMetadata: data["nearestNeighborSearchOperationMetadata"] !== undefined ? deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data["nearestNeighborSearchOperationMetadata"]) : undefined, }; } /** * Details of operations that perform MetadataService.CreateMetadataStore. */ export interface GoogleCloudAiplatformV1CreateMetadataStoreOperationMetadata { /** * Operation metadata for creating a MetadataStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Metadata information for NotebookService.CreateNotebookExecutionJob. */ export interface GoogleCloudAiplatformV1CreateNotebookExecutionJobOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * A human-readable message that shows the intermediate progress details of * NotebookRuntime. */ progressMessage?: string; } /** * Request message for [NotebookService.CreateNotebookExecutionJob] */ export interface GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest { /** * Required. The NotebookExecutionJob to create. */ notebookExecutionJob?: GoogleCloudAiplatformV1NotebookExecutionJob; /** * Optional. User specified ID for the NotebookExecutionJob. */ notebookExecutionJobId?: string; /** * Required. The resource name of the Location to create the * NotebookExecutionJob. Format: `projects/{project}/locations/{location}` */ parent?: string; } function serializeGoogleCloudAiplatformV1CreateNotebookExecutionJobRequest(data: any): GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest { return { ...data, notebookExecutionJob: data["notebookExecutionJob"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookExecutionJob(data["notebookExecutionJob"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreateNotebookExecutionJobRequest(data: any): GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest { return { ...data, notebookExecutionJob: data["notebookExecutionJob"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookExecutionJob(data["notebookExecutionJob"]) : undefined, }; } /** * Metadata information for NotebookService.CreateNotebookRuntimeTemplate. */ export interface GoogleCloudAiplatformV1CreateNotebookRuntimeTemplateOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create PersistentResource. */ export interface GoogleCloudAiplatformV1CreatePersistentResourceOperationMetadata { /** * Operation metadata for PersistentResource. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Progress Message for Create LRO */ progressMessage?: string; } /** * Request message for PipelineService.CreatePipelineJob. */ export interface GoogleCloudAiplatformV1CreatePipelineJobRequest { /** * Required. The resource name of the Location to create the PipelineJob in. * Format: `projects/{project}/locations/{location}` */ parent?: string; /** * Required. The PipelineJob to create. */ pipelineJob?: GoogleCloudAiplatformV1PipelineJob; /** * The ID to use for the PipelineJob, which will become the final component * of the PipelineJob name. 
If not provided, an ID will be automatically * generated. This value should be less than 128 characters, and valid * characters are `/a-z-/`. */ pipelineJobId?: string; } function serializeGoogleCloudAiplatformV1CreatePipelineJobRequest(data: any): GoogleCloudAiplatformV1CreatePipelineJobRequest { return { ...data, pipelineJob: data["pipelineJob"] !== undefined ? serializeGoogleCloudAiplatformV1PipelineJob(data["pipelineJob"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreatePipelineJobRequest(data: any): GoogleCloudAiplatformV1CreatePipelineJobRequest { return { ...data, pipelineJob: data["pipelineJob"] !== undefined ? deserializeGoogleCloudAiplatformV1PipelineJob(data["pipelineJob"]) : undefined, }; } /** * Details of operations that perform create FeatureGroup. */ export interface GoogleCloudAiplatformV1CreateRegistryFeatureOperationMetadata { /** * Operation metadata for Feature. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for * SpecialistPoolService.CreateSpecialistPool. */ export interface GoogleCloudAiplatformV1CreateSpecialistPoolOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform create Tensorboard. */ export interface GoogleCloudAiplatformV1CreateTensorboardOperationMetadata { /** * Operation metadata for Tensorboard. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for TensorboardService.CreateTensorboardRun. */ export interface GoogleCloudAiplatformV1CreateTensorboardRunRequest { /** * Required. The resource name of the TensorboardExperiment to create the * TensorboardRun in. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ parent?: string; /** * Required. The TensorboardRun to create. */ tensorboardRun?: GoogleCloudAiplatformV1TensorboardRun; /** * Required. The ID to use for the Tensorboard run, which becomes the final * component of the Tensorboard run's resource name. This value should be * 1-128 characters, and valid characters are `/a-z-/`. */ tensorboardRunId?: string; } /** * Request message for TensorboardService.CreateTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { /** * Required. The resource name of the TensorboardRun to create the * TensorboardTimeSeries in. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ parent?: string; /** * Required. The TensorboardTimeSeries to create. */ tensorboardTimeSeries?: GoogleCloudAiplatformV1TensorboardTimeSeries; /** * Optional. The user specified unique ID to use for the * TensorboardTimeSeries, which becomes the final component of the * TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, * 127}" */ tensorboardTimeSeriesId?: string; } function serializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? 
serializeGoogleCloudAiplatformV1TensorboardTimeSeries(data["tensorboardTimeSeries"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest(data: any): GoogleCloudAiplatformV1CreateTensorboardTimeSeriesRequest { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data["tensorboardTimeSeries"]) : undefined, }; } /** * The storage details for CSV output content. */ export interface GoogleCloudAiplatformV1CsvDestination { /** * Required. Google Cloud Storage location. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; } /** * The storage details for CSV input content. */ export interface GoogleCloudAiplatformV1CsvSource { /** * Required. Google Cloud Storage location. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * Represents a job that runs custom workloads such as a Docker container or a * Python package. A CustomJob can have multiple worker pools and each worker * pool can have its own machine and input spec. A CustomJob will be cleaned up * once the job enters terminal state (failed or succeeded). */ export interface GoogleCloudAiplatformV1CustomJob { /** * Output only. Time when the CustomJob was created. */ readonly createTime?: Date; /** * Required. The display name of the CustomJob. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key options for a CustomJob. If this is set, * then all resources created by the CustomJob will be encrypted with the * provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the CustomJob entered any of the following states: * `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is `JOB_STATE_FAILED` or * `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * Required. Job spec. */ jobSpec?: GoogleCloudAiplatformV1CustomJobSpec; /** * The labels with user-defined metadata to organize CustomJobs. Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Resource name of a CustomJob. */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the CustomJob for the first time entered the * `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Output only. Time when the CustomJob was most recently updated. */ readonly updateTime?: Date; /** * Output only. URIs for accessing [interactive * shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) * (one URI for each training node). Only available if * job_spec.enable_web_access is `true`. 
The keys are names of each node in * the training job; for example, `workerpool0-0` for the primary node, * `workerpool1-0` for the first node in the second worker pool, and * `workerpool1-1` for the second node in the second worker pool. The values * are the URIs for each node's interactive shell. */ readonly webAccessUris?: { [key: string]: string }; } function serializeGoogleCloudAiplatformV1CustomJob(data: any): GoogleCloudAiplatformV1CustomJob { return { ...data, jobSpec: data["jobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["jobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1CustomJob(data: any): GoogleCloudAiplatformV1CustomJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, jobSpec: data["jobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["jobSpec"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Represents the spec of a CustomJob. */ export interface GoogleCloudAiplatformV1CustomJobSpec { /** * The Cloud Storage location to store the output of this CustomJob or * HyperparameterTuningJob. For HyperparameterTuningJob, the * baseOutputDirectory of each child CustomJob backing a Trial is set to a * subdirectory of name id under its parent HyperparameterTuningJob's * baseOutputDirectory. The following Vertex AI environment variables will be * passed to containers or python modules when this field is set: For * CustomJob: * AIP_MODEL_DIR = `/model/` * AIP_CHECKPOINT_DIR = * `/checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `/logs/` For CustomJob backing * a Trial of HyperparameterTuningJob: * AIP_MODEL_DIR = `//model/` * * AIP_CHECKPOINT_DIR = `//checkpoints/` * AIP_TENSORBOARD_LOG_DIR = `//logs/` */ baseOutputDirectory?: GoogleCloudAiplatformV1GcsDestination; /** * Optional. Whether you want Vertex AI to enable access to the customized * dashboard in training chief container. If set to `true`, you can access the * dashboard at the URIs given by CustomJob.web_access_uris or * Trial.web_access_uris (within HyperparameterTuningJob.trials). */ enableDashboardAccess?: boolean; /** * Optional. Whether you want Vertex AI to enable [interactive shell * access](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) * to training containers. If set to `true`, you can access interactive shells * at the URIs given by CustomJob.web_access_uris or Trial.web_access_uris * (within HyperparameterTuningJob.trials). */ enableWebAccess?: boolean; /** * Optional. The Experiment associated with this job. Format: * `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}` */ experiment?: string; /** * Optional. The Experiment Run associated with this job. Format: * `projects/{project}/locations/{location}/metadataStores/{metadataStores}/contexts/{experiment-name}-{experiment-run-name}` */ experimentRun?: string; /** * Optional. The name of the Model resources for which to generate a mapping * to artifact URIs. Applicable only to some of the Google-provided custom * jobs. Format: `projects/{project}/locations/{location}/models/{model}` In * order to retrieve a specific version of the model, also provide the version * ID or version alias. 
Example: * `projects/{project}/locations/{location}/models/{model}@2` or * `projects/{project}/locations/{location}/models/{model}@golden`. If no * version ID or alias is specified, the "default" version will be returned. * The "default" version alias is created for the first version of the model, * and can be moved to other versions later on. There will be exactly one * default version. */ models?: string[]; /** * Optional. The full name of the Compute Engine * [network](/compute/docs/networks-and-firewalls#networks) to which the Job * should be peered. For example, `projects/12345/global/networks/myVPC`. * [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in `12345`, and {network} is a network name. To specify * this field, you must have already [configured VPC Network Peering for * Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If * this field is left unspecified, the job is not peered with any network. */ network?: string; /** * Optional. The ID of the PersistentResource in the same Project and * Location in which to run the job. If this is specified, the job will be run on existing * machines held by the PersistentResource instead of on-demand short-lived * machines. The network and CMEK configs on the job should be consistent with * those on the PersistentResource; otherwise, the job will be rejected. */ persistentResourceId?: string; /** * The ID of the location to store protected artifacts. e.g. us-central1. * Populate only when the location is different than CustomJob location. List * of supported locations: * https://cloud.google.com/vertex-ai/docs/general/locations */ protectedArtifactLocationId?: string; /** * Optional. A list of names for the reserved ip ranges under the VPC network * that can be used for this job. If set, we will deploy the job within the * provided ip ranges. Otherwise, the job will be deployed to any ip ranges * under the provided VPC network. Example: ['vertex-ai-ip-range']. */ reservedIpRanges?: string[]; /** * Scheduling options for a CustomJob. */ scheduling?: GoogleCloudAiplatformV1Scheduling; /** * Specifies the service account for workload run-as account. Users * submitting jobs must have act-as permission on this run-as account. If * unspecified, the [Vertex AI Custom Code Service * Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) * for the CustomJob's project is used. */ serviceAccount?: string; /** * Optional. The name of a Vertex AI Tensorboard resource to which this * CustomJob will upload Tensorboard logs. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ tensorboard?: string; /** * Required. The spec of the worker pools including machine type and Docker * image. All worker pools except the first one are optional and can be * skipped by providing an empty value. */ workerPoolSpecs?: GoogleCloudAiplatformV1WorkerPoolSpec[]; } function serializeGoogleCloudAiplatformV1CustomJobSpec(data: any): GoogleCloudAiplatformV1CustomJobSpec { return { ...data, scheduling: data["scheduling"] !== undefined ? serializeGoogleCloudAiplatformV1Scheduling(data["scheduling"]) : undefined, workerPoolSpecs: data["workerPoolSpecs"] !== undefined ?
data["workerPoolSpecs"].map((item: any) => (serializeGoogleCloudAiplatformV1WorkerPoolSpec(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1CustomJobSpec(data: any): GoogleCloudAiplatformV1CustomJobSpec { return { ...data, scheduling: data["scheduling"] !== undefined ? deserializeGoogleCloudAiplatformV1Scheduling(data["scheduling"]) : undefined, workerPoolSpecs: data["workerPoolSpecs"] !== undefined ? data["workerPoolSpecs"].map((item: any) => (deserializeGoogleCloudAiplatformV1WorkerPoolSpec(item))) : undefined, }; } /** * Spec for custom output. */ export interface GoogleCloudAiplatformV1CustomOutput { /** * Output only. List of raw output strings. */ readonly rawOutputs?: GoogleCloudAiplatformV1RawOutput; } /** * Spec for custom output format configuration. */ export interface GoogleCloudAiplatformV1CustomOutputFormatConfig { /** * Optional. Whether to return raw output. */ returnRawOutput?: boolean; } /** * A piece of data in a Dataset. Could be an image, a video, a document or * plain text. */ export interface GoogleCloudAiplatformV1DataItem { /** * Output only. Timestamp when this DataItem was created. */ readonly createTime?: Date; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * DataItems. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * No more than 64 user labels can be associated with one DataItem(System * labels are excluded). See https://goo.gl/xmQnxf for more information and * examples of labels. System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Output only. The resource name of the DataItem. */ readonly name?: string; /** * Required. The data that the DataItem represents (for example, an image or * a text snippet). The schema of the payload is stored in the parent * Dataset's metadata schema's dataItemSchemaUri field. */ payload?: any; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this DataItem was last updated. */ readonly updateTime?: Date; } /** * A container for a single DataItem and Annotations on it. */ export interface GoogleCloudAiplatformV1DataItemView { /** * The Annotations on the DataItem. If too many Annotations should be * returned for the DataItem, this field will be truncated per * annotations_limit in request. If it was, then the has_truncated_annotations * will be set to true. */ annotations?: GoogleCloudAiplatformV1Annotation[]; /** * The DataItem. */ dataItem?: GoogleCloudAiplatformV1DataItem; /** * True if and only if the Annotations field has been truncated. It happens * if more Annotations for this DataItem met the request's annotation_filter * than are allowed to be returned by annotations_limit. Note that if * Annotations field is not being returned due to field mask, then this field * will not be set to true no matter how many Annotations are there. 
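 *
 * Illustrative sketch only (the helper below is hypothetical and not part of
 * this generated client): check the flag before relying on the annotation
 * list.
 *
 * ```ts
 * function allAnnotations(view: GoogleCloudAiplatformV1DataItemView) {
 *   if (view.hasTruncatedAnnotations) {
 *     // Not every Annotation was returned; re-query with a larger
 *     // annotations limit before treating this list as complete.
 *   }
 *   return view.annotations ?? [];
 * }
 * ```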
*/ hasTruncatedAnnotations?: boolean; } /** * DataLabelingJob is used to trigger a human labeling job on unlabeled data * from the following Dataset: */ export interface GoogleCloudAiplatformV1DataLabelingJob { /** * Parameters that configure the active learning pipeline. Active learning * will label the data incrementally via several iterations. For every * iteration, it will select a batch of data based on the sampling strategy. */ activeLearningConfig?: GoogleCloudAiplatformV1ActiveLearningConfig; /** * Labels to assign to annotations generated by this DataLabelingJob. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. See https://goo.gl/xmQnxf for * more information and examples of labels. System reserved label keys are * prefixed with "aiplatform.googleapis.com/" and are immutable. */ annotationLabels?: { [key: string]: string }; /** * Output only. Timestamp when this DataLabelingJob was created. */ readonly createTime?: Date; /** * Output only. Estimated cost(in US dollars) that the DataLabelingJob has * incurred to date. */ readonly currentSpend?: GoogleTypeMoney; /** * Required. Dataset resource names. Right now we only support labeling from * a single Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}` */ datasets?: string[]; /** * Required. The user-defined name of the DataLabelingJob. The name can be up * to 128 characters long and can consist of any UTF-8 characters. Display * name of a DataLabelingJob. */ displayName?: string; /** * Customer-managed encryption key spec for a DataLabelingJob. If set, this * DataLabelingJob will be secured by this key. Note: Annotations created in * the DataLabelingJob are associated with the EncryptionSpec of the Dataset * they are exported to. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. DataLabelingJob errors. It is only populated when job's state * is `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * Required. Input config parameters for the DataLabelingJob. */ inputs?: any; /** * Required. Points to a YAML file stored on Google Cloud Storage describing * the config for a specific type of DataLabelingJob. The schema files that * can be used here are found in the * https://storage.googleapis.com/google-cloud-aiplatform bucket in the * /schema/datalabelingjob/inputs/ folder. */ inputsSchemaUri?: string; /** * Required. The Google Cloud Storage location of the instruction pdf. This * pdf is shared with labelers, and provides detailed description on how to * label DataItems in Datasets. */ instructionUri?: string; /** * Required. Number of labelers to work on each DataItem. */ labelerCount?: number; /** * Output only. Current labeling job progress percentage scaled in interval * [0, 100], indicating the percentage of DataItems that has been finished. */ readonly labelingProgress?: number; /** * The labels with user-defined metadata to organize your DataLabelingJobs. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. 
Following system labels exist for each DataLabelingJob: * * "aiplatform.googleapis.com/schema": output only, its value is the * inputs_schema's title. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the DataLabelingJob. */ readonly name?: string; /** * The SpecialistPools' resource names associated with this job. */ specialistPools?: string[]; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Output only. Timestamp when this DataLabelingJob was updated most * recently. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1DataLabelingJob(data: any): GoogleCloudAiplatformV1DataLabelingJob { return { ...data, activeLearningConfig: data["activeLearningConfig"] !== undefined ? serializeGoogleCloudAiplatformV1ActiveLearningConfig(data["activeLearningConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DataLabelingJob(data: any): GoogleCloudAiplatformV1DataLabelingJob { return { ...data, activeLearningConfig: data["activeLearningConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ActiveLearningConfig(data["activeLearningConfig"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, currentSpend: data["currentSpend"] !== undefined ? deserializeGoogleTypeMoney(data["currentSpend"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * A collection of DataItems and Annotations on them. */ export interface GoogleCloudAiplatformV1Dataset { /** * Output only. Timestamp when this Dataset was created. */ readonly createTime?: Date; /** * Output only. The number of DataItems in this Dataset. Only apply for * non-structured Dataset. */ readonly dataItemCount?: bigint; /** * The description of the Dataset. */ description?: string; /** * Required. The user-defined name of the Dataset. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for a Dataset. If set, this Dataset * and all sub-resources of this Dataset will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Datasets. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Dataset (System labels are excluded). See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. Following system labels exist for each Dataset: * * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its value * is the metadata_schema's title. */ labels?: { [key: string]: string }; /** * Required. Additional information about the Dataset. */ metadata?: any; /** * Output only. 
The resource name of the Artifact that was created in * MetadataStore when creating the Dataset. The Artifact resource name pattern * is * `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. */ readonly metadataArtifact?: string; /** * Required. Points to a YAML file stored on Google Cloud Storage describing * additional information about the Dataset. The schema is defined as an * OpenAPI 3.0.2 Schema Object. The schema files that can be used here are * found in gs://google-cloud-aiplatform/schema/dataset/metadata/. */ metadataSchemaUri?: string; /** * Optional. Reference to the public base model last used by the dataset. * Only set for prompt datasets. */ modelReference?: string; /** * Output only. Identifier. The resource name of the Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}` */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * All SavedQueries belonging to the Dataset will be returned in the List/Get * Dataset response. The annotation_specs field will not be populated except * for UI cases which will only use annotation_spec_count. In a CreateDataset * request, a SavedQuery is created together with the Dataset if this field is set; up to one * SavedQuery can be set in a CreateDatasetRequest. The SavedQuery should not * contain any AnnotationSpec. */ savedQueries?: GoogleCloudAiplatformV1SavedQuery[]; /** * Output only. Timestamp when this Dataset was last updated. */ readonly updateTime?: Date; } /** * Describes the dataset version. */ export interface GoogleCloudAiplatformV1DatasetVersion { /** * Output only. Name of the associated BigQuery dataset. */ readonly bigQueryDatasetName?: string; /** * Output only. Timestamp when this DatasetVersion was created. */ readonly createTime?: Date; /** * The user-defined name of the DatasetVersion. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * Required. Output only. Additional information about the DatasetVersion. */ readonly metadata?: any; /** * Output only. Reference to the public base model last used by the dataset * version. Only set for prompt dataset versions. */ readonly modelReference?: string; /** * Output only. Identifier. The resource name of the DatasetVersion. Format: * `projects/{project}/locations/{location}/datasets/{dataset}/datasetVersions/{dataset_version}` */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this DatasetVersion was last updated. */ readonly updateTime?: Date; } /** * A description of resources that are dedicated to a DeployedModel or * DeployedIndex, and that need a higher degree of manual configuration. */ export interface GoogleCloudAiplatformV1DedicatedResources { /** * Immutable. The metric specifications that override a resource utilization * metric (CPU utilization, accelerator's duty cycle, and so on) target value * (default is 60 if not set). At most one entry is allowed per metric.
If * machine_spec.accelerator_count is above 0, the autoscaling will be based on * both CPU utilization and accelerator's duty cycle metrics and scale up when * either metrics exceeds its target value while scale down if both metrics * are under their target value. The default target value is 60 for both * metrics. If machine_spec.accelerator_count is 0, the autoscaling will be * based on CPU utilization metric only with default target value 60 if not * explicitly set. For example, in the case of Online Prediction, if you want * to override target CPU utilization to 80, you should set * autoscaling_metric_specs.metric_name to * `aiplatform.googleapis.com/prediction/online/cpu/utilization` and * autoscaling_metric_specs.target to `80`. */ autoscalingMetricSpecs?: GoogleCloudAiplatformV1AutoscalingMetricSpec[]; /** * Required. Immutable. The specification of a single machine being used. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Immutable. The maximum number of replicas that may be deployed on when the * traffic against it increases. If the requested value is too large, the * deployment will error, but if deployment succeeds then the ability to scale * to that many replicas is guaranteed (barring service outages). If traffic * increases beyond what its replicas at maximum may handle, a portion of the * traffic will be dropped. If this value is not provided, will use * min_replica_count as the default value. The value of this field impacts the * charge against Vertex CPU and GPU quotas. Specifically, you will be charged * for (max_replica_count * number of cores in the selected machine type) and * (max_replica_count * number of GPUs per replica in the selected machine * type). */ maxReplicaCount?: number; /** * Required. Immutable. The minimum number of machine replicas that will be * always deployed on. This value must be greater than or equal to 1. If * traffic increases, it may dynamically be deployed onto more replicas, and * as traffic decreases, some of these extra replicas may be freed. */ minReplicaCount?: number; /** * Optional. Number of required available replicas for the deployment to * succeed. This field is only needed when partial deployment/mutation is * desired. If set, the deploy/mutate operation will succeed once * available_replica_count reaches required_replica_count, and the rest of the * replicas will be retried. If not set, the default required_replica_count * will be min_replica_count. */ requiredReplicaCount?: number; /** * Optional. If true, schedule the deployment workload on [spot * VMs](https://cloud.google.com/kubernetes-engine/docs/concepts/spot-vms). */ spot?: boolean; } /** * Details of operations that delete Feature values. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesOperationMetadata { /** * Operation metadata for Featurestore delete Features values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.DeleteFeatureValues. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesRequest { /** * Select feature values to be deleted by specifying entities. */ selectEntity?: GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity; /** * Select feature values to be deleted by specifying time range and features. 
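 *
 * A minimal sketch of such a request (the feature ID, time range, and the
 * nested selector shape are illustrative assumptions, not taken from this
 * file):
 *
 * ```ts
 * const req: GoogleCloudAiplatformV1DeleteFeatureValuesRequest = {
 *   selectTimeRangeAndFeature: {
 *     // Assumed FeatureSelector shape: an ID matcher listing feature IDs.
 *     featureSelector: { idMatcher: { ids: ["age"] } },
 *     timeRange: {
 *       startTime: new Date("2024-01-01T00:00:00Z"),
 *       endTime: new Date("2024-02-01T00:00:00Z"),
 *     },
 *     skipOnlineStorageDelete: true,
 *   },
 * };
 * ```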
*/ selectTimeRangeAndFeature?: GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequest(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequest { return { ...data, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesRequest(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequest { return { ...data, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? deserializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } /** * Message to select entity. If an entity id is selected, all the feature * values corresponding to the entity id will be deleted, including the * entityId. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectEntity { /** * Required. Selectors choosing feature values of which entity id to be * deleted from the EntityType. */ entityIdSelector?: GoogleCloudAiplatformV1EntityIdSelector; } /** * Message to select time range and feature. Values of the selected feature * generated within an inclusive time range will be deleted. Using this option * permanently deletes the feature values from the specified feature IDs within * the specified time range. This might include data from the online storage. If * you want to retain any deleted historical data in the online storage, you * must re-ingest it. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { /** * Required. Selectors choosing which feature values to be deleted from the * EntityType. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; /** * If set, data will not be deleted from online storage. When time range is * older than the data in online storage, setting this to be true will make * the deletion have no impact on online serving. */ skipOnlineStorageDelete?: boolean; /** * Required. Select feature generated within a half-inclusive time range. The * time range is lower inclusive and upper exclusive. */ timeRange?: GoogleTypeInterval; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { return { ...data, timeRange: data["timeRange"] !== undefined ? serializeGoogleTypeInterval(data["timeRange"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesRequestSelectTimeRangeAndFeature { return { ...data, timeRange: data["timeRange"] !== undefined ? deserializeGoogleTypeInterval(data["timeRange"]) : undefined, }; } /** * Response message for FeaturestoreService.DeleteFeatureValues. 
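 *
 * Illustrative only (the helper below is hypothetical, not part of this
 * client): reading the deletion counts from a deserialized response.
 *
 * ```ts
 * function summarizeDeletion(
 *   resp: GoogleCloudAiplatformV1DeleteFeatureValuesResponse,
 * ): string {
 *   const online = resp.selectEntity?.onlineStorageDeletedEntityCount ?? 0n;
 *   const offline = resp.selectEntity?.offlineStorageDeletedEntityRowCount ?? 0n;
 *   return `deleted ${online} online entities and ${offline} offline rows`;
 * }
 * ```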
*/ export interface GoogleCloudAiplatformV1DeleteFeatureValuesResponse { /** * Response for request specifying the entities to delete */ selectEntity?: GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity; /** * Response for request specifying time range and feature */ selectTimeRangeAndFeature?: GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponse(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponse { return { ...data, selectEntity: data["selectEntity"] !== undefined ? serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data["selectEntity"]) : undefined, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponse(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponse { return { ...data, selectEntity: data["selectEntity"] !== undefined ? deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data["selectEntity"]) : undefined, selectTimeRangeAndFeature: data["selectTimeRangeAndFeature"] !== undefined ? deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data["selectTimeRangeAndFeature"]) : undefined, }; } /** * Response message if the request uses the SelectEntity option. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { /** * The count of deleted entity rows in the offline storage. Each row * corresponds to the combination of an entity ID and a timestamp. One entity * ID can have multiple rows in the offline storage. */ offlineStorageDeletedEntityRowCount?: bigint; /** * The count of deleted entities in the online storage. Each entity ID * corresponds to one entity. */ onlineStorageDeletedEntityCount?: bigint; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { return { ...data, offlineStorageDeletedEntityRowCount: data["offlineStorageDeletedEntityRowCount"] !== undefined ? String(data["offlineStorageDeletedEntityRowCount"]) : undefined, onlineStorageDeletedEntityCount: data["onlineStorageDeletedEntityCount"] !== undefined ? String(data["onlineStorageDeletedEntityCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectEntity { return { ...data, offlineStorageDeletedEntityRowCount: data["offlineStorageDeletedEntityRowCount"] !== undefined ? BigInt(data["offlineStorageDeletedEntityRowCount"]) : undefined, onlineStorageDeletedEntityCount: data["onlineStorageDeletedEntityCount"] !== undefined ? BigInt(data["onlineStorageDeletedEntityCount"]) : undefined, }; } /** * Response message if the request uses the SelectTimeRangeAndFeature option. */ export interface GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { /** * The count of the features or columns impacted. This is the same as the * feature count in the request. */ impactedFeatureCount?: bigint; /** * The count of modified entity rows in the offline storage. Each row * corresponds to the combination of an entity ID and a timestamp. One entity * ID can have multiple rows in the offline storage. 
Within each row, only the * features specified in the request are deleted. */ offlineStorageModifiedEntityRowCount?: bigint; /** * The count of modified entities in the online storage. Each entity ID * corresponds to one entity. Within each entity, only the features specified * in the request are deleted. */ onlineStorageModifiedEntityCount?: bigint; } function serializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { return { ...data, impactedFeatureCount: data["impactedFeatureCount"] !== undefined ? String(data["impactedFeatureCount"]) : undefined, offlineStorageModifiedEntityRowCount: data["offlineStorageModifiedEntityRowCount"] !== undefined ? String(data["offlineStorageModifiedEntityRowCount"]) : undefined, onlineStorageModifiedEntityCount: data["onlineStorageModifiedEntityCount"] !== undefined ? String(data["onlineStorageModifiedEntityCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature(data: any): GoogleCloudAiplatformV1DeleteFeatureValuesResponseSelectTimeRangeAndFeature { return { ...data, impactedFeatureCount: data["impactedFeatureCount"] !== undefined ? BigInt(data["impactedFeatureCount"]) : undefined, offlineStorageModifiedEntityRowCount: data["offlineStorageModifiedEntityRowCount"] !== undefined ? BigInt(data["offlineStorageModifiedEntityRowCount"]) : undefined, onlineStorageModifiedEntityCount: data["onlineStorageModifiedEntityCount"] !== undefined ? BigInt(data["onlineStorageModifiedEntityCount"]) : undefined, }; } /** * Details of operations that perform MetadataService.DeleteMetadataStore. */ export interface GoogleCloudAiplatformV1DeleteMetadataStoreOperationMetadata { /** * Operation metadata for deleting a MetadataStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform deletes of any entities. */ export interface GoogleCloudAiplatformV1DeleteOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * A deployment of an Index. IndexEndpoints contain one or more * DeployedIndexes. */ export interface GoogleCloudAiplatformV1DeployedIndex { /** * Optional. A description of resources that the DeployedIndex uses, which to * large degree are decided by Vertex AI, and optionally allows only a modest * additional configuration. If min_replica_count is not set, the default * value is 2 (we don't provide SLA when min_replica_count=1). If * max_replica_count is not set, the default value is min_replica_count. The * max allowed replica count is 1000. */ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; /** * Output only. Timestamp when the DeployedIndex was created. */ readonly createTime?: Date; /** * Optional. A description of resources that are dedicated to the * DeployedIndex, and that need a higher degree of manual configuration. The * field min_replica_count must be set to a value strictly greater than 0, or * else validation will fail. We don't provide SLA when min_replica_count=1. * If max_replica_count is not set, the default value is min_replica_count. * The max allowed replica count is 1000. Available machine types for SMALL * shard: e2-standard-2 and all machine types available for MEDIUM and LARGE * shard. Available machine types for MEDIUM shard: e2-standard-16 and all * machine types available for LARGE shard. 
Available machine types for LARGE * shard: e2-highmem-16, n2d-standard-32. n1-standard-16 and n1-standard-32 * are still available, but we recommend e2-standard-16 and e2-highmem-16 for * cost efficiency. */ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * Optional. If set, the authentication is enabled for the private endpoint. */ deployedIndexAuthConfig?: GoogleCloudAiplatformV1DeployedIndexAuthConfig; /** * Optional. The deployment group can be no longer than 64 characters (eg: * 'test', 'prod'). If not set, we will use the 'default' deployment group. * Creating `deployment_groups` with `reserved_ip_ranges` is a recommended * practice when the peered network has multiple peering ranges. This creates * your deployments from predictable IP spaces for easier traffic * administration. Also, one deployment_group (except 'default') can only be * used with the same reserved_ip_ranges which means if the deployment_group * has been used with reserved_ip_ranges: [a, b, c], using it with [a, b] or * [d, e] is disallowed. Note: we only support up to 5 deployment groups(not * including 'default'). */ deploymentGroup?: string; /** * The display name of the DeployedIndex. If not provided upon creation, the * Index's display_name is used. */ displayName?: string; /** * Optional. If true, private endpoint's access logs are sent to Cloud * Logging. These logs are like standard server access logs, containing * information like timestamp and latency for each MatchRequest. Note that * logs may incur a cost, especially if the deployed index receives a high * queries per second rate (QPS). Estimate your costs before enabling this * option. */ enableAccessLogging?: boolean; /** * Required. The user specified ID of the DeployedIndex. The ID can be up to * 128 characters long and must start with a letter and only contain letters, * numbers, and underscores. The ID must be unique within the project it is * created in. */ id?: string; /** * Required. The name of the Index this is the deployment of. We may refer to * this Index as the DeployedIndex's "original" Index. */ index?: string; /** * Output only. The DeployedIndex may depend on various data on its original * Index. Additionally when certain changes to the original Index are being * done (e.g. when what the Index contains is being changed) the DeployedIndex * may be asynchronously updated in the background to reflect these changes. * If this timestamp's value is at least the Index.update_time of the original * Index, it means that this DeployedIndex and the original Index are in sync. * If this timestamp is older, then to see which updates this DeployedIndex * already contains (and which it does not), one must list the operations that * are running on the original Index. Only the successfully completed * Operations with update_time equal or before this sync time are contained in * this DeployedIndex. */ readonly indexSyncTime?: Date; /** * Output only. Provides paths for users to send requests directly to the * deployed index services running on Cloud via private services access. This * field is populated if network is configured. */ readonly privateEndpoints?: GoogleCloudAiplatformV1IndexPrivateEndpoints; /** * Optional. If set for PSC deployed index, PSC connection will be * automatically created after deployment is done and the endpoint information * is populated in private_endpoints.psc_automated_endpoints. */ pscAutomationConfigs?: GoogleCloudAiplatformV1PSCAutomationConfig[]; /** * Optional. 
A list of reserved ip ranges under the VPC network that can be * used for this DeployedIndex. If set, we will deploy the index within the * provided ip ranges. Otherwise, the index might be deployed to any ip ranges * under the provided VPC network. The value should be the name of the address * (https://cloud.google.com/compute/docs/reference/rest/v1/addresses) * Example: ['vertex-ai-ip-range']. For more information about subnets and * network IP ranges, please see * https://cloud.google.com/vpc/docs/subnets#manually_created_subnet_ip_ranges. */ reservedIpRanges?: string[]; } /** * Used to set up the auth on the DeployedIndex's private endpoint. */ export interface GoogleCloudAiplatformV1DeployedIndexAuthConfig { /** * Defines the authentication provider that the DeployedIndex uses. */ authProvider?: GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider; } /** * Configuration for an authentication provider, including support for [JSON * Web Token * (JWT)](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32). */ export interface GoogleCloudAiplatformV1DeployedIndexAuthConfigAuthProvider { /** * A list of allowed JWT issuers. Each entry must be a valid Google service * account, in the following format: * `service-account-name@project-id.iam.gserviceaccount.com` */ allowedIssuers?: string[]; /** * The list of JWT * [audiences](https://tools.ietf.org/html/draft-ietf-oauth-json-web-token-32#section-4.1.3). * that are allowed to access. A JWT containing any of these audiences will be * accepted. */ audiences?: string[]; } /** * Points to a DeployedIndex. */ export interface GoogleCloudAiplatformV1DeployedIndexRef { /** * Immutable. The ID of the DeployedIndex in the above IndexEndpoint. */ deployedIndexId?: string; /** * Output only. The display name of the DeployedIndex. */ readonly displayName?: string; /** * Immutable. A resource name of the IndexEndpoint. */ indexEndpoint?: string; } /** * A deployment of a Model. Endpoints contain one or more DeployedModels. */ export interface GoogleCloudAiplatformV1DeployedModel { /** * A description of resources that to large degree are decided by Vertex AI, * and require only a modest additional configuration. */ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; /** * Output only. Timestamp when the DeployedModel was created. */ readonly createTime?: Date; /** * A description of resources that are dedicated to the DeployedModel, and * that need a higher degree of manual configuration. */ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * For custom-trained Models and AutoML Tabular Models, the container of the * DeployedModel instances will send `stderr` and `stdout` streams to Cloud * Logging by default. Please note that the logs incur cost, which are subject * to [Cloud Logging pricing](https://cloud.google.com/logging/pricing). User * can disable container logging by setting this flag to true. */ disableContainerLogging?: boolean; /** * If true, deploy the model without explainable feature, regardless the * existence of Model.explanation_spec or explanation_spec. */ disableExplanations?: boolean; /** * The display name of the DeployedModel. If not provided upon creation, the * Model's display_name is used. */ displayName?: string; /** * If true, online prediction access logs are sent to Cloud Logging. These * logs are like standard server access logs, containing information like * timestamp and latency for each prediction request. 
Note that logs may incur * a cost, especially if your project receives prediction requests at a high * queries per second rate (QPS). Estimate your costs before enabling this * option. */ enableAccessLogging?: boolean; /** * Explanation configuration for this DeployedModel. When deploying a Model * using EndpointService.DeployModel, this value overrides the value of * Model.explanation_spec. All fields of explanation_spec are optional in the * request. If a field of explanation_spec is not populated, the value of the * same field of Model.explanation_spec is inherited. If the corresponding * Model.explanation_spec is not populated, all fields of the explanation_spec * will be used for the explanation configuration. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * Configuration for faster model deployment. */ fasterDeploymentConfig?: GoogleCloudAiplatformV1FasterDeploymentConfig; /** * Immutable. The ID of the DeployedModel. If not provided upon deployment, * Vertex AI will generate a value for this ID. This value should be 1-10 * characters, and valid characters are `/[0-9]/`. */ id?: string; /** * Required. The resource name of the Model that this is the deployment of. * Note that the Model may be in a different location than the DeployedModel's * Endpoint. The resource name may contain a version ID or version alias to * specify the version. Example: * `projects/{project}/locations/{location}/models/{model}@2` or * `projects/{project}/locations/{location}/models/{model}@golden`. If no * version is specified, the default version will be deployed. */ model?: string; /** * Output only. The version ID of the model that is deployed. */ readonly modelVersionId?: string; /** * Output only. Provides paths for users to send predict/explain/health * requests directly to the deployed model services running on Cloud via * private services access. This field is populated if network is configured. */ readonly privateEndpoints?: GoogleCloudAiplatformV1PrivateEndpoints; /** * The service account that the DeployedModel's container runs as. Specify * the email address of the service account. If this service account is not * specified, the container runs as a service account that doesn't have access * to the resource project. Users deploying the Model must have the * `iam.serviceAccounts.actAs` permission on this service account. */ serviceAccount?: string; /** * The resource name of the shared DeploymentResourcePool to deploy on. * Format: * `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ sharedResources?: string; /** * Optional. Spec for configuring speculative decoding. */ speculativeDecodingSpec?: GoogleCloudAiplatformV1SpeculativeDecodingSpec; /** * Output only. Runtime status of the deployed model. */ readonly status?: GoogleCloudAiplatformV1DeployedModelStatus; /** * System labels to apply to Model Garden deployments. System labels are * managed by Google for internal use only. */ systemLabels?: { [key: string]: string }; } /** * Points to a DeployedModel. */ export interface GoogleCloudAiplatformV1DeployedModelRef { /** * Immutable. An ID of a DeployedModel in the above Endpoint. */ deployedModelId?: string; /** * Immutable. A resource name of an Endpoint. */ endpoint?: string; } /** * Runtime status of the deployed model. */ export interface GoogleCloudAiplatformV1DeployedModelStatus { /** * Output only. The number of available replicas of the deployed model. */ readonly availableReplicaCount?: number; /** * Output only.
The time at which the status was last updated. */ readonly lastUpdateTime?: Date; /** * Output only. The latest deployed model's status message (if any). */ readonly message?: string; } /** * Runtime operation information for IndexEndpointService.DeployIndex. */ export interface GoogleCloudAiplatformV1DeployIndexOperationMetadata { /** * The unique index id specified by user */ deployedIndexId?: string; /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for IndexEndpointService.DeployIndex. */ export interface GoogleCloudAiplatformV1DeployIndexRequest { /** * Required. The DeployedIndex to be created within the IndexEndpoint. */ deployedIndex?: GoogleCloudAiplatformV1DeployedIndex; } /** * Response message for IndexEndpointService.DeployIndex. */ export interface GoogleCloudAiplatformV1DeployIndexResponse { /** * The DeployedIndex that had been deployed in the IndexEndpoint. */ deployedIndex?: GoogleCloudAiplatformV1DeployedIndex; } /** * A description of resources that can be shared by multiple DeployedModels, * whose underlying specification consists of a DedicatedResources. */ export interface GoogleCloudAiplatformV1DeploymentResourcePool { /** * Output only. Timestamp when this DeploymentResourcePool was created. */ readonly createTime?: Date; /** * Required. The underlying DedicatedResources that the * DeploymentResourcePool uses. */ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * If the DeploymentResourcePool is deployed with custom-trained Models or * AutoML Tabular Models, the container(s) of the DeploymentResourcePool will * send `stderr` and `stdout` streams to Cloud Logging by default. Please note * that the logs incur cost, which are subject to [Cloud Logging * pricing](https://cloud.google.com/logging/pricing). User can disable * container logging by setting this flag to true. */ disableContainerLogging?: boolean; /** * Customer-managed encryption key spec for a DeploymentResourcePool. If set, * this DeploymentResourcePool will be secured by this key. Endpoints and the * DeploymentResourcePool they deploy in need to have the same EncryptionSpec. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Immutable. The resource name of the DeploymentResourcePool. Format: * `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * The service account that the DeploymentResourcePool's container(s) run as. * Specify the email address of the service account. If this service account * is not specified, the container(s) run as a service account that doesn't * have access to the resource project. Users deploying the Models to this * DeploymentResourcePool must have the `iam.serviceAccounts.actAs` permission * on this service account. */ serviceAccount?: string; } /** * Runtime operation information for EndpointService.DeployModel. */ export interface GoogleCloudAiplatformV1DeployModelOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for EndpointService.DeployModel. */ export interface GoogleCloudAiplatformV1DeployModelRequest { /** * Required. The DeployedModel to be created within the Endpoint. 
Note that * Endpoint.traffic_split must be updated for the DeployedModel to start * receiving traffic, either as part of this call, or via * EndpointService.UpdateEndpoint. */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; /** * A map from a DeployedModel's ID to the percentage of this Endpoint's * traffic that should be forwarded to that DeployedModel. If this field is * non-empty, then the Endpoint's traffic_split will be overwritten with it. * To refer to the ID of the just being deployed Model, a "0" should be used, * and the actual ID of the new DeployedModel will be filled in its place by * this method. The traffic percentage values must add up to 100. If this * field is empty, then the Endpoint's traffic_split is not updated. */ trafficSplit?: { [key: string]: number }; } /** * Response message for EndpointService.DeployModel. */ export interface GoogleCloudAiplatformV1DeployModelResponse { /** * The DeployedModel that had been deployed in the Endpoint. */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; } export interface GoogleCloudAiplatformV1DestinationFeatureSetting { /** * Specify the field name in the export destination. If not specified, * Feature ID is used. */ destinationField?: string; /** * Required. The ID of the Feature to apply the setting to. */ featureId?: string; } /** * Request message for PredictionService.DirectPredict. */ export interface GoogleCloudAiplatformV1DirectPredictRequest { /** * The prediction input. */ inputs?: GoogleCloudAiplatformV1Tensor[]; /** * The parameters that govern the prediction. */ parameters?: GoogleCloudAiplatformV1Tensor; } function serializeGoogleCloudAiplatformV1DirectPredictRequest(data: any): GoogleCloudAiplatformV1DirectPredictRequest { return { ...data, inputs: data["inputs"] !== undefined ? data["inputs"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectPredictRequest(data: any): GoogleCloudAiplatformV1DirectPredictRequest { return { ...data, inputs: data["inputs"] !== undefined ? data["inputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } /** * Response message for PredictionService.DirectPredict. */ export interface GoogleCloudAiplatformV1DirectPredictResponse { /** * The prediction output. */ outputs?: GoogleCloudAiplatformV1Tensor[]; /** * The parameters that govern the prediction. */ parameters?: GoogleCloudAiplatformV1Tensor; } function serializeGoogleCloudAiplatformV1DirectPredictResponse(data: any): GoogleCloudAiplatformV1DirectPredictResponse { return { ...data, outputs: data["outputs"] !== undefined ? data["outputs"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectPredictResponse(data: any): GoogleCloudAiplatformV1DirectPredictResponse { return { ...data, outputs: data["outputs"] !== undefined ? data["outputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? 
deserializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } /** * Request message for PredictionService.DirectRawPredict. */ export interface GoogleCloudAiplatformV1DirectRawPredictRequest { /** * The prediction input. */ input?: Uint8Array; /** * Fully qualified name of the API method being invoked to perform * predictions. Format: `/namespace.Service/Method/` Example: * `/tensorflow.serving.PredictionService/Predict` */ methodName?: string; } function serializeGoogleCloudAiplatformV1DirectRawPredictRequest(data: any): GoogleCloudAiplatformV1DirectRawPredictRequest { return { ...data, input: data["input"] !== undefined ? encodeBase64(data["input"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectRawPredictRequest(data: any): GoogleCloudAiplatformV1DirectRawPredictRequest { return { ...data, input: data["input"] !== undefined ? decodeBase64(data["input"] as string) : undefined, }; } /** * Response message for PredictionService.DirectRawPredict. */ export interface GoogleCloudAiplatformV1DirectRawPredictResponse { /** * The prediction output. */ output?: Uint8Array; } function serializeGoogleCloudAiplatformV1DirectRawPredictResponse(data: any): GoogleCloudAiplatformV1DirectRawPredictResponse { return { ...data, output: data["output"] !== undefined ? encodeBase64(data["output"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1DirectRawPredictResponse(data: any): GoogleCloudAiplatformV1DirectRawPredictResponse { return { ...data, output: data["output"] !== undefined ? decodeBase64(data["output"] as string) : undefined, }; } /** * The input content is encapsulated and uploaded in the request. */ export interface GoogleCloudAiplatformV1DirectUploadSource { } /** * Represents the spec of disk options. */ export interface GoogleCloudAiplatformV1DiskSpec { /** * Size in GB of the boot disk (default is 100GB). */ bootDiskSizeGb?: number; /** * Type of the boot disk. For non-A3U machines, the default value is * "pd-ssd", for A3U machines, the default value is "hyperdisk-balanced". * Valid values: "pd-ssd" (Persistent Disk Solid State Drive), "pd-standard" * (Persistent Disk Hard Disk Drive) or "hyperdisk-balanced". */ bootDiskType?: string; } /** * A list of double values. */ export interface GoogleCloudAiplatformV1DoubleArray { /** * A list of double values. */ values?: number[]; } /** * Describes the options to customize dynamic retrieval. */ export interface GoogleCloudAiplatformV1DynamicRetrievalConfig { /** * Optional. The threshold to be used in dynamic retrieval. If not set, a * system default value is used. */ dynamicThreshold?: number; /** * The mode of the predictor to be used in dynamic retrieval. */ mode?: | "MODE_UNSPECIFIED" | "MODE_DYNAMIC"; } /** * Represents a customer-managed encryption key spec that can be applied to a * top-level resource. */ export interface GoogleCloudAiplatformV1EncryptionSpec { /** * Required. The Cloud KMS resource identifier of the customer managed * encryption key used to protect a resource. Has the form: * `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. * The key needs to be in the same region as where the compute resource is * created. */ kmsKeyName?: string; } /** * Models are deployed into it, and afterwards Endpoint is called to obtain * predictions and explanations. */ export interface GoogleCloudAiplatformV1Endpoint { /** * Configurations that are applied to the endpoint for online prediction. 
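 *
 * Minimal sketch of an Endpoint payload (values are placeholders; only fields
 * declared in this interface are used, and this connection config is left
 * unset):
 *
 * ```ts
 * const endpoint: GoogleCloudAiplatformV1Endpoint = {
 *   displayName: "my-endpoint",
 *   labels: { team: "ml-platform" },
 *   dedicatedEndpointEnabled: false,
 * };
 * ```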
*/ clientConnectionConfig?: GoogleCloudAiplatformV1ClientConnectionConfig; /** * Output only. Timestamp when this Endpoint was created. */ readonly createTime?: Date; /** * Output only. DNS of the dedicated endpoint. Will only be populated if * dedicated_endpoint_enabled is true. Depending on the features enabled, uid * might be a random number or a string. For example, if fast_tryout is * enabled, uid will be fasttryout. Format: * `https://{endpoint_id}.{region}-{uid}.prediction.vertexai.goog`. */ readonly dedicatedEndpointDns?: string; /** * If true, the endpoint will be exposed through a dedicated DNS * [Endpoint.dedicated_endpoint_dns]. Your request to the dedicated DNS will * be isolated from other users' traffic and will have better performance and * reliability. Note: Once you enabled dedicated endpoint, you won't be able * to send request to the shared DNS {region}-aiplatform.googleapis.com. The * limitation will be removed soon. */ dedicatedEndpointEnabled?: boolean; /** * Output only. The models deployed in this Endpoint. To add or remove * DeployedModels use EndpointService.DeployModel and * EndpointService.UndeployModel respectively. */ readonly deployedModels?: GoogleCloudAiplatformV1DeployedModel[]; /** * The description of the Endpoint. */ description?: string; /** * Required. The display name of the Endpoint. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Deprecated: If true, expose the Endpoint via private service connect. Only * one of the fields, network or enable_private_service_connect, can be set. */ enablePrivateServiceConnect?: boolean; /** * Customer-managed encryption key spec for an Endpoint. If set, this * Endpoint and all sub-resources of this Endpoint will be secured by this * key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * Optional. Configuration for GenAiAdvancedFeatures. If the endpoint is * serving GenAI models, advanced features like native RAG integration can be * configured. Currently, only Model Garden models are supported. */ genAiAdvancedFeaturesConfig?: GoogleCloudAiplatformV1GenAiAdvancedFeaturesConfig; /** * The labels with user-defined metadata to organize your Endpoints. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. See https://goo.gl/xmQnxf for * more information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the Model Monitoring job associated with * this Endpoint if monitoring is enabled by * JobService.CreateModelDeploymentMonitoringJob. Format: * `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` */ readonly modelDeploymentMonitoringJob?: string; /** * Output only. The resource name of the Endpoint. */ readonly name?: string; /** * Optional. The full name of the Google Compute Engine * [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) * to which the Endpoint should be peered. Private services access must * already be configured for the network. If left unspecified, the Endpoint is * not peered with any network. Only one of the fields, network or * enable_private_service_connect, can be set. 
* [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): * `projects/{project}/global/networks/{network}`. Where `{project}` is a * project number, as in `12345`, and `{network}` is a network name. */ network?: string; /** * Configures the request-response logging for online prediction. */ predictRequestResponseLoggingConfig?: GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig; /** * Optional. Configuration for private service connect. network and * private_service_connect_config are mutually exclusive. */ privateServiceConnectConfig?: GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * A map from a DeployedModel's ID to the percentage of this Endpoint's * traffic that should be forwarded to that DeployedModel. If a * DeployedModel's ID is not listed in this map, then it receives no traffic. * The traffic percentage values must add up to 100, or the map must be empty if * the Endpoint is not to accept any traffic at the moment. */ trafficSplit?: { [key: string]: number }; /** * Output only. Timestamp when this Endpoint was last updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1Endpoint(data: any): GoogleCloudAiplatformV1Endpoint { return { ...data, clientConnectionConfig: data["clientConnectionConfig"] !== undefined ? serializeGoogleCloudAiplatformV1ClientConnectionConfig(data["clientConnectionConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Endpoint(data: any): GoogleCloudAiplatformV1Endpoint { return { ...data, clientConnectionConfig: data["clientConnectionConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ClientConnectionConfig(data["clientConnectionConfig"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Tool to search public web data, powered by Vertex AI Search and Sec4 * compliance. */ export interface GoogleCloudAiplatformV1EnterpriseWebSearch { } /** * Selector for entityId. Getting ids from the given source. */ export interface GoogleCloudAiplatformV1EntityIdSelector { /** * Source of CSV */ csvSource?: GoogleCloudAiplatformV1CsvSource; /** * Source column that holds entity IDs. If not provided, entity IDs are * extracted from the column named entity_id. */ entityIdField?: string; } /** * An entity type is a type of object in a system that needs to be modeled and * about which information needs to be stored. For example, driver is an entity type, and * driver0 is an instance of an entity type driver. */ export interface GoogleCloudAiplatformV1EntityType { /** * Output only. Timestamp when this EntityType was created. */ readonly createTime?: Date; /** * Optional. Description of the EntityType. */ description?: string; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * EntityTypes. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information and examples of labels.
* No more than 64 user labels can be associated with one EntityType (System * labels are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Optional. The default monitoring configuration for all Features with value * type (Feature.ValueType) BOOL, STRING, DOUBLE or INT64 under this * EntityType. If this is populated with * [FeaturestoreMonitoringConfig.monitoring_interval] specified, snapshot * analysis monitoring is enabled. Otherwise, snapshot analysis monitoring is * disabled. */ monitoringConfig?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfig; /** * Immutable. Name of the EntityType. Format: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}` * The last part entity_type is assigned by the client. The entity_type can be * up to 64 characters long and can consist only of ASCII Latin letters A-Z * and a-z and underscore(_), and ASCII digits 0-9 starting with a letter. The * value will be unique given a featurestore. */ name?: string; /** * Optional. Config for data retention policy in offline storage. TTL in days * for feature values that will be stored in offline storage. The Feature * Store offline storage periodically removes obsolete feature values older * than `offline_storage_ttl_days` since the feature generation time. If unset * (or explicitly set to 0), default to 4000 days TTL. */ offlineStorageTtlDays?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this EntityType was most recently updated. */ readonly updateTime?: Date; } /** * Represents an environment variable present in a Container or Python Module. */ export interface GoogleCloudAiplatformV1EnvVar { /** * Required. Name of the environment variable. Must be a valid C identifier. */ name?: string; /** * Required. Variables that reference a $(VAR_NAME) are expanded using the * previous defined environment variables in the container and any service * environment variables. If a variable cannot be resolved, the reference in * the input string will be unchanged. The $(VAR_NAME) syntax can be escaped * with a double $$, ie: $$(VAR_NAME). Escaped references will never be * expanded, regardless of whether the variable exists or not. */ value?: string; } /** * Model error analysis for each annotation. */ export interface GoogleCloudAiplatformV1ErrorAnalysisAnnotation { /** * Attributed items for a given annotation, typically representing neighbors * from the training sets constrained by the query type. */ attributedItems?: GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem[]; /** * The outlier score of this annotated item. Usually defined as the min of * all distances from attributed items. */ outlierScore?: number; /** * The threshold used to determine if this annotation is an outlier or not. */ outlierThreshold?: number; /** * The query type used for finding the attributed items. */ queryType?: | "QUERY_TYPE_UNSPECIFIED" | "ALL_SIMILAR" | "SAME_CLASS_SIMILAR" | "SAME_CLASS_DISSIMILAR"; } /** * Attributed items for a given annotation, typically representing neighbors * from the training sets constrained by the query type. */ export interface GoogleCloudAiplatformV1ErrorAnalysisAnnotationAttributedItem { /** * The unique ID for each annotation. Used by FE to allocate the annotation * in DB. 
*/ annotationResourceName?: string; /** * The distance of this item to the annotation. */ distance?: number; } /** * True positive, false positive, or false negative. EvaluatedAnnotation is * only available under ModelEvaluationSlice with slice of `annotationSpec` * dimension. */ export interface GoogleCloudAiplatformV1EvaluatedAnnotation { /** * Output only. The data item payload that the Model predicted this * EvaluatedAnnotation on. */ readonly dataItemPayload?: any; /** * Annotations of model error analysis results. */ errorAnalysisAnnotations?: GoogleCloudAiplatformV1ErrorAnalysisAnnotation[]; /** * Output only. ID of the EvaluatedDataItemView under the same ancestor * ModelEvaluation. The EvaluatedDataItemView consists of all ground truths * and predictions on data_item_payload. */ readonly evaluatedDataItemViewId?: string; /** * Explanations of predictions. Each element of the explanations indicates * the explanation for one explanation Method. The attributions list in the * EvaluatedAnnotationExplanation.explanation object corresponds to the * predictions list. For example, the second element in the attributions list * explains the second element in the predictions list. */ explanations?: GoogleCloudAiplatformV1EvaluatedAnnotationExplanation[]; /** * Output only. The ground truth Annotations, i.e. the Annotations that exist * in the test data the Model is evaluated on. For true positive, there is one * and only one ground truth annotation, which matches the only prediction in * predictions. For false positive, there are zero or more ground truth * annotations that are similar to the only prediction in predictions, but not * enough for a match. For false negative, there is one and only one ground * truth annotation, which doesn't match any predictions created by the model. * The schema of the ground truth is stored in * ModelEvaluation.annotation_schema_uri */ readonly groundTruths?: any[]; /** * Output only. The model predicted annotations. For true positive, there is * one and only one prediction, which matches the only one ground truth * annotation in ground_truths. For false positive, there is one and only one * prediction, which doesn't match any ground truth annotation of the * corresponding data_item_view_id. For false negative, there are zero or more * predictions which are similar to the only ground truth annotation in * ground_truths but not enough for a match. The schema of the prediction is * stored in ModelEvaluation.annotation_schema_uri */ readonly predictions?: any[]; /** * Output only. Type of the EvaluatedAnnotation. */ readonly type?: | "EVALUATED_ANNOTATION_TYPE_UNSPECIFIED" | "TRUE_POSITIVE" | "FALSE_POSITIVE" | "FALSE_NEGATIVE"; } /** * Explanation result of the prediction produced by the Model. */ export interface GoogleCloudAiplatformV1EvaluatedAnnotationExplanation { /** * Explanation attribution response details. */ explanation?: GoogleCloudAiplatformV1Explanation; /** * Explanation type. For AutoML Image Classification models, possible values * are: * `image-integrated-gradients` * `image-xrai` */ explanationType?: string; } /** * Request message for EvaluationService.EvaluateDataset. */ export interface GoogleCloudAiplatformV1EvaluateDatasetRequest { /** * Optional. Autorater config used for evaluation. Currently only publisher * Gemini models are supported. Format: * `projects/{PROJECT}/locations/{LOCATION}/publishers/google/models/{MODEL}.` */ autoraterConfig?: GoogleCloudAiplatformV1AutoraterConfig; /** * Required. The dataset used for evaluation. 
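* For example (an illustrative sketch assuming a single Cloud Storage JSONL file and the standard GcsSource `uris` list; bucket and object names are placeholders): `dataset: { gcsSource: { uris: ["gs://my-bucket/eval.jsonl"] } }`.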
*/ dataset?: GoogleCloudAiplatformV1EvaluationDataset; /** * Required. The metrics used for evaluation. */ metrics?: GoogleCloudAiplatformV1Metric[]; /** * Required. Config for evaluation output. */ outputConfig?: GoogleCloudAiplatformV1OutputConfig; } /** * Request message for EvaluationService.EvaluateInstances. */ export interface GoogleCloudAiplatformV1EvaluateInstancesRequest { /** * Optional. Autorater config used for evaluation. */ autoraterConfig?: GoogleCloudAiplatformV1AutoraterConfig; /** * Instances and metric spec for bleu metric. */ bleuInput?: GoogleCloudAiplatformV1BleuInput; /** * Input for coherence metric. */ coherenceInput?: GoogleCloudAiplatformV1CoherenceInput; /** * Translation metrics. Input for Comet metric. */ cometInput?: GoogleCloudAiplatformV1CometInput; /** * Auto metric instances. Instances and metric spec for exact match metric. */ exactMatchInput?: GoogleCloudAiplatformV1ExactMatchInput; /** * LLM-based metric instance. General text generation metrics, applicable to * other categories. Input for fluency metric. */ fluencyInput?: GoogleCloudAiplatformV1FluencyInput; /** * Input for fulfillment metric. */ fulfillmentInput?: GoogleCloudAiplatformV1FulfillmentInput; /** * Input for groundedness metric. */ groundednessInput?: GoogleCloudAiplatformV1GroundednessInput; /** * Input for Metricx metric. */ metricxInput?: GoogleCloudAiplatformV1MetricxInput; /** * Input for pairwise metric. */ pairwiseMetricInput?: GoogleCloudAiplatformV1PairwiseMetricInput; /** * Input for pairwise question answering quality metric. */ pairwiseQuestionAnsweringQualityInput?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInput; /** * Input for pairwise summarization quality metric. */ pairwiseSummarizationQualityInput?: GoogleCloudAiplatformV1PairwiseSummarizationQualityInput; /** * Input for pointwise metric. */ pointwiseMetricInput?: GoogleCloudAiplatformV1PointwiseMetricInput; /** * Input for question answering correctness metric. */ questionAnsweringCorrectnessInput?: GoogleCloudAiplatformV1QuestionAnsweringCorrectnessInput; /** * Input for question answering helpfulness metric. */ questionAnsweringHelpfulnessInput?: GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessInput; /** * Input for question answering quality metric. */ questionAnsweringQualityInput?: GoogleCloudAiplatformV1QuestionAnsweringQualityInput; /** * Input for question answering relevance metric. */ questionAnsweringRelevanceInput?: GoogleCloudAiplatformV1QuestionAnsweringRelevanceInput; /** * Instances and metric spec for rouge metric. */ rougeInput?: GoogleCloudAiplatformV1RougeInput; /** * Rubric Based Instruction Following metric. */ rubricBasedInstructionFollowingInput?: GoogleCloudAiplatformV1RubricBasedInstructionFollowingInput; /** * Input for safety metric. */ safetyInput?: GoogleCloudAiplatformV1SafetyInput; /** * Input for summarization helpfulness metric. */ summarizationHelpfulnessInput?: GoogleCloudAiplatformV1SummarizationHelpfulnessInput; /** * Input for summarization quality metric. */ summarizationQualityInput?: GoogleCloudAiplatformV1SummarizationQualityInput; /** * Input for summarization verbosity metric. */ summarizationVerbosityInput?: GoogleCloudAiplatformV1SummarizationVerbosityInput; /** * Tool call metric instances. Input for tool call valid metric. */ toolCallValidInput?: GoogleCloudAiplatformV1ToolCallValidInput; /** * Input for tool name match metric. */ toolNameMatchInput?: GoogleCloudAiplatformV1ToolNameMatchInput; /** * Input for tool parameter key match metric. 
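* As with the other metric inputs on this request, typically only one metric input is populated per EvaluateInstances call; an illustrative sketch using the exact match metric (placeholder values): `{ exactMatchInput: { metricSpec: {}, instances: [{ prediction: "4", reference: "4" }] } }`.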
*/ toolParameterKeyMatchInput?: GoogleCloudAiplatformV1ToolParameterKeyMatchInput; /** * Input for tool parameter key value match metric. */ toolParameterKvMatchInput?: GoogleCloudAiplatformV1ToolParameterKVMatchInput; /** * Input for trajectory match any order metric. */ trajectoryAnyOrderMatchInput?: GoogleCloudAiplatformV1TrajectoryAnyOrderMatchInput; /** * Input for trajectory exact match metric. */ trajectoryExactMatchInput?: GoogleCloudAiplatformV1TrajectoryExactMatchInput; /** * Input for trajectory in order match metric. */ trajectoryInOrderMatchInput?: GoogleCloudAiplatformV1TrajectoryInOrderMatchInput; /** * Input for trajectory precision metric. */ trajectoryPrecisionInput?: GoogleCloudAiplatformV1TrajectoryPrecisionInput; /** * Input for trajectory recall metric. */ trajectoryRecallInput?: GoogleCloudAiplatformV1TrajectoryRecallInput; /** * Input for trajectory single tool use metric. */ trajectorySingleToolUseInput?: GoogleCloudAiplatformV1TrajectorySingleToolUseInput; } function serializeGoogleCloudAiplatformV1EvaluateInstancesRequest(data: any): GoogleCloudAiplatformV1EvaluateInstancesRequest { return { ...data, pairwiseMetricInput: data["pairwiseMetricInput"] !== undefined ? serializeGoogleCloudAiplatformV1PairwiseMetricInput(data["pairwiseMetricInput"]) : undefined, pointwiseMetricInput: data["pointwiseMetricInput"] !== undefined ? serializeGoogleCloudAiplatformV1PointwiseMetricInput(data["pointwiseMetricInput"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1EvaluateInstancesRequest(data: any): GoogleCloudAiplatformV1EvaluateInstancesRequest { return { ...data, pairwiseMetricInput: data["pairwiseMetricInput"] !== undefined ? deserializeGoogleCloudAiplatformV1PairwiseMetricInput(data["pairwiseMetricInput"]) : undefined, pointwiseMetricInput: data["pointwiseMetricInput"] !== undefined ? deserializeGoogleCloudAiplatformV1PointwiseMetricInput(data["pointwiseMetricInput"]) : undefined, }; } /** * Response message for EvaluationService.EvaluateInstances. */ export interface GoogleCloudAiplatformV1EvaluateInstancesResponse { /** * Results for bleu metric. */ bleuResults?: GoogleCloudAiplatformV1BleuResults; /** * Result for coherence metric. */ coherenceResult?: GoogleCloudAiplatformV1CoherenceResult; /** * Translation metrics. Result for Comet metric. */ cometResult?: GoogleCloudAiplatformV1CometResult; /** * Auto metric evaluation results. Results for exact match metric. */ exactMatchResults?: GoogleCloudAiplatformV1ExactMatchResults; /** * LLM-based metric evaluation result. General text generation metrics, * applicable to other categories. Result for fluency metric. */ fluencyResult?: GoogleCloudAiplatformV1FluencyResult; /** * Result for fulfillment metric. */ fulfillmentResult?: GoogleCloudAiplatformV1FulfillmentResult; /** * Result for groundedness metric. */ groundednessResult?: GoogleCloudAiplatformV1GroundednessResult; /** * Result for Metricx metric. */ metricxResult?: GoogleCloudAiplatformV1MetricxResult; /** * Result for pairwise metric. */ pairwiseMetricResult?: GoogleCloudAiplatformV1PairwiseMetricResult; /** * Result for pairwise question answering quality metric. */ pairwiseQuestionAnsweringQualityResult?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityResult; /** * Result for pairwise summarization quality metric. */ pairwiseSummarizationQualityResult?: GoogleCloudAiplatformV1PairwiseSummarizationQualityResult; /** * Generic metrics. Result for pointwise metric. 
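* For example (illustrative; assuming the pointwise result carries a numeric score), a caller would typically read `response.pointwiseMetricResult?.score` after EvaluationService.EvaluateInstances returns; only the result that corresponds to the metric input sent in the request is expected to be populated.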
*/ pointwiseMetricResult?: GoogleCloudAiplatformV1PointwiseMetricResult; /** * Result for question answering correctness metric. */ questionAnsweringCorrectnessResult?: GoogleCloudAiplatformV1QuestionAnsweringCorrectnessResult; /** * Result for question answering helpfulness metric. */ questionAnsweringHelpfulnessResult?: GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessResult; /** * Question answering only metrics. Result for question answering quality * metric. */ questionAnsweringQualityResult?: GoogleCloudAiplatformV1QuestionAnsweringQualityResult; /** * Result for question answering relevance metric. */ questionAnsweringRelevanceResult?: GoogleCloudAiplatformV1QuestionAnsweringRelevanceResult; /** * Results for rouge metric. */ rougeResults?: GoogleCloudAiplatformV1RougeResults; /** * Result for rubric based instruction following metric. */ rubricBasedInstructionFollowingResult?: GoogleCloudAiplatformV1RubricBasedInstructionFollowingResult; /** * Result for safety metric. */ safetyResult?: GoogleCloudAiplatformV1SafetyResult; /** * Result for summarization helpfulness metric. */ summarizationHelpfulnessResult?: GoogleCloudAiplatformV1SummarizationHelpfulnessResult; /** * Summarization only metrics. Result for summarization quality metric. */ summarizationQualityResult?: GoogleCloudAiplatformV1SummarizationQualityResult; /** * Result for summarization verbosity metric. */ summarizationVerbosityResult?: GoogleCloudAiplatformV1SummarizationVerbosityResult; /** * Tool call metrics. Results for tool call valid metric. */ toolCallValidResults?: GoogleCloudAiplatformV1ToolCallValidResults; /** * Results for tool name match metric. */ toolNameMatchResults?: GoogleCloudAiplatformV1ToolNameMatchResults; /** * Results for tool parameter key match metric. */ toolParameterKeyMatchResults?: GoogleCloudAiplatformV1ToolParameterKeyMatchResults; /** * Results for tool parameter key value match metric. */ toolParameterKvMatchResults?: GoogleCloudAiplatformV1ToolParameterKVMatchResults; /** * Result for trajectory any order match metric. */ trajectoryAnyOrderMatchResults?: GoogleCloudAiplatformV1TrajectoryAnyOrderMatchResults; /** * Result for trajectory exact match metric. */ trajectoryExactMatchResults?: GoogleCloudAiplatformV1TrajectoryExactMatchResults; /** * Result for trajectory in order match metric. */ trajectoryInOrderMatchResults?: GoogleCloudAiplatformV1TrajectoryInOrderMatchResults; /** * Result for trajectory precision metric. */ trajectoryPrecisionResults?: GoogleCloudAiplatformV1TrajectoryPrecisionResults; /** * Results for trajectory recall metric. */ trajectoryRecallResults?: GoogleCloudAiplatformV1TrajectoryRecallResults; /** * Results for trajectory single tool use metric. */ trajectorySingleToolUseResults?: GoogleCloudAiplatformV1TrajectorySingleToolUseResults; } /** * The dataset used for evaluation. */ export interface GoogleCloudAiplatformV1EvaluationDataset { /** * BigQuery source holds the dataset. */ bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * Cloud storage source holds the dataset. Currently only one Cloud Storage * file path is supported. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * An edge describing the relationship between an Artifact and an Execution in * a lineage graph. */ export interface GoogleCloudAiplatformV1Event { /** * Required. The relative resource name of the Artifact in the Event. */ artifact?: string; /** * Output only. Time the Event occurred. */ readonly eventTime?: Date; /** * Output only. 
The relative resource name of the Execution in the Event. */ readonly execution?: string; /** * The labels with user-defined metadata to annotate Events. Label keys and * values can be no longer than 64 characters (Unicode codepoints), can only * contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. No more than 64 user labels can be * associated with one Event (System labels are excluded). See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. */ labels?: { [key: string]: string }; /** * Required. The type of the Event. */ type?: | "TYPE_UNSPECIFIED" | "INPUT" | "OUTPUT"; } /** * Input for exact match metric. */ export interface GoogleCloudAiplatformV1ExactMatchInput { /** * Required. Repeated exact match instances. */ instances?: GoogleCloudAiplatformV1ExactMatchInstance[]; /** * Required. Spec for exact match metric. */ metricSpec?: GoogleCloudAiplatformV1ExactMatchSpec; } /** * Spec for exact match instance. */ export interface GoogleCloudAiplatformV1ExactMatchInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Exact match metric value for an instance. */ export interface GoogleCloudAiplatformV1ExactMatchMetricValue { /** * Output only. Exact match score. */ readonly score?: number; } /** * Results for exact match metric. */ export interface GoogleCloudAiplatformV1ExactMatchResults { /** * Output only. Exact match metric values. */ readonly exactMatchMetricValues?: GoogleCloudAiplatformV1ExactMatchMetricValue[]; } /** * Spec for exact match metric - returns 1 if prediction and reference exactly * matches, otherwise 0. */ export interface GoogleCloudAiplatformV1ExactMatchSpec { } /** * Example-based explainability that returns the nearest neighbors from the * provided dataset. */ export interface GoogleCloudAiplatformV1Examples { /** * The Cloud Storage input instances. */ exampleGcsSource?: GoogleCloudAiplatformV1ExamplesExampleGcsSource; /** * The full configuration for the generated index, the semantics are the same * as metadata and should match * [NearestNeighborSearchConfig](https://cloud.google.com/vertex-ai/docs/explainable-ai/configuring-explanations-example-based#nearest-neighbor-search-config). */ nearestNeighborSearchConfig?: any; /** * The number of neighbors to return when querying for examples. */ neighborCount?: number; /** * Simplified preset configuration, which automatically sets configuration * values based on the desired query speed-precision trade-off and modality. */ presets?: GoogleCloudAiplatformV1Presets; } /** * The Cloud Storage input instances. */ export interface GoogleCloudAiplatformV1ExamplesExampleGcsSource { /** * The format in which instances are given, if not specified, assume it's * JSONL format. Currently only JSONL format is supported. */ dataFormat?: | "DATA_FORMAT_UNSPECIFIED" | "JSONL"; /** * The Cloud Storage location for the input instances. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; } /** * Overrides for example-based explanations. */ export interface GoogleCloudAiplatformV1ExamplesOverride { /** * The number of neighbors to return that have the same crowding tag. */ crowdingCount?: number; /** * The format of the data being provided with each call. 
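* For example (illustrative): set this to `"EMBEDDINGS"` when the override supplies precomputed embedding vectors, or to `"INSTANCES"` when it supplies raw instances.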
*/ dataFormat?: | "DATA_FORMAT_UNSPECIFIED" | "INSTANCES" | "EMBEDDINGS"; /** * The number of neighbors to return. */ neighborCount?: number; /** * Restrict the resulting nearest neighbors to respect these constraints. */ restrictions?: GoogleCloudAiplatformV1ExamplesRestrictionsNamespace[]; /** * If true, return the embeddings instead of neighbors. */ returnEmbeddings?: boolean; } /** * Restrictions namespace for example-based explanations overrides. */ export interface GoogleCloudAiplatformV1ExamplesRestrictionsNamespace { /** * The list of allowed tags. */ allow?: string[]; /** * The list of deny tags. */ deny?: string[]; /** * The namespace name. */ namespaceName?: string; } /** * Code generated by the model that is meant to be executed, and the result * returned to the model. Generated when using the [FunctionDeclaration] tool * and [FunctionCallingConfig] mode is set to [Mode.CODE]. */ export interface GoogleCloudAiplatformV1ExecutableCode { /** * Required. The code to be executed. */ code?: string; /** * Required. Programming language of the `code`. */ language?: | "LANGUAGE_UNSPECIFIED" | "PYTHON"; } /** * Instance of a general execution. */ export interface GoogleCloudAiplatformV1Execution { /** * Output only. Timestamp when this Execution was created. */ readonly createTime?: Date; /** * Description of the Execution */ description?: string; /** * User provided display name of the Execution. May be up to 128 Unicode * characters. */ displayName?: string; /** * An eTag used to perform consistent read-modify-write updates. If not set, * a blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your Executions. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Execution (System labels are excluded). */ labels?: { [key: string]: string }; /** * Properties of the Execution. Top level metadata keys' heading and trailing * spaces will be trimmed. The size of this field should not exceed 200KB. */ metadata?: { [key: string]: any }; /** * Output only. The resource name of the Execution. */ readonly name?: string; /** * The title of the schema describing the metadata. Schema title and version * is expected to be registered in earlier Create Schema calls. And both are * used together as unique identifiers to identify schemas within the local * metadata store. */ schemaTitle?: string; /** * The version of the schema in `schema_title` to use. Schema title and * version is expected to be registered in earlier Create Schema calls. And * both are used together as unique identifiers to identify schemas within the * local metadata store. */ schemaVersion?: string; /** * The state of this Execution. This is a property of the Execution, and does * not imply or capture any ongoing process. This property is managed by * clients (such as Vertex AI Pipelines) and the system does not prescribe or * check the validity of state transitions. */ state?: | "STATE_UNSPECIFIED" | "NEW" | "RUNNING" | "COMPLETE" | "FAILED" | "CACHED" | "CANCELLED"; /** * Output only. Timestamp when this Execution was last updated. */ readonly updateTime?: Date; } /** * Request message for PredictionService.Explain. 
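* A minimal sketch (the instance keys and the DeployedModel ID below are hypothetical; the real instance shape is dictated by the Model's instance_schema_uri): `const req: GoogleCloudAiplatformV1ExplainRequest = { instances: [{ feature_a: 1.0, feature_b: "x" }], deployedModelId: "1234567890" };`.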
*/ export interface GoogleCloudAiplatformV1ExplainRequest { /** * If specified, this ExplainRequest will be served by the chosen * DeployedModel, overriding Endpoint.traffic_split. */ deployedModelId?: string; /** * If specified, overrides the explanation_spec of the DeployedModel. Can be * used for explaining prediction results with different configurations, such * as: - Explaining top-5 prediction results as opposed to top-1; - * Increasing path count or step count of the attribution methods to reduce * approximation errors; - Using different baselines for explaining the * prediction results. */ explanationSpecOverride?: GoogleCloudAiplatformV1ExplanationSpecOverride; /** * Required. The instances that are the input to the explanation call. A * DeployedModel may have an upper limit on the number of instances it * supports per request; when it is exceeded, the explanation call errors in * case of AutoML Models, or, in case of customer-created Models, the * behaviour is as documented by that Model. The schema of any single instance * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * instance_schema_uri. */ instances?: any[]; /** * The parameters that govern the prediction. The schema of the parameters * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * parameters_schema_uri. */ parameters?: any; } /** * Response message for PredictionService.Explain. */ export interface GoogleCloudAiplatformV1ExplainResponse { /** * ID of the Endpoint's DeployedModel that served this explanation. */ deployedModelId?: string; /** * The explanations of the Model's PredictResponse.predictions. It has the * same number of elements as the instances to be explained. */ explanations?: GoogleCloudAiplatformV1Explanation[]; /** * The predictions that are the output of the predictions call. Same as * PredictResponse.predictions. */ predictions?: any[]; } /** * Explanation of a prediction (provided in PredictResponse.predictions) * produced by the Model on a given instance. */ export interface GoogleCloudAiplatformV1Explanation { /** * Output only. Feature attributions grouped by predicted outputs. For Models * that predict only one output, such as regression Models that predict only * one score, there is only one attribution that explains the predicted output. * For Models that predict multiple outputs, such as multiclass Models that * predict multiple classes, each element explains one specific item. * Attribution.output_index can be used to identify which output this * attribution is explaining. By default, we provide Shapley values for the * predicted class. However, you can configure the explanation request to * generate Shapley values for any other classes too. For example, if a model * predicts a probability of `0.4` for approving a loan application, the * model's decision is to reject the application since `p(reject) = 0.6 > * p(approve) = 0.4`, and the default Shapley values would be computed for the * rejection decision and not approval, even though the latter might be the * positive class. If users set ExplanationParameters.top_k, the attributions * are sorted by instance_output_value in descending order. If * ExplanationParameters.output_indices is specified, the attributions are * stored by Attribution.output_index in the same order as they appear in the * output_indices. */ readonly attributions?: GoogleCloudAiplatformV1Attribution[]; /** * Output only. List of the nearest neighbors for example-based explanations.
* For models deployed with the examples explanations feature enabled, the * attributions field is empty and instead the neighbors field is populated. */ readonly neighbors?: GoogleCloudAiplatformV1Neighbor[]; } /** * Metadata describing the Model's input and output for explanation. */ export interface GoogleCloudAiplatformV1ExplanationMetadata { /** * Points to a YAML file stored on Google Cloud Storage describing the format * of the feature attributions. The schema is defined as an OpenAPI 3.0.2 * [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * AutoML tabular Models always have this field populated by Vertex AI. Note: * The URI given on output may be different, including the URI scheme, than * the one given on input. The output URI will point to a location where the * user only has a read access. */ featureAttributionsSchemaUri?: string; /** * Required. Map from feature names to feature input metadata. Keys are the * name of the features. Values are the specification of the feature. An empty * InputMetadata is valid. It describes a text feature which has the name * specified as the key in ExplanationMetadata.inputs. The baseline of the * empty feature is chosen by Vertex AI. For Vertex AI-provided Tensorflow * images, the key can be any friendly name of the feature. Once specified, * featureAttributions are keyed by this key (if not grouped with another * feature). For custom images, the key must match with the key in instance. */ inputs?: { [key: string]: GoogleCloudAiplatformV1ExplanationMetadataInputMetadata }; /** * Name of the source to generate embeddings for example based explanations. */ latentSpaceSource?: string; /** * Required. Map from output names to output metadata. For Vertex AI-provided * Tensorflow images, keys can be any user defined string that consists of any * UTF-8 characters. For custom images, keys are the name of the output field * in the prediction to be explained. Currently only one key is allowed. */ outputs?: { [key: string]: GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata }; } /** * Metadata of the input of a feature. Fields other than * InputMetadata.input_baselines are applicable only for Models that are using * Vertex AI-provided images for Tensorflow. */ export interface GoogleCloudAiplatformV1ExplanationMetadataInputMetadata { /** * Specifies the shape of the values of the input if the input is a sparse * representation. Refer to Tensorflow documentation for more details: * https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ denseShapeTensorName?: string; /** * A list of baselines for the encoded tensor. The shape of each baseline * should match the shape of the encoded tensor. If a scalar is provided, * Vertex AI broadcasts to the same shape as the encoded tensor. */ encodedBaselines?: any[]; /** * Encoded tensor is a transformation of the input tensor. Must be provided * if choosing Integrated Gradients attribution or XRAI attribution and the * input tensor is not differentiable. An encoded tensor is generated if the * input tensor is encoded by a lookup table. */ encodedTensorName?: string; /** * Defines how the feature is encoded into the input tensor. Defaults to * IDENTITY. */ encoding?: | "ENCODING_UNSPECIFIED" | "IDENTITY" | "BAG_OF_FEATURES" | "BAG_OF_FEATURES_SPARSE" | "INDICATOR" | "COMBINED_EMBEDDING" | "CONCAT_EMBEDDING"; /** * The domain details of the input feature value. Like min/max, original mean * or standard deviation if normalized. 
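* For example (an illustrative sketch; the numbers are placeholders): a feature that was z-scored during preprocessing might carry `{ originalMean: 12.5, originalStddev: 3.2 }` so attributions can be related back to the original scale.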
*/ featureValueDomain?: GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain; /** * Name of the group that the input belongs to. Features with the same group * name will be treated as one feature when computing attributions. Features * grouped together can have different shapes in value. If provided, there * will be one single attribution generated in * Attribution.feature_attributions, keyed by the group name. */ groupName?: string; /** * A list of feature names for each index in the input tensor. Required when * the input InputMetadata.encoding is BAG_OF_FEATURES, * BAG_OF_FEATURES_SPARSE, INDICATOR. */ indexFeatureMapping?: string[]; /** * Specifies the index of the values of the input tensor. Required when the * input tensor is a sparse representation. Refer to Tensorflow documentation * for more details: * https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor. */ indicesTensorName?: string; /** * Baseline inputs for this feature. If no baseline is specified, Vertex AI * chooses the baseline for this feature. If multiple baselines are specified, * Vertex AI returns the average attributions across them in * Attribution.feature_attributions. For Vertex AI-provided Tensorflow images * (both 1.x and 2.x), the shape of each baseline must match the shape of the * input tensor. If a scalar is provided, we broadcast to the same shape as * the input tensor. For custom images, the element of the baselines must be * in the same format as the feature's input in the instance[]. The schema of * any single instance may be specified via Endpoint's DeployedModels' Model's * PredictSchemata's instance_schema_uri. */ inputBaselines?: any[]; /** * Name of the input tensor for this feature. Required and is only applicable * to Vertex AI-provided images for Tensorflow. */ inputTensorName?: string; /** * Modality of the feature. Valid values are: numeric, image. Defaults to * numeric. */ modality?: string; /** * Visualization configurations for image explanation. */ visualization?: GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization; } /** * Domain details of the input feature value. Provides numeric information * about the feature, such as its range (min, max). If the feature has been * pre-processed, for example with z-scoring, then it provides information about * how to recover the original feature. For example, if the input feature is an * image and it has been pre-processed to obtain 0-mean and stddev = 1 values, * then original_mean, and original_stddev refer to the mean and stddev of the * original feature (e.g. image tensor) from which input feature (with mean = 0 * and stddev = 1) was obtained. */ export interface GoogleCloudAiplatformV1ExplanationMetadataInputMetadataFeatureValueDomain { /** * The maximum permissible value for this feature. */ maxValue?: number; /** * The minimum permissible value for this feature. */ minValue?: number; /** * If this input feature has been normalized to a mean value of 0, the * original_mean specifies the mean value of the domain prior to * normalization. */ originalMean?: number; /** * If this input feature has been normalized to a standard deviation of 1.0, * the original_stddev specifies the standard deviation of the domain prior to * normalization. */ originalStddev?: number; } /** * Visualization configurations for image explanation. */ export interface GoogleCloudAiplatformV1ExplanationMetadataInputMetadataVisualization { /** * Excludes attributions below the specified percentile, from the highlighted * areas. 
Defaults to 62. */ clipPercentLowerbound?: number; /** * Excludes attributions above the specified percentile from the highlighted * areas. Using the clip_percent_upperbound and clip_percent_lowerbound * together can be useful for filtering out noise and making it easier to see * areas of strong attribution. Defaults to 99.9. */ clipPercentUpperbound?: number; /** * The color scheme used for the highlighted areas. Defaults to PINK_GREEN * for Integrated Gradients attribution, which shows positive attributions in * green and negative in pink. Defaults to VIRIDIS for XRAI attribution, which * highlights the most influential regions in yellow and the least influential * in blue. */ colorMap?: | "COLOR_MAP_UNSPECIFIED" | "PINK_GREEN" | "VIRIDIS" | "RED" | "GREEN" | "RED_GREEN" | "PINK_WHITE_GREEN"; /** * How the original image is displayed in the visualization. Adjusting the * overlay can help increase visual clarity if the original image makes it * difficult to view the visualization. Defaults to NONE. */ overlayType?: | "OVERLAY_TYPE_UNSPECIFIED" | "NONE" | "ORIGINAL" | "GRAYSCALE" | "MASK_BLACK"; /** * Whether to only highlight pixels with positive contributions, negative or * both. Defaults to POSITIVE. */ polarity?: | "POLARITY_UNSPECIFIED" | "POSITIVE" | "NEGATIVE" | "BOTH"; /** * Type of the image visualization. Only applicable to Integrated Gradients * attribution. OUTLINES shows regions of attribution, while PIXELS shows * per-pixel attribution. Defaults to OUTLINES. */ type?: | "TYPE_UNSPECIFIED" | "PIXELS" | "OUTLINES"; } /** * Metadata of the prediction output to be explained. */ export interface GoogleCloudAiplatformV1ExplanationMetadataOutputMetadata { /** * Specify a field name in the prediction to look for the display name. Use * this if the prediction contains the display names for the outputs. The * display names in the prediction must have the same shape of the outputs, so * that it can be located by Attribution.output_index for a specific output. */ displayNameMappingKey?: string; /** * Static mapping between the index and display name. Use this if the outputs * are a deterministic n-dimensional array, e.g. a list of scores of all the * classes in a pre-defined order for a multi-classification Model. It's not * feasible if the outputs are non-deterministic, e.g. the Model produces * top-k classes or sort the outputs by their values. The shape of the value * must be an n-dimensional array of strings. The number of dimensions must * match that of the outputs to be explained. The * Attribution.output_display_name is populated by locating in the mapping * with Attribution.output_index. */ indexDisplayNameMapping?: any; /** * Name of the output tensor. Required and is only applicable to Vertex AI * provided images for Tensorflow. */ outputTensorName?: string; } /** * The ExplanationMetadata entries that can be overridden at online explanation * time. */ export interface GoogleCloudAiplatformV1ExplanationMetadataOverride { /** * Required. Overrides the input metadata of the features. The key is the * name of the feature to be overridden. The keys specified here must exist in * the input metadata to be overridden. If a feature is not specified here, * the corresponding feature's input metadata is not overridden. */ inputs?: { [key: string]: GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride }; } /** * The input metadata entries to be overridden. 
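* A minimal sketch of how this is used from the parent override (the feature name "age" and the baseline value are hypothetical): `const override: GoogleCloudAiplatformV1ExplanationMetadataOverride = { inputs: { "age": { inputBaselines: [18] } } };`.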
*/ export interface GoogleCloudAiplatformV1ExplanationMetadataOverrideInputMetadataOverride { /** * Baseline inputs for this feature. This overrides the `input_baseline` * field of the ExplanationMetadata.InputMetadata object of the corresponding * feature's input metadata. If it's not specified, the original baselines are * not overridden. */ inputBaselines?: any[]; } /** * Parameters to configure explaining for Model's predictions. */ export interface GoogleCloudAiplatformV1ExplanationParameters { /** * Example-based explanations that return the nearest neighbors from the * provided dataset. */ examples?: GoogleCloudAiplatformV1Examples; /** * An attribution method that computes Aumann-Shapley values taking advantage * of the model's fully differentiable structure. Refer to this paper for more * details: https://arxiv.org/abs/1703.01365 */ integratedGradientsAttribution?: GoogleCloudAiplatformV1IntegratedGradientsAttribution; /** * If populated, only returns attributions that have output_index contained * in output_indices. It must be an ndarray of integers, with the same shape * as the output it's explaining. If not populated, returns attributions for * top_k indices of outputs. If neither top_k nor output_indices is populated, * returns the argmax index of the outputs. Only applicable to Models that * predict multiple outputs (e.g., multi-class Models that predict multiple * classes). */ outputIndices?: any[]; /** * An attribution method that approximates Shapley values for features that * contribute to the label being predicted. A sampling strategy is used to * approximate the value rather than considering all subsets of features. * Refer to this paper for more details: https://arxiv.org/abs/1306.4265. */ sampledShapleyAttribution?: GoogleCloudAiplatformV1SampledShapleyAttribution; /** * If populated, returns attributions for top K indices of outputs (defaults * to 1). Only applies to Models that predict more than one output (e.g., * multi-class Models). When set to -1, returns explanations for all outputs. */ topK?: number; /** * An attribution method that redistributes Integrated Gradients attribution * to segmented regions, taking advantage of the model's fully differentiable * structure. Refer to this paper for more details: * https://arxiv.org/abs/1906.02825 XRAI currently performs better on natural * images, like a picture of a house or an animal. If the images are taken in * artificial environments, like a lab or manufacturing line, or from * diagnostic equipment, like x-rays or quality-control cameras, use * Integrated Gradients instead. */ xraiAttribution?: GoogleCloudAiplatformV1XraiAttribution; } /** * Specification of Model explanation. */ export interface GoogleCloudAiplatformV1ExplanationSpec { /** * Optional. Metadata describing the Model's input and output for * explanation. */ metadata?: GoogleCloudAiplatformV1ExplanationMetadata; /** * Required. Parameters that configure explaining of the Model's predictions. */ parameters?: GoogleCloudAiplatformV1ExplanationParameters; } /** * The ExplanationSpec entries that can be overridden at online explanation * time. */ export interface GoogleCloudAiplatformV1ExplanationSpecOverride { /** * The example-based explanations parameter overrides. */ examplesOverride?: GoogleCloudAiplatformV1ExamplesOverride; /** * The metadata to be overridden. If not specified, no metadata is * overridden. */ metadata?: GoogleCloudAiplatformV1ExplanationMetadataOverride; /** * The parameters to be overridden.
Note that the attribution method cannot * be changed. If not specified, no parameter is overridden. */ parameters?: GoogleCloudAiplatformV1ExplanationParameters; } /** * Describes what part of the Dataset is to be exported, the destination of the * export and how to export. */ export interface GoogleCloudAiplatformV1ExportDataConfig { /** * The Cloud Storage URI that points to a YAML file describing the annotation * schema. The schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * The schema files that can be used here are found in * gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the * chosen schema must be consistent with metadata of the Dataset specified by * ExportDataRequest.name. Only used for custom training data export use * cases. Only applicable to Datasets that have DataItems and Annotations. * Only Annotations that both match this schema and belong to DataItems not * ignored by the split method are used in respectively training, validation * or test role, depending on the role of the DataItem they are on. When used * in conjunction with annotations_filter, the Annotations used for training * are filtered by both annotations_filter and annotation_schema_uri. */ annotationSchemaUri?: string; /** * An expression for filtering what part of the Dataset is to be exported. * Only Annotations that match this filter will be exported. The filter syntax * is the same as in ListAnnotations. */ annotationsFilter?: string; /** * Indicates the usage of the exported files. */ exportUse?: | "EXPORT_USE_UNSPECIFIED" | "CUSTOM_CODE_TRAINING"; /** * Split based on the provided filters for each set. */ filterSplit?: GoogleCloudAiplatformV1ExportFilterSplit; /** * Split based on fractions defining the size of each set. */ fractionSplit?: GoogleCloudAiplatformV1ExportFractionSplit; /** * The Google Cloud Storage location where the output is to be written to. In * the given directory a new directory will be created with name: * `export-data--` where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 * format. All export output will be written into that directory. Inside that * directory, annotations with the same schema will be grouped into sub * directories which are named with the corresponding annotations' schema * title. Inside these sub directories, a schema.yaml will be created to * describe the output format. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; /** * The ID of a SavedQuery (annotation set) under the Dataset specified by * ExportDataRequest.name used for filtering Annotations for training. Only * used for custom training data export use cases. Only applicable to Datasets * that have SavedQueries. Only Annotations that are associated with this * SavedQuery are used in respectively training. When used in conjunction with * annotations_filter, the Annotations used for training are filtered by both * saved_query_id and annotations_filter. Only one of saved_query_id and * annotation_schema_uri should be specified as both of them represent the * same thing: problem type. */ savedQueryId?: string; } /** * Runtime operation information for DatasetService.ExportData. */ export interface GoogleCloudAiplatformV1ExportDataOperationMetadata { /** * A Google Cloud Storage directory which path ends with '/'. The exported * data is stored in the directory. */ gcsOutputDirectory?: string; /** * The common part of the operation metadata. 
*/ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for DatasetService.ExportData. */ export interface GoogleCloudAiplatformV1ExportDataRequest { /** * Required. The desired output location. */ exportConfig?: GoogleCloudAiplatformV1ExportDataConfig; } /** * Response message for DatasetService.ExportData. */ export interface GoogleCloudAiplatformV1ExportDataResponse { /** * Only present for custom code training export use case. Records data stats, * i.e., train/validation/test item/annotation counts calculated during the * export operation. */ dataStats?: GoogleCloudAiplatformV1ModelDataStats; /** * All of the files that are exported in this export operation. For custom * code training export, only three (training, validation and test) Cloud * Storage paths in wildcard format are populated (for example, * gs://.../training-*). */ exportedFiles?: string[]; } function serializeGoogleCloudAiplatformV1ExportDataResponse(data: any): GoogleCloudAiplatformV1ExportDataResponse { return { ...data, dataStats: data["dataStats"] !== undefined ? serializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportDataResponse(data: any): GoogleCloudAiplatformV1ExportDataResponse { return { ...data, dataStats: data["dataStats"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, }; } /** * Details of operations that exports Features values. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesOperationMetadata { /** * Operation metadata for Featurestore export Feature values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for FeaturestoreService.ExportFeatureValues. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesRequest { /** * Required. Specifies destination location and format. */ destination?: GoogleCloudAiplatformV1FeatureValueDestination; /** * Required. Selects Features to export values of. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; /** * Exports all historical values of all entities of the EntityType within a * time range */ fullExport?: GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport; /** * Per-Feature export settings. */ settings?: GoogleCloudAiplatformV1DestinationFeatureSetting[]; /** * Exports the latest Feature values of all entities of the EntityType within * a time range. */ snapshotExport?: GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport; } function serializeGoogleCloudAiplatformV1ExportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequest { return { ...data, fullExport: data["fullExport"] !== undefined ? serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data["fullExport"]) : undefined, snapshotExport: data["snapshotExport"] !== undefined ? serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data["snapshotExport"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequest { return { ...data, fullExport: data["fullExport"] !== undefined ? deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data["fullExport"]) : undefined, snapshotExport: data["snapshotExport"] !== undefined ? 
deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data["snapshotExport"]) : undefined, }; } /** * Describes exporting all historical Feature values of all entities of the * EntityType between [start_time, end_time]. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { /** * Exports Feature values as of this timestamp. If not set, retrieve values * as of now. Timestamp, if present, must not have higher than millisecond * precision. */ endTime?: Date; /** * Excludes Feature values with feature generation timestamp before this * timestamp. If not set, retrieve oldest values kept in Feature Store. * Timestamp, if present, must not have higher than millisecond precision. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestFullExport { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Describes exporting the latest Feature values of all entities of the * EntityType between [start_time, snapshot_time]. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { /** * Exports Feature values as of this timestamp. If not set, retrieve values * as of now. Timestamp, if present, must not have higher than millisecond * precision. */ snapshotTime?: Date; /** * Excludes Feature values with feature generation timestamp before this * timestamp. If not set, retrieve oldest values kept in Feature Store. * Timestamp, if present, must not have higher than millisecond precision. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { return { ...data, snapshotTime: data["snapshotTime"] !== undefined ? data["snapshotTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport(data: any): GoogleCloudAiplatformV1ExportFeatureValuesRequestSnapshotExport { return { ...data, snapshotTime: data["snapshotTime"] !== undefined ? new Date(data["snapshotTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Response message for FeaturestoreService.ExportFeatureValues. */ export interface GoogleCloudAiplatformV1ExportFeatureValuesResponse { } /** * Assigns input data to training, validation, and test sets based on the given * filters, data pieces not matched by any filter are ignored. Currently only * supported for Datasets containing DataItems. If any of the filters in this * message are to match nothing, then they can be set as '-' (the minus sign). * Supported only for unstructured Datasets. */ export interface GoogleCloudAiplatformV1ExportFilterSplit { /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to test the Model. 
A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ testFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to train the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ trainingFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to validate the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ validationFilter?: string; } /** * Assigns the input data to training, validation, and test sets as per the * given fractions. Any of `training_fraction`, `validation_fraction` and * `test_fraction` may optionally be provided, they must sum to up to 1. If the * provided ones sum to less than 1, the remainder is assigned to sets as * decided by Vertex AI. If none of the fractions are set, by default roughly * 80% of data is used for training, 10% for validation, and 10% for test. */ export interface GoogleCloudAiplatformV1ExportFractionSplit { /** * The fraction of the input data that is to be used to evaluate the Model. */ testFraction?: number; /** * The fraction of the input data that is to be used to train the Model. */ trainingFraction?: number; /** * The fraction of the input data that is to be used to validate the Model. */ validationFraction?: number; } /** * Details of ModelService.ExportModel operation. */ export interface GoogleCloudAiplatformV1ExportModelOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Output only. Information further describing the output of this Model * export. */ readonly outputInfo?: GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo; } /** * Further describes the output of the ExportModel. Supplements * ExportModelRequest.OutputConfig. */ export interface GoogleCloudAiplatformV1ExportModelOperationMetadataOutputInfo { /** * Output only. If the Model artifact is being exported to Google Cloud * Storage this is the full path of the directory created, into which the * Model files are being written to. */ readonly artifactOutputUri?: string; /** * Output only. If the Model image is being exported to Google Container * Registry or Artifact Registry this is the full path of the image created. */ readonly imageOutputUri?: string; } /** * Request message for ModelService.ExportModel. */ export interface GoogleCloudAiplatformV1ExportModelRequest { /** * Required. The desired output location and configuration. */ outputConfig?: GoogleCloudAiplatformV1ExportModelRequestOutputConfig; } /** * Output configuration for the Model export. */ export interface GoogleCloudAiplatformV1ExportModelRequestOutputConfig { /** * The Cloud Storage location where the Model artifact is to be written to. 
* Under the directory given as the destination a new one with name * "`model-export--`", where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 * format, will be created. Inside, the Model and any of its supporting files * will be written. This field should only be set when the `exportableContent` * field of the [Model.supported_export_formats] object contains `ARTIFACT`. */ artifactDestination?: GoogleCloudAiplatformV1GcsDestination; /** * The ID of the format in which the Model must be exported. Each Model lists * the export formats it supports. If no value is provided here, then the * first from the list of the Model's supported formats is used by default. */ exportFormatId?: string; /** * The Google Container Registry or Artifact Registry uri where the Model * container image will be copied to. This field should only be set when the * `exportableContent` field of the [Model.supported_export_formats] object * contains `IMAGE`. */ imageDestination?: GoogleCloudAiplatformV1ContainerRegistryDestination; } /** * Response message of ModelService.ExportModel operation. */ export interface GoogleCloudAiplatformV1ExportModelResponse { } /** * Request message for TensorboardService.ExportTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataRequest { /** * Exports the TensorboardTimeSeries' data that match the filter expression. */ filter?: string; /** * Field to use to sort the TensorboardTimeSeries' data. By default, * TensorboardTimeSeries' data is returned in a pseudo random order. */ orderBy?: string; /** * The maximum number of data points to return per page. The default * page_size is 1000. Values must be between 1 and 10000. Values above 10000 * are coerced to 10000. */ pageSize?: number; /** * A page token, received from a previous ExportTensorboardTimeSeriesData * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to ExportTensorboardTimeSeriesData must match the * call that provided the page token. */ pageToken?: string; } /** * Response message for TensorboardService.ExportTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { /** * A token, which can be sent as page_token to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * The returned time series data points. */ timeSeriesDataPoints?: GoogleCloudAiplatformV1TimeSeriesDataPoint[]; } function serializeGoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesDataPoints: data["timeSeriesDataPoints"] !== undefined ? data["timeSeriesDataPoints"].map((item: any) => (serializeGoogleCloudAiplatformV1TimeSeriesDataPoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1ExportTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesDataPoints: data["timeSeriesDataPoints"] !== undefined ? data["timeSeriesDataPoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1TimeSeriesDataPoint(item))) : undefined, }; } /** * The fact used in grounding. */ export interface GoogleCloudAiplatformV1Fact { /** * If present, chunk properties. */ chunk?: GoogleCloudAiplatformV1RagChunk; /** * Query that is used to retrieve this fact. 
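* For example (illustrative): the retrieval query `"what is the product warranty period?"` that surfaced this fact.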
*/ query?: string; /** * If present, according to the underlying Vector DB and the selected metric * type, the score can be either the distance or the similarity between the * query and the fact and its range depends on the metric type. For example, * if the metric type is COSINE_DISTANCE, it represents the distance between * the query and the fact. The larger the distance, the less relevant the fact * is to the query. The range is [0, 2], while 0 means the most relevant and 2 * means the least relevant. */ score?: number; /** * If present, the summary/snippet of the fact. */ summary?: string; /** * If present, it refers to the title of this fact. */ title?: string; /** * If present, this uri links to the source of the fact. */ uri?: string; /** * If present, the distance between the query vector and this fact vector. */ vectorDistance?: number; } /** * Configuration for faster model deployment. */ export interface GoogleCloudAiplatformV1FasterDeploymentConfig { /** * If true, enable fast tryout feature for this deployed model. */ fastTryoutEnabled?: boolean; } /** * Feature Metadata information. For example, color is a feature that describes * an apple. */ export interface GoogleCloudAiplatformV1Feature { /** * Output only. Only applicable for Vertex AI Feature Store (Legacy). * Timestamp when this EntityType was created. */ readonly createTime?: Date; /** * Description of the Feature. */ description?: string; /** * Optional. Only applicable for Vertex AI Feature Store (Legacy). If not * set, use the monitoring_config defined for the EntityType this Feature * belongs to. Only Features with type (Feature.ValueType) BOOL, STRING, * DOUBLE or INT64 can enable monitoring. If set to true, all types of data * monitoring are disabled despite the config on EntityType. */ disableMonitoring?: boolean; /** * Used to perform a consistent read-modify-write updates. If not set, a * blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your Features. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information on and examples of labels. No * more than 64 user labels can be associated with one Feature (System labels * are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Output only. Only applicable for Vertex AI Feature Store (Legacy). The * list of historical stats and anomalies with specified objectives. */ readonly monitoringStatsAnomalies?: GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly[]; /** * Immutable. Name of the Feature. Format: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entity_type}/features/{feature}` * `projects/{project}/locations/{location}/featureGroups/{feature_group}/features/{feature}` * The last part feature is assigned by the client. The feature can be up to * 64 characters long and can consist only of ASCII Latin letters A-Z and a-z, * underscore(_), and ASCII digits 0-9 starting with a letter. The value will * be unique given an entity type. */ name?: string; /** * Entity responsible for maintaining this feature. Can be comma separated * list of email addresses or URIs. */ pointOfContact?: string; /** * Output only. Only applicable for Vertex AI Feature Store (Legacy). 
* Timestamp when this EntityType was most recently updated. */ readonly updateTime?: Date; /** * Immutable. Only applicable for Vertex AI Feature Store (Legacy). Type of * Feature value. */ valueType?: | "VALUE_TYPE_UNSPECIFIED" | "BOOL" | "BOOL_ARRAY" | "DOUBLE" | "DOUBLE_ARRAY" | "INT64" | "INT64_ARRAY" | "STRING" | "STRING_ARRAY" | "BYTES" | "STRUCT"; /** * Only applicable for Vertex AI Feature Store. The name of the BigQuery * Table/View column hosting data for this version. If no value is provided, * will use feature_id. */ versionColumnName?: string; } /** * Vertex AI Feature Group. */ export interface GoogleCloudAiplatformV1FeatureGroup { /** * Indicates that features for this group come from BigQuery Table/View. By * default treats the source as a sparse time series source. The BigQuery * source table or view must have at least one entity ID column and a column * named `feature_timestamp`. */ bigQuery?: GoogleCloudAiplatformV1FeatureGroupBigQuery; /** * Output only. Timestamp when this FeatureGroup was created. */ readonly createTime?: Date; /** * Optional. Description of the FeatureGroup. */ description?: string; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * FeatureGroup. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one FeatureGroup(System * labels are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Identifier. Name of the FeatureGroup. Format: * `projects/{project}/locations/{location}/featureGroups/{featureGroup}` */ name?: string; /** * Output only. A Service Account unique to this FeatureGroup. The role * bigquery.dataViewer should be granted to this service account to allow * Vertex AI Feature Store to access source data while running jobs under this * FeatureGroup. */ readonly serviceAccountEmail?: string; /** * Optional. Service agent type used during jobs under a FeatureGroup. By * default, the Vertex AI Service Agent is used. When using an IAM Policy to * isolate this FeatureGroup within a project, a separate service account * should be provisioned by setting this field to * `SERVICE_AGENT_TYPE_FEATURE_GROUP`. This will generate a separate service * account to access the BigQuery source table. */ serviceAgentType?: | "SERVICE_AGENT_TYPE_UNSPECIFIED" | "SERVICE_AGENT_TYPE_PROJECT" | "SERVICE_AGENT_TYPE_FEATURE_GROUP"; /** * Output only. Timestamp when this FeatureGroup was last updated. */ readonly updateTime?: Date; } /** * Input source type for BigQuery Tables and Views. */ export interface GoogleCloudAiplatformV1FeatureGroupBigQuery { /** * Required. Immutable. The BigQuery source URI that points to either a * BigQuery Table or View. */ bigQuerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * Optional. If set, all feature values will be fetched from a single row per * unique entityId including nulls. If not set, will collapse all rows for * each unique entityId into a single row with any non-null values if present, * if no non-null values are present will sync null. 
ex: If source has schema * `(entity_id, feature_timestamp, f0, f1)` and the following rows: `(e1, * 2020-01-01T10:00:00.123Z, 10, 15)` `(e1, 2020-02-01T10:00:00.123Z, 20, * null)` If dense is set, `(e1, 20, null)` is synced to online stores. If * dense is not set, `(e1, 20, 15)` is synced to online stores. */ dense?: boolean; /** * Optional. Columns to construct entity_id / row keys. If not provided * defaults to `entity_id`. */ entityIdColumns?: string[]; /** * Optional. Set if the data source is not a time-series. */ staticDataSource?: boolean; /** * Optional. If the source is a time-series source, this can be set to * control how downstream sources (ex: FeatureView ) will treat time-series * sources. If not set, will treat the source as a time-series source with * `feature_timestamp` as timestamp column and no scan boundary. */ timeSeries?: GoogleCloudAiplatformV1FeatureGroupBigQueryTimeSeries; } export interface GoogleCloudAiplatformV1FeatureGroupBigQueryTimeSeries { /** * Optional. Column hosting timestamp values for a time-series source. Will * be used to determine the latest `feature_values` for each entity. Optional. * If not provided, column named `feature_timestamp` of type `TIMESTAMP` will * be used. */ timestampColumn?: string; } /** * A list of historical SnapshotAnalysis or ImportFeaturesAnalysis stats * requested by user, sorted by FeatureStatsAnomaly.start_time descending. */ export interface GoogleCloudAiplatformV1FeatureMonitoringStatsAnomaly { /** * Output only. The stats and anomalies generated at specific timestamp. */ readonly featureStatsAnomaly?: GoogleCloudAiplatformV1FeatureStatsAnomaly; /** * Output only. The objective for each stats. */ readonly objective?: | "OBJECTIVE_UNSPECIFIED" | "IMPORT_FEATURE_ANALYSIS" | "SNAPSHOT_ANALYSIS"; } /** * Noise sigma by features. Noise sigma represents the standard deviation of * the gaussian kernel that will be used to add noise to interpolated inputs * prior to computing gradients. */ export interface GoogleCloudAiplatformV1FeatureNoiseSigma { /** * Noise sigma per feature. No noise is added to features that are not set. */ noiseSigma?: GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature[]; } /** * Noise sigma for a single feature. */ export interface GoogleCloudAiplatformV1FeatureNoiseSigmaNoiseSigmaForFeature { /** * The name of the input feature for which noise sigma is provided. The * features are defined in explanation metadata inputs. */ name?: string; /** * This represents the standard deviation of the Gaussian kernel that will be * used to add noise to the feature prior to computing gradients. Similar to * noise_sigma but represents the noise added to the current feature. Defaults * to 0.1. */ sigma?: number; } /** * Vertex AI Feature Online Store provides a centralized repository for serving * ML features and embedding indexes at low latency. The Feature Online Store is * a top-level container. */ export interface GoogleCloudAiplatformV1FeatureOnlineStore { /** * Contains settings for the Cloud Bigtable instance that will be created to * serve featureValues for all FeatureViews under this FeatureOnlineStore. */ bigtable?: GoogleCloudAiplatformV1FeatureOnlineStoreBigtable; /** * Output only. Timestamp when this FeatureOnlineStore was created. */ readonly createTime?: Date; /** * Optional. The dedicated serving endpoint for this FeatureOnlineStore, * which is different from common Vertex service endpoint. 
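 *
 * As an illustrative sketch (the node counts and label are arbitrary example
 * values; leaving this field unset keeps the default public endpoint), a
 * Bigtable-backed store could be declared as:
 *
 * ```ts
 * const store: GoogleCloudAiplatformV1FeatureOnlineStore = {
 *   bigtable: {
 *     autoScaling: { minNodeCount: 1, maxNodeCount: 3, cpuUtilizationTarget: 50 },
 *   },
 *   labels: { "env": "dev" },
 * };
 * ```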
*/ dedicatedServingEndpoint?: GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint; /** * Optional. Customer-managed encryption key spec for data storage. If set, * online store will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * FeatureOnlineStore. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information on and examples of * labels. No more than 64 user labels can be associated with one * FeatureOnlineStore(System labels are excluded)." System reserved label keys * are prefixed with "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Identifier. Name of the FeatureOnlineStore. Format: * `projects/{project}/locations/{location}/featureOnlineStores/{featureOnlineStore}` */ name?: string; /** * Contains settings for the Optimized store that will be created to serve * featureValues for all FeatureViews under this FeatureOnlineStore. When * choose Optimized storage type, need to set * PrivateServiceConnectConfig.enable_private_service_connect to use private * endpoint. Otherwise will use public endpoint by default. */ optimized?: GoogleCloudAiplatformV1FeatureOnlineStoreOptimized; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. State of the featureOnlineStore. */ readonly state?: | "STATE_UNSPECIFIED" | "STABLE" | "UPDATING"; /** * Output only. Timestamp when this FeatureOnlineStore was last updated. */ readonly updateTime?: Date; } export interface GoogleCloudAiplatformV1FeatureOnlineStoreBigtable { /** * Required. Autoscaling config applied to Bigtable Instance. */ autoScaling?: GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling; } export interface GoogleCloudAiplatformV1FeatureOnlineStoreBigtableAutoScaling { /** * Optional. A percentage of the cluster's CPU capacity. Can be from 10% to * 80%. When a cluster's CPU utilization exceeds the target that you have set, * Bigtable immediately adds nodes to the cluster. When CPU utilization is * substantially lower than the target, Bigtable removes nodes. If not set * will default to 50%. */ cpuUtilizationTarget?: number; /** * Required. The maximum number of nodes to scale up to. Must be greater than * or equal to min_node_count, and less than or equal to 10 times of * 'min_node_count'. */ maxNodeCount?: number; /** * Required. The minimum number of nodes to scale down to. Must be greater * than or equal to 1. */ minNodeCount?: number; } /** * The dedicated serving endpoint for this FeatureOnlineStore. Only need to set * when you choose Optimized storage type. Public endpoint is provisioned by * default. */ export interface GoogleCloudAiplatformV1FeatureOnlineStoreDedicatedServingEndpoint { /** * Optional. Private service connect config. The private service connection * is available only for Optimized storage type, not for embedding management * now. If PrivateServiceConnectConfig.enable_private_service_connect set to * true, customers will use private service connection to send request. 
* Otherwise, the connection will be set to the public endpoint. */ privateServiceConnectConfig?: GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** * Output only. This field will be populated with the domain name to use for * this FeatureOnlineStore */ readonly publicEndpointDomainName?: string; /** * Output only. The name of the service attachment resource. Populated if * private service connect is enabled and after FeatureViewSync is created. */ readonly serviceAttachment?: string; } /** * Optimized storage type */ export interface GoogleCloudAiplatformV1FeatureOnlineStoreOptimized { } /** * Selector for Features of an EntityType. */ export interface GoogleCloudAiplatformV1FeatureSelector { /** * Required. Matches Features based on ID. */ idMatcher?: GoogleCloudAiplatformV1IdMatcher; } /** * Stats and Anomaly generated at specific timestamp for specific Feature. The * start_time and end_time are used to define the time range of the dataset that * current stats belongs to, e.g. prediction traffic is bucketed into prediction * datasets by time window. If the Dataset is not defined by time window, * start_time = end_time. Timestamp of the stats and anomalies always refers to * end_time. Raw stats and anomalies are stored in stats_uri or anomaly_uri in * the tensorflow defined protos. Field data_stats contains almost identical * information with the raw stats in Vertex AI defined proto, for UI to display. */ export interface GoogleCloudAiplatformV1FeatureStatsAnomaly { /** * This is the threshold used when detecting anomalies. The threshold can be * changed by the user, so this one might be different from ThresholdConfig.value. */ anomalyDetectionThreshold?: number; /** * Path of the anomaly file for current feature values in Cloud Storage * bucket. Format: gs:////anomalies. Example: * gs://monitoring_bucket/feature_name/anomalies. Anomalies are stored as binary * format with Protobuf message [tensorflow.metadata.v0.AnomalyInfo] * (https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/anomalies.proto). */ anomalyUri?: string; /** * Deviation from the current stats to baseline stats. 1. For categorical * feature, the distribution distance is calculated by L-infinity norm. 2. * For numerical feature, the distribution distance is calculated by * Jensen–Shannon divergence. */ distributionDeviation?: number; /** * The end timestamp of window where stats were generated. For objectives * where time window doesn't make sense (e.g. Featurestore Snapshot * Monitoring), end_time indicates the timestamp of the data used to generate * stats (e.g. timestamp we take snapshots for feature values). */ endTime?: Date; /** * Feature importance score, only populated when cross-feature monitoring is * enabled. For now only used to represent feature attribution score within * range [0, 1] for * ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_SKEW and * ModelDeploymentMonitoringObjectiveType.FEATURE_ATTRIBUTION_DRIFT. */ score?: number; /** * The start timestamp of window where stats were generated. For objectives * where time window doesn't make sense (e.g. Featurestore Snapshot * Monitoring), start_time is only used to indicate the monitoring intervals, * so it always equals to (end_time - monitoring_interval). */ startTime?: Date; /** * Path of the stats file for current feature values in Cloud Storage bucket. * Format: gs:////stats. Example: gs://monitoring_bucket/feature_name/stats. 
* Stats are stored as binary format with Protobuf message * [tensorflow.metadata.v0.FeatureNameStatistics](https://github.com/tensorflow/metadata/blob/master/tensorflow_metadata/proto/v0/statistics.proto). */ statsUri?: string; } function serializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data: any): GoogleCloudAiplatformV1FeatureStatsAnomaly { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data: any): GoogleCloudAiplatformV1FeatureStatsAnomaly { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Vertex AI Feature Store provides a centralized repository for organizing, * storing, and serving ML features. The Featurestore is a top-level container * for your features and their values. */ export interface GoogleCloudAiplatformV1Featurestore { /** * Output only. Timestamp when this Featurestore was created. */ readonly createTime?: Date; /** * Optional. Customer-managed encryption key spec for data storage. If set, * both of the online and offline data storage will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. The labels with user-defined metadata to organize your * Featurestore. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one Featurestore(System * labels are excluded)." System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Output only. Name of the Featurestore. Format: * `projects/{project}/locations/{location}/featurestores/{featurestore}` */ readonly name?: string; /** * Optional. Config for online storage resources. The field should not * co-exist with the field of `OnlineStoreReplicationConfig`. If both of it * and OnlineStoreReplicationConfig are unset, the feature store will not have * an online store and cannot be used for online serving. */ onlineServingConfig?: GoogleCloudAiplatformV1FeaturestoreOnlineServingConfig; /** * Optional. TTL in days for feature values that will be stored in online * serving storage. The Feature Store online storage periodically removes * obsolete feature values older than `online_storage_ttl_days` since the * feature generation time. Note that `online_storage_ttl_days` should be less * than or equal to `offline_storage_ttl_days` for each EntityType under a * featurestore. If not set, default to 4000 days */ onlineStorageTtlDays?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. State of the featurestore. */ readonly state?: | "STATE_UNSPECIFIED" | "STABLE" | "UPDATING"; /** * Output only. Timestamp when this Featurestore was last updated. 
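 *
 * As an illustrative sketch (the node count, TTL, and label are arbitrary
 * example values, not recommendations), a legacy Featurestore payload could
 * look like this:
 *
 * ```ts
 * const featurestore: GoogleCloudAiplatformV1Featurestore = {
 *   onlineServingConfig: { fixedNodeCount: 1 },
 *   onlineStorageTtlDays: 30,
 *   labels: { "env": "dev" },
 * };
 * ```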
*/ readonly updateTime?: Date; } /** * Configuration of how features in Featurestore are monitored. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfig { /** * Threshold for categorical features of anomaly detection. This is shared by * all types of Featurestore Monitoring for categorical features (i.e. * Features with type (Feature.ValueType) BOOL or STRING). */ categoricalThresholdConfig?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig; /** * The config for ImportFeatures Analysis Based Feature Monitoring. */ importFeaturesAnalysis?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigImportFeaturesAnalysis; /** * Threshold for numerical features of anomaly detection. This is shared by * all objectives of Featurestore Monitoring for numerical features (i.e. * Features with type (Feature.ValueType) DOUBLE or INT64). */ numericalThresholdConfig?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig; /** * The config for Snapshot Analysis Based Feature Monitoring. */ snapshotAnalysis?: GoogleCloudAiplatformV1FeaturestoreMonitoringConfigSnapshotAnalysis; } /** * Configuration of the Featurestore's ImportFeature Analysis Based Monitoring. * This type of analysis generates statistics for values of each Feature * imported by every ImportFeatureValues operation. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfigImportFeaturesAnalysis { /** * The baseline used to do anomaly detection for the statistics generated by * import features analysis. */ anomalyDetectionBaseline?: | "BASELINE_UNSPECIFIED" | "LATEST_STATS" | "MOST_RECENT_SNAPSHOT_STATS" | "PREVIOUS_IMPORT_FEATURES_STATS"; /** * Whether to enable / disable / inherit default behavior for import * features analysis. */ state?: | "STATE_UNSPECIFIED" | "DEFAULT" | "ENABLED" | "DISABLED"; } /** * Configuration of the Featurestore's Snapshot Analysis Based Monitoring. This * type of analysis generates statistics for each Feature based on a snapshot of * the latest feature value of each entity every monitoring_interval. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfigSnapshotAnalysis { /** * The monitoring schedule for snapshot analysis. For EntityType-level * config: unset / disabled = true indicates disabled by default for Features * under it; otherwise by default enable snapshot analysis monitoring with * monitoring_interval for Features under it. Feature-level config: disabled = * true indicates disabled regardless of the EntityType-level config; unset * monitoring_interval indicates going with EntityType-level config; otherwise * run snapshot analysis monitoring with monitoring_interval regardless of the * EntityType-level config. Explicitly disable the snapshot analysis based * monitoring. */ disabled?: boolean; /** * Configuration of the snapshot analysis based monitoring pipeline running * interval. The value indicates the number of days. */ monitoringIntervalDays?: number; /** * Customized export features time window for snapshot analysis. Unit is one * day. Default value is 3 weeks. Minimum value is 1 day. Maximum value is * 4000 days. */ stalenessDays?: number; } /** * The config for Featurestore Monitoring threshold. */ export interface GoogleCloudAiplatformV1FeaturestoreMonitoringConfigThresholdConfig { /** * Specify a threshold value that can trigger the alert. 1. For categorical * feature, the distribution distance is calculated by L-infinity norm. 2. 
* For numerical feature, the distribution distance is calculated by * Jensen–Shannon divergence. Each feature must have a non-zero threshold if * it needs to be monitored. Otherwise no alert will be triggered for that * feature. */ value?: number; } /** * OnlineServingConfig specifies the details for provisioning online serving * resources. */ export interface GoogleCloudAiplatformV1FeaturestoreOnlineServingConfig { /** * The number of nodes for the online store. The number of nodes doesn't * scale automatically, but you can manually update the number of nodes. If * set to 0, the featurestore will not have an online store and cannot be used * for online serving. */ fixedNodeCount?: number; /** * Online serving scaling configuration. Only one of `fixed_node_count` and * `scaling` can be set. Setting one will reset the other. */ scaling?: GoogleCloudAiplatformV1FeaturestoreOnlineServingConfigScaling; } /** * Online serving scaling configuration. If min_node_count and max_node_count * are set to the same value, the cluster will be configured with a fixed * number of nodes (no auto-scaling). */ export interface GoogleCloudAiplatformV1FeaturestoreOnlineServingConfigScaling { /** * Optional. The cpu utilization that the Autoscaler should be trying to * achieve. This number is on a scale from 0 (no utilization) to 100 (total * utilization), and is limited between 10 and 80. When a cluster's CPU * utilization exceeds the target that you have set, Bigtable immediately adds * nodes to the cluster. When CPU utilization is substantially lower than the * target, Bigtable removes nodes. If not set or set to 0, defaults to 50. */ cpuUtilizationTarget?: number; /** * The maximum number of nodes to scale up to. Must be greater than * min_node_count, and less than or equal to 10 times of 'min_node_count'. */ maxNodeCount?: number; /** * Required. The minimum number of nodes to scale down to. Must be greater * than or equal to 1. */ minNodeCount?: number; } /** * Value for a feature. */ export interface GoogleCloudAiplatformV1FeatureValue { /** * A list of bool type feature value. */ boolArrayValue?: GoogleCloudAiplatformV1BoolArray; /** * Bool type feature value. */ boolValue?: boolean; /** * Bytes feature value. */ bytesValue?: Uint8Array; /** * A list of double type feature value. */ doubleArrayValue?: GoogleCloudAiplatformV1DoubleArray; /** * Double type feature value. */ doubleValue?: number; /** * A list of int64 type feature value. */ int64ArrayValue?: GoogleCloudAiplatformV1Int64Array; /** * Int64 feature value. */ int64Value?: bigint; /** * Metadata of feature value. */ metadata?: GoogleCloudAiplatformV1FeatureValueMetadata; /** * A list of string type feature value. */ stringArrayValue?: GoogleCloudAiplatformV1StringArray; /** * String feature value. */ stringValue?: string; /** * A struct type feature value. */ structValue?: GoogleCloudAiplatformV1StructValue; } function serializeGoogleCloudAiplatformV1FeatureValue(data: any): GoogleCloudAiplatformV1FeatureValue { return { ...data, bytesValue: data["bytesValue"] !== undefined ? encodeBase64(data["bytesValue"]) : undefined, int64ArrayValue: data["int64ArrayValue"] !== undefined ? serializeGoogleCloudAiplatformV1Int64Array(data["int64ArrayValue"]) : undefined, int64Value: data["int64Value"] !== undefined ? String(data["int64Value"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureValueMetadata(data["metadata"]) : undefined, structValue: data["structValue"] !== undefined ? 
serializeGoogleCloudAiplatformV1StructValue(data["structValue"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureValue(data: any): GoogleCloudAiplatformV1FeatureValue { return { ...data, bytesValue: data["bytesValue"] !== undefined ? decodeBase64(data["bytesValue"] as string) : undefined, int64ArrayValue: data["int64ArrayValue"] !== undefined ? deserializeGoogleCloudAiplatformV1Int64Array(data["int64ArrayValue"]) : undefined, int64Value: data["int64Value"] !== undefined ? BigInt(data["int64Value"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureValueMetadata(data["metadata"]) : undefined, structValue: data["structValue"] !== undefined ? deserializeGoogleCloudAiplatformV1StructValue(data["structValue"]) : undefined, }; } /** * A destination location for Feature values and format. */ export interface GoogleCloudAiplatformV1FeatureValueDestination { /** * Output in BigQuery format. BigQueryDestination.output_uri in * FeatureValueDestination.bigquery_destination must refer to a table. */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * Output in CSV format. Array Feature value types are not allowed in CSV * format. */ csvDestination?: GoogleCloudAiplatformV1CsvDestination; /** * Output in TFRecord format. Below are the mapping from Feature value type * in Featurestore to Feature value type in TFRecord: Value type in * Featurestore | Value type in TFRecord DOUBLE, DOUBLE_ARRAY | FLOAT_LIST * INT64, INT64_ARRAY | INT64_LIST STRING, STRING_ARRAY, BYTES | BYTES_LIST * true -> byte_string("true"), false -> byte_string("false") BOOL, BOOL_ARRAY * (true, false) | BYTES_LIST */ tfrecordDestination?: GoogleCloudAiplatformV1TFRecordDestination; } /** * Container for list of values. */ export interface GoogleCloudAiplatformV1FeatureValueList { /** * A list of feature values. All of them should be the same data type. */ values?: GoogleCloudAiplatformV1FeatureValue[]; } function serializeGoogleCloudAiplatformV1FeatureValueList(data: any): GoogleCloudAiplatformV1FeatureValueList { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (serializeGoogleCloudAiplatformV1FeatureValue(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureValueList(data: any): GoogleCloudAiplatformV1FeatureValueList { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (deserializeGoogleCloudAiplatformV1FeatureValue(item))) : undefined, }; } /** * Metadata of feature value. */ export interface GoogleCloudAiplatformV1FeatureValueMetadata { /** * Feature generation timestamp. Typically, it is provided by user at feature * ingestion time. If not, feature store will use the system timestamp when * the data is ingested into feature store. For streaming ingestion, the time, * aligned by days, must be no older than five years (1825 days) and no later * than one year (366 days) in the future. */ generateTime?: Date; } function serializeGoogleCloudAiplatformV1FeatureValueMetadata(data: any): GoogleCloudAiplatformV1FeatureValueMetadata { return { ...data, generateTime: data["generateTime"] !== undefined ? data["generateTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureValueMetadata(data: any): GoogleCloudAiplatformV1FeatureValueMetadata { return { ...data, generateTime: data["generateTime"] !== undefined ? 
new Date(data["generateTime"]) : undefined, }; } /** * FeatureView is representation of values that the FeatureOnlineStore will * serve based on its syncConfig. */ export interface GoogleCloudAiplatformV1FeatureView { /** * Optional. Configures how data is supposed to be extracted from a BigQuery * source to be loaded onto the FeatureOnlineStore. */ bigQuerySource?: GoogleCloudAiplatformV1FeatureViewBigQuerySource; /** * Output only. Timestamp when this FeatureView was created. */ readonly createTime?: Date; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Optional. Configures the features from a Feature Registry source that need * to be loaded onto the FeatureOnlineStore. */ featureRegistrySource?: GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource; /** * Optional. Configuration for index preparation for vector search. It * contains the required configurations to create an index from source data, * so that approximate nearest neighbor (a.k.a ANN) algorithms search can be * performed during online serving. */ indexConfig?: GoogleCloudAiplatformV1FeatureViewIndexConfig; /** * Optional. The labels with user-defined metadata to organize your * FeatureViews. Label keys and values can be no longer than 64 characters * (Unicode codepoints), can only contain lowercase letters, numeric * characters, underscores and dashes. International characters are allowed. * See https://goo.gl/xmQnxf for more information on and examples of labels. * No more than 64 user labels can be associated with one * FeatureOnlineStore(System labels are excluded)." System reserved label keys * are prefixed with "aiplatform.googleapis.com/" and are immutable. */ labels?: { [key: string]: string }; /** * Identifier. Name of the FeatureView. Format: * `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}` */ name?: string; /** * Optional. Configuration for FeatureView created under Optimized * FeatureOnlineStore. */ optimizedConfig?: GoogleCloudAiplatformV1FeatureViewOptimizedConfig; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. A Service Account unique to this FeatureView. The role * bigquery.dataViewer should be granted to this service account to allow * Vertex AI Feature Store to sync data to the online store. */ readonly serviceAccountEmail?: string; /** * Optional. Service agent type used during data sync. By default, the Vertex * AI Service Agent is used. When using an IAM Policy to isolate this * FeatureView within a project, a separate service account should be * provisioned by setting this field to `SERVICE_AGENT_TYPE_FEATURE_VIEW`. * This will generate a separate service account to access the BigQuery source * table. */ serviceAgentType?: | "SERVICE_AGENT_TYPE_UNSPECIFIED" | "SERVICE_AGENT_TYPE_PROJECT" | "SERVICE_AGENT_TYPE_FEATURE_VIEW"; /** * Configures when data is to be synced/updated for this FeatureView. At the * end of the sync the latest featureValues for each entityId of this * FeatureView are made ready for online serving. */ syncConfig?: GoogleCloudAiplatformV1FeatureViewSyncConfig; /** * Output only. Timestamp when this FeatureView was last updated. */ readonly updateTime?: Date; /** * Optional. The Vertex RAG Source that the FeatureView is linked to. 
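 *
 * As an illustrative sketch (the BigQuery URI and cron schedule are invented
 * placeholders), a FeatureView that syncs hourly from a BigQuery view could
 * be declared as:
 *
 * ```ts
 * const view: GoogleCloudAiplatformV1FeatureView = {
 *   bigQuerySource: {
 *     uri: "bq://my-project.my_dataset.my_view",
 *     entityIdColumns: ["entity_id"],
 *   },
 *   syncConfig: { cron: "TZ=America/New_York 0 * * * *" },
 * };
 * ```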
*/ vertexRagSource?: GoogleCloudAiplatformV1FeatureViewVertexRagSource; } function serializeGoogleCloudAiplatformV1FeatureView(data: any): GoogleCloudAiplatformV1FeatureView { return { ...data, featureRegistrySource: data["featureRegistrySource"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data["featureRegistrySource"]) : undefined, indexConfig: data["indexConfig"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data["indexConfig"]) : undefined, vertexRagSource: data["vertexRagSource"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data["vertexRagSource"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureView(data: any): GoogleCloudAiplatformV1FeatureView { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, featureRegistrySource: data["featureRegistrySource"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data["featureRegistrySource"]) : undefined, indexConfig: data["indexConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data["indexConfig"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, vertexRagSource: data["vertexRagSource"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data["vertexRagSource"]) : undefined, }; } export interface GoogleCloudAiplatformV1FeatureViewBigQuerySource { /** * Required. Columns to construct entity_id / row keys. */ entityIdColumns?: string[]; /** * Required. The BigQuery view URI that will be materialized on each sync * trigger based on FeatureView.SyncConfig. */ uri?: string; } /** * Lookup key for a feature view. */ export interface GoogleCloudAiplatformV1FeatureViewDataKey { /** * The actual Entity ID will be composed from this struct. This should match * with the way ID is defined in the FeatureView spec. */ compositeKey?: GoogleCloudAiplatformV1FeatureViewDataKeyCompositeKey; /** * String key to use for lookup. */ key?: string; } /** * ID that is comprised from several parts (columns). */ export interface GoogleCloudAiplatformV1FeatureViewDataKeyCompositeKey { /** * Parts to construct Entity ID. Should match with the same ID columns as * defined in FeatureView in the same order. */ parts?: string[]; } /** * A Feature Registry source for features that need to be synced to Online * Store. */ export interface GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource { /** * Required. List of features that need to be synced to Online Store. */ featureGroups?: GoogleCloudAiplatformV1FeatureViewFeatureRegistrySourceFeatureGroup[]; /** * Optional. The project number of the parent project of the Feature Groups. */ projectNumber?: bigint; } function serializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data: any): GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource { return { ...data, projectNumber: data["projectNumber"] !== undefined ? String(data["projectNumber"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewFeatureRegistrySource(data: any): GoogleCloudAiplatformV1FeatureViewFeatureRegistrySource { return { ...data, projectNumber: data["projectNumber"] !== undefined ? BigInt(data["projectNumber"]) : undefined, }; } /** * Features belonging to a single feature group that will be synced to Online * Store. */ export interface GoogleCloudAiplatformV1FeatureViewFeatureRegistrySourceFeatureGroup { /** * Required. 
Identifier of the feature group. */ featureGroupId?: string; /** * Required. Identifiers of features under the feature group. */ featureIds?: string[]; } /** * Configuration for vector indexing. */ export interface GoogleCloudAiplatformV1FeatureViewIndexConfig { /** * Optional. Configuration options for using brute force search, which simply * implements the standard linear search in the database for each query. It is * primarily meant for benchmarking and to generate the ground truth for * approximate search. */ bruteForceConfig?: GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig; /** * Optional. Column of crowding. This column contains crowding attribute * which is a constraint on a neighbor list produced by * FeatureOnlineStoreService.SearchNearestEntities to diversify search * results. If NearestNeighborQuery.per_crowding_attribute_neighbor_count is * set to K in SearchNearestEntitiesRequest, it's guaranteed that no more than * K entities of the same crowding attribute are returned in the response. */ crowdingColumn?: string; /** * Optional. The distance measure used in nearest neighbor search. */ distanceMeasureType?: | "DISTANCE_MEASURE_TYPE_UNSPECIFIED" | "SQUARED_L2_DISTANCE" | "COSINE_DISTANCE" | "DOT_PRODUCT_DISTANCE"; /** * Optional. Column of embedding. This column contains the source data to * create index for vector search. embedding_column must be set when using * vector search. */ embeddingColumn?: string; /** * Optional. The number of dimensions of the input embedding. */ embeddingDimension?: number; /** * Optional. Columns of features that're used to filter vector search * results. */ filterColumns?: string[]; /** * Optional. Configuration options for the tree-AH algorithm (Shallow tree + * Asymmetric Hashing). Please refer to this paper for more details: * https://arxiv.org/abs/1908.10396 */ treeAhConfig?: GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig; } function serializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfig { return { ...data, treeAhConfig: data["treeAhConfig"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data["treeAhConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewIndexConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfig { return { ...data, treeAhConfig: data["treeAhConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data["treeAhConfig"]) : undefined, }; } /** * Configuration options for using brute force search. */ export interface GoogleCloudAiplatformV1FeatureViewIndexConfigBruteForceConfig { } /** * Configuration options for the tree-AH algorithm. */ export interface GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig { /** * Optional. Number of embeddings on each leaf node. The default value is * 1000 if not set. */ leafNodeEmbeddingCount?: bigint; } function serializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig { return { ...data, leafNodeEmbeddingCount: data["leafNodeEmbeddingCount"] !== undefined ? String(data["leafNodeEmbeddingCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig(data: any): GoogleCloudAiplatformV1FeatureViewIndexConfigTreeAHConfig { return { ...data, leafNodeEmbeddingCount: data["leafNodeEmbeddingCount"] !== undefined ? 
BigInt(data["leafNodeEmbeddingCount"]) : undefined, }; } /** * Configuration for FeatureViews created in Optimized FeatureOnlineStore. */ export interface GoogleCloudAiplatformV1FeatureViewOptimizedConfig { /** * Optional. A description of resources that the FeatureView uses, which to * large degree are decided by Vertex AI, and optionally allows only a modest * additional configuration. If min_replica_count is not set, the default * value is 2. If max_replica_count is not set, the default value is 6. The * max allowed replica count is 1000. */ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; } /** * FeatureViewSync is a representation of sync operation which copies data from * data source to Feature View in Online Store. */ export interface GoogleCloudAiplatformV1FeatureViewSync { /** * Output only. Time when this FeatureViewSync is created. Creation of a * FeatureViewSync means that the job is pending / waiting for sufficient * resources but may not have started the actual data transfer yet. */ readonly createTime?: Date; /** * Output only. Final status of the FeatureViewSync. */ readonly finalStatus?: GoogleRpcStatus; /** * Identifier. Name of the FeatureViewSync. Format: * `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}` */ name?: string; /** * Output only. Time when this FeatureViewSync is finished. */ readonly runTime?: GoogleTypeInterval; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Summary of the sync job. */ readonly syncSummary?: GoogleCloudAiplatformV1FeatureViewSyncSyncSummary; } /** * Configuration for Sync. Only one option is set. */ export interface GoogleCloudAiplatformV1FeatureViewSyncConfig { /** * Optional. If true, syncs the FeatureView in a continuous manner to Online * Store. */ continuous?: boolean; /** * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled * runs. To explicitly set a timezone to the cron tab, apply a prefix in the * cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". The * ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. * For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York * 1 * * * *". */ cron?: string; } /** * Summary from the Sync job. For continuous syncs, the summary is updated * periodically. For batch syncs, it gets updated on completion of the sync. */ export interface GoogleCloudAiplatformV1FeatureViewSyncSyncSummary { /** * Output only. Total number of rows synced. */ readonly rowSynced?: bigint; /** * Lower bound of the system time watermark for the sync job. This is only * set for continuously syncing feature views. */ systemWatermarkTime?: Date; /** * Output only. BigQuery slot milliseconds consumed for the sync job. */ readonly totalSlot?: bigint; } function serializeGoogleCloudAiplatformV1FeatureViewSyncSyncSummary(data: any): GoogleCloudAiplatformV1FeatureViewSyncSyncSummary { return { ...data, systemWatermarkTime: data["systemWatermarkTime"] !== undefined ? data["systemWatermarkTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewSyncSyncSummary(data: any): GoogleCloudAiplatformV1FeatureViewSyncSyncSummary { return { ...data, rowSynced: data["rowSynced"] !== undefined ? 
BigInt(data["rowSynced"]) : undefined, systemWatermarkTime: data["systemWatermarkTime"] !== undefined ? new Date(data["systemWatermarkTime"]) : undefined, totalSlot: data["totalSlot"] !== undefined ? BigInt(data["totalSlot"]) : undefined, }; } /** * A Vertex Rag source for features that need to be synced to Online Store. */ export interface GoogleCloudAiplatformV1FeatureViewVertexRagSource { /** * Optional. The RAG corpus id corresponding to this FeatureView. */ ragCorpusId?: bigint; /** * Required. The BigQuery view/table URI that will be materialized on each * manual sync trigger. The table/view is expected to have the following * columns and types at least: - `corpus_id` (STRING, NULLABLE/REQUIRED) - * `file_id` (STRING, NULLABLE/REQUIRED) - `chunk_id` (STRING, * NULLABLE/REQUIRED) - `chunk_data_type` (STRING, NULLABLE/REQUIRED) - * `chunk_data` (STRING, NULLABLE/REQUIRED) - `embeddings` (FLOAT, REPEATED) - * `file_original_uri` (STRING, NULLABLE/REQUIRED) */ uri?: string; } function serializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data: any): GoogleCloudAiplatformV1FeatureViewVertexRagSource { return { ...data, ragCorpusId: data["ragCorpusId"] !== undefined ? String(data["ragCorpusId"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FeatureViewVertexRagSource(data: any): GoogleCloudAiplatformV1FeatureViewVertexRagSource { return { ...data, ragCorpusId: data["ragCorpusId"] !== undefined ? BigInt(data["ragCorpusId"]) : undefined, }; } /** * Request message for FeatureOnlineStoreService.FetchFeatureValues. All the * features under the requested feature view will be returned. */ export interface GoogleCloudAiplatformV1FetchFeatureValuesRequest { /** * Optional. Response data format. If not set, * FeatureViewDataFormat.KEY_VALUE will be used. */ dataFormat?: | "FEATURE_VIEW_DATA_FORMAT_UNSPECIFIED" | "KEY_VALUE" | "PROTO_STRUCT"; /** * Optional. The request key to fetch feature values for. */ dataKey?: GoogleCloudAiplatformV1FeatureViewDataKey; } /** * Response message for FeatureOnlineStoreService.FetchFeatureValues */ export interface GoogleCloudAiplatformV1FetchFeatureValuesResponse { /** * The data key associated with this response. Will only be populated for * FeatureOnlineStoreService.StreamingFetchFeatureValues RPCs. */ dataKey?: GoogleCloudAiplatformV1FeatureViewDataKey; /** * Feature values in KeyValue format. */ keyValues?: GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList; /** * Feature values in proto Struct format. */ protoStruct?: { [key: string]: any }; } function serializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponse { return { ...data, keyValues: data["keyValues"] !== undefined ? serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data["keyValues"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponse { return { ...data, keyValues: data["keyValues"] !== undefined ? deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data["keyValues"]) : undefined, }; } /** * Response structure in the format of key (feature name) and (feature) value * pair. */ export interface GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList { /** * List of feature names and values. 
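 *
 * As an illustrative sketch (the entity key is an invented placeholder), a
 * fetch request that produces such a key/value list could be built as:
 *
 * ```ts
 * const fetchReq: GoogleCloudAiplatformV1FetchFeatureValuesRequest = {
 *   dataFormat: "KEY_VALUE",
 *   dataKey: { key: "user_123" },
 * };
 * ```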
*/ features?: GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair[]; } function serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList { return { ...data, features: data["features"] !== undefined ? data["features"].map((item: any) => (serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairList { return { ...data, features: data["features"] !== undefined ? data["features"].map((item: any) => (deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(item))) : undefined, }; } /** * Feature name & value pair. */ export interface GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { /** * Feature short name. */ name?: string; /** * Feature value. */ value?: GoogleCloudAiplatformV1FeatureValue; } function serializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { return { ...data, value: data["value"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair(data: any): GoogleCloudAiplatformV1FetchFeatureValuesResponseFeatureNameValuePairListFeatureNameValuePair { return { ...data, value: data["value"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, }; } /** * Request message for PredictionService.FetchPredictOperation. */ export interface GoogleCloudAiplatformV1FetchPredictOperationRequest { /** * Required. The server-assigned name for the operation. */ operationName?: string; } /** * URI based data. */ export interface GoogleCloudAiplatformV1FileData { /** * Optional. Display name of the file data. Used to provide a label or * filename to distinguish file datas. This field is only returned in * PromptMessage for prompt management. It is not currently used in the Gemini * GenerateContent calls. */ displayName?: string; /** * Required. URI. */ fileUri?: string; /** * Required. The IANA standard MIME type of the source data. */ mimeType?: string; } /** * RagFile status. */ export interface GoogleCloudAiplatformV1FileStatus { /** * Output only. Only when the `state` field is ERROR. */ readonly errorStatus?: string; /** * Output only. RagFile state. */ readonly state?: | "STATE_UNSPECIFIED" | "ACTIVE" | "ERROR"; } /** * Assigns input data to training, validation, and test sets based on the given * filters, data pieces not matched by any filter are ignored. Currently only * supported for Datasets containing DataItems. If any of the filters in this * message are to match nothing, then they can be set as '-' (the minus sign). * Supported only for unstructured Datasets. */ export interface GoogleCloudAiplatformV1FilterSplit { /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to test the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. 
If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ testFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to train the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ trainingFilter?: string; /** * Required. A filter on DataItems of the Dataset. DataItems that match this * filter are used to validate the Model. A filter with same syntax as the one * used in DatasetService.ListDataItems may be used. If a single DataItem is * matched by more than one of the FilterSplit filters, then it is assigned to * the first set that applies to it in the training, validation, test order. */ validationFilter?: string; } /** * The request message for MatchService.FindNeighbors. */ export interface GoogleCloudAiplatformV1FindNeighborsRequest { /** * The ID of the DeployedIndex that will serve the request. This request is * sent to a specific IndexEndpoint, as per the IndexEndpoint.network. That * IndexEndpoint also has IndexEndpoint.deployed_indexes, and each such index * has a DeployedIndex.id field. The value of the field below must equal one * of the DeployedIndex.id fields of the IndexEndpoint that is being called * for this request. */ deployedIndexId?: string; /** * The list of queries. */ queries?: GoogleCloudAiplatformV1FindNeighborsRequestQuery[]; /** * If set to true, the full datapoints (including all vector values and * restricts) of the nearest neighbors are returned. Note that returning full * datapoint will significantly increase the latency and cost of the query. */ returnFullDatapoint?: boolean; } function serializeGoogleCloudAiplatformV1FindNeighborsRequest(data: any): GoogleCloudAiplatformV1FindNeighborsRequest { return { ...data, queries: data["queries"] !== undefined ? data["queries"].map((item: any) => (serializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsRequest(data: any): GoogleCloudAiplatformV1FindNeighborsRequest { return { ...data, queries: data["queries"] !== undefined ? data["queries"].map((item: any) => (deserializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(item))) : undefined, }; } /** * A query to find a number of the nearest neighbors (most similar vectors) of * a vector. */ export interface GoogleCloudAiplatformV1FindNeighborsRequestQuery { /** * The number of neighbors to find via approximate search before exact * reordering is performed. If not set, the default value from scam config is * used; if set, this value must be > 0. */ approximateNeighborCount?: number; /** * Required. The datapoint/vector whose nearest neighbors should be searched * for. */ datapoint?: GoogleCloudAiplatformV1IndexDatapoint; /** * The fraction of the number of leaves to search, set at query time allows * user to tune search performance. This value increase result in both search * accuracy and latency increase. The value should be between 0.0 and 1.0. If * not set or set to 0.0, query uses the default value specified in * NearestNeighborSearchConfig.TreeAHConfig.fraction_leaf_nodes_to_search. 
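 *
 * As an illustrative sketch (the deployed index ID and vector are invented,
 * and the IndexDatapoint field names are assumed from its declaration
 * elsewhere in this module), a single-query request could be built as:
 *
 * ```ts
 * const findReq: GoogleCloudAiplatformV1FindNeighborsRequest = {
 *   deployedIndexId: "my_deployed_index",
 *   queries: [{
 *     datapoint: { datapointId: "q0", featureVector: [0.1, 0.2, 0.3] },
 *     neighborCount: 10,
 *   }],
 *   returnFullDatapoint: false,
 * };
 * ```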
*/ fractionLeafNodesToSearchOverride?: number; /** * The number of nearest neighbors to be retrieved from database for each * query. If not set, will use the default from the service configuration * (https://cloud.google.com/vertex-ai/docs/matching-engine/configuring-indexes#nearest-neighbor-search-config). */ neighborCount?: number; /** * Crowding is a constraint on a neighbor list produced by nearest neighbor * search requiring that no more than some value k' of the k neighbors * returned have the same value of crowding_attribute. It's used for improving * result diversity. This field is the maximum number of matches with the same * crowding tag. */ perCrowdingAttributeNeighborCount?: number; /** * Optional. Represents RRF algorithm that combines search results. */ rrf?: GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF; } function serializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(data: any): GoogleCloudAiplatformV1FindNeighborsRequestQuery { return { ...data, datapoint: data["datapoint"] !== undefined ? serializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsRequestQuery(data: any): GoogleCloudAiplatformV1FindNeighborsRequestQuery { return { ...data, datapoint: data["datapoint"] !== undefined ? deserializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } /** * Parameters for RRF algorithm that combines search results. */ export interface GoogleCloudAiplatformV1FindNeighborsRequestQueryRRF { /** * Required. Users can provide an alpha value to give more weight to dense vs * sparse results. For example, if the alpha is 0, we only return sparse and * if the alpha is 1, we only return dense. */ alpha?: number; } /** * The response message for MatchService.FindNeighbors. */ export interface GoogleCloudAiplatformV1FindNeighborsResponse { /** * The nearest neighbors of the query datapoints. */ nearestNeighbors?: GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors[]; } function serializeGoogleCloudAiplatformV1FindNeighborsResponse(data: any): GoogleCloudAiplatformV1FindNeighborsResponse { return { ...data, nearestNeighbors: data["nearestNeighbors"] !== undefined ? data["nearestNeighbors"].map((item: any) => (serializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsResponse(data: any): GoogleCloudAiplatformV1FindNeighborsResponse { return { ...data, nearestNeighbors: data["nearestNeighbors"] !== undefined ? data["nearestNeighbors"].map((item: any) => (deserializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(item))) : undefined, }; } /** * Nearest neighbors for one query. */ export interface GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors { /** * The ID of the query datapoint. */ id?: string; /** * All its neighbors. */ neighbors?: GoogleCloudAiplatformV1FindNeighborsResponseNeighbor[]; } function serializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (serializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? 
data["neighbors"].map((item: any) => (deserializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(item))) : undefined, }; } /** * A neighbor of the query vector. */ export interface GoogleCloudAiplatformV1FindNeighborsResponseNeighbor { /** * The datapoint of the neighbor. Note that full datapoints are returned only * when "return_full_datapoint" is set to true. Otherwise, only the * "datapoint_id" and "crowding_tag" fields are populated. */ datapoint?: GoogleCloudAiplatformV1IndexDatapoint; /** * The distance between the neighbor and the dense embedding query. */ distance?: number; /** * The distance between the neighbor and the query sparse_embedding. */ sparseDistance?: number; } function serializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNeighbor { return { ...data, datapoint: data["datapoint"] !== undefined ? serializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FindNeighborsResponseNeighbor(data: any): GoogleCloudAiplatformV1FindNeighborsResponseNeighbor { return { ...data, datapoint: data["datapoint"] !== undefined ? deserializeGoogleCloudAiplatformV1IndexDatapoint(data["datapoint"]) : undefined, }; } /** * Input for fluency metric. */ export interface GoogleCloudAiplatformV1FluencyInput { /** * Required. Fluency instance. */ instance?: GoogleCloudAiplatformV1FluencyInstance; /** * Required. Spec for fluency score metric. */ metricSpec?: GoogleCloudAiplatformV1FluencySpec; } /** * Spec for fluency instance. */ export interface GoogleCloudAiplatformV1FluencyInstance { /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for fluency result. */ export interface GoogleCloudAiplatformV1FluencyResult { /** * Output only. Confidence for fluency score. */ readonly confidence?: number; /** * Output only. Explanation for fluency score. */ readonly explanation?: string; /** * Output only. Fluency score. */ readonly score?: number; } /** * Spec for fluency score metric. */ export interface GoogleCloudAiplatformV1FluencySpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Assigns the input data to training, validation, and test sets as per the * given fractions. Any of `training_fraction`, `validation_fraction` and * `test_fraction` may optionally be provided, they must sum to up to 1. If the * provided ones sum to less than 1, the remainder is assigned to sets as * decided by Vertex AI. If none of the fractions are set, by default roughly * 80% of data is used for training, 10% for validation, and 10% for test. */ export interface GoogleCloudAiplatformV1FractionSplit { /** * The fraction of the input data that is to be used to evaluate the Model. */ testFraction?: number; /** * The fraction of the input data that is to be used to train the Model. */ trainingFraction?: number; /** * The fraction of the input data that is to be used to validate the Model. */ validationFraction?: number; } /** * Input for fulfillment metric. */ export interface GoogleCloudAiplatformV1FulfillmentInput { /** * Required. Fulfillment instance. */ instance?: GoogleCloudAiplatformV1FulfillmentInstance; /** * Required. Spec for fulfillment score metric. */ metricSpec?: GoogleCloudAiplatformV1FulfillmentSpec; } /** * Spec for fulfillment instance. */ export interface GoogleCloudAiplatformV1FulfillmentInstance { /** * Required. Inference instruction prompt to compare prediction with. 
*/ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for fulfillment result. */ export interface GoogleCloudAiplatformV1FulfillmentResult { /** * Output only. Confidence for fulfillment score. */ readonly confidence?: number; /** * Output only. Explanation for fulfillment score. */ readonly explanation?: string; /** * Output only. Fulfillment score. */ readonly score?: number; } /** * Spec for fulfillment metric. */ export interface GoogleCloudAiplatformV1FulfillmentSpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * A predicted [FunctionCall] returned from the model that contains a string * representing the [FunctionDeclaration.name] and a structured JSON object * containing the parameters and their values. */ export interface GoogleCloudAiplatformV1FunctionCall { /** * Optional. The function parameters and values in JSON object format. See * [FunctionDeclaration.parameters] for parameter details. */ args?: { [key: string]: any }; /** * Required. The name of the function to call. Matches * [FunctionDeclaration.name]. */ name?: string; } /** * Function calling config. */ export interface GoogleCloudAiplatformV1FunctionCallingConfig { /** * Optional. Function names to call. Only set when the Mode is ANY. Function * names should match [FunctionDeclaration.name]. With mode set to ANY, model * will predict a function call from the set of function names provided. */ allowedFunctionNames?: string[]; /** * Optional. Function calling mode. */ mode?: | "MODE_UNSPECIFIED" | "AUTO" | "ANY" | "NONE"; } /** * Structured representation of a function declaration as defined by the * [OpenAPI 3.0 specification](https://spec.openapis.org/oas/v3.0.3). Included * in this declaration are the function name, description, parameters and * response type. This FunctionDeclaration is a representation of a block of * code that can be used as a `Tool` by the model and executed by the client. */ export interface GoogleCloudAiplatformV1FunctionDeclaration { /** * Optional. Description and purpose of the function. Model uses it to decide * how and whether to call the function. */ description?: string; /** * Required. The name of the function to call. Must start with a letter or an * underscore. Must be a-z, A-Z, 0-9, or contain underscores, dots and dashes, * with a maximum length of 64. */ name?: string; /** * Optional. Describes the parameters to this function in JSON Schema Object * format. Reflects the Open API 3.03 Parameter Object. string Key: the name * of the parameter. Parameter names are case sensitive. Schema Value: the * Schema defining the type used for the parameter. For function with no * parameters, this can be left unset. Parameter names must start with a * letter or an underscore and must only contain chars a-z, A-Z, 0-9, or * underscores with a maximum length of 64. Example with 1 required and 1 * optional parameter: type: OBJECT properties: param1: type: STRING param2: * type: INTEGER required: - param1 */ parameters?: GoogleCloudAiplatformV1Schema; /** * Optional. Describes the output from this function in JSON Schema format. * Reflects the Open API 3.03 Response Object. The Schema defines the type * used for the response value of the function. */ response?: GoogleCloudAiplatformV1Schema; } function serializeGoogleCloudAiplatformV1FunctionDeclaration(data: any): GoogleCloudAiplatformV1FunctionDeclaration { return { ...data, parameters: data["parameters"] !== undefined ? 
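    // Both `parameters` and `response` are GoogleCloudAiplatformV1Schema values,
    // so they are delegated to the shared Schema serializer rather than copied
    // verbatim. An illustrative declaration this function might receive (all field
    // values below are placeholders, not taken from the API docs):
    //   { name: "get_weather",
    //     description: "Returns the weather for a city",
    //     parameters: { type: "OBJECT",
    //                   properties: { city: { type: "STRING" } },
    //                   required: ["city"] } }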
serializeGoogleCloudAiplatformV1Schema(data["parameters"]) : undefined, response: data["response"] !== undefined ? serializeGoogleCloudAiplatformV1Schema(data["response"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1FunctionDeclaration(data: any): GoogleCloudAiplatformV1FunctionDeclaration { return { ...data, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Schema(data["parameters"]) : undefined, response: data["response"] !== undefined ? deserializeGoogleCloudAiplatformV1Schema(data["response"]) : undefined, }; } /** * The result output from a [FunctionCall] that contains a string representing * the [FunctionDeclaration.name] and a structured JSON object containing any * output from the function is used as context to the model. This should contain * the result of a [FunctionCall] made based on model prediction. */ export interface GoogleCloudAiplatformV1FunctionResponse { /** * Required. The name of the function to call. Matches * [FunctionDeclaration.name] and [FunctionCall.name]. */ name?: string; /** * Required. The function response in JSON object format. Use "output" key to * specify function output and "error" key to specify error details (if any). * If "output" and "error" keys are not specified, then whole "response" is * treated as function output. */ response?: { [key: string]: any }; } /** * The Google Cloud Storage location where the output is to be written to. */ export interface GoogleCloudAiplatformV1GcsDestination { /** * Required. Google Cloud Storage URI to output directory. If the uri doesn't * end with '/', a '/' will be automatically appended. The directory is * created if it doesn't exist. */ outputUriPrefix?: string; } /** * The Google Cloud Storage location for the input content. */ export interface GoogleCloudAiplatformV1GcsSource { /** * Required. Google Cloud Storage URI(-s) to the input file(s). May contain * wildcards. For more information on wildcards, see * https://cloud.google.com/storage/docs/wildcards. */ uris?: string[]; } /** * Configuration for GenAiAdvancedFeatures. */ export interface GoogleCloudAiplatformV1GenAiAdvancedFeaturesConfig { /** * Configuration for Retrieval Augmented Generation feature. */ ragConfig?: GoogleCloudAiplatformV1GenAiAdvancedFeaturesConfigRagConfig; } /** * Configuration for Retrieval Augmented Generation feature. */ export interface GoogleCloudAiplatformV1GenAiAdvancedFeaturesConfigRagConfig { /** * If true, enable Retrieval Augmented Generation in ChatCompletion request. * Once enabled, the endpoint will be identified as GenAI endpoint and * Arthedain router will be used. */ enableRag?: boolean; } /** * Request message for [PredictionService.GenerateContent]. */ export interface GoogleCloudAiplatformV1GenerateContentRequest { /** * Optional. The name of the cached content used as context to serve the * prediction. Note: only used in explicit caching, where users can have * control over caching (e.g. what content to cache) and enjoy guaranteed cost * savings. Format: * `projects/{project}/locations/{location}/cachedContents/{cachedContent}` */ cachedContent?: string; /** * Required. The content of the current conversation with the model. For * single-turn queries, this is a single instance. For multi-turn queries, * this is a repeated field that contains conversation history + latest * request. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Optional. Generation config. */ generationConfig?: GoogleCloudAiplatformV1GenerationConfig; /** * Optional. 
The labels with user-defined metadata for the request. It is * used for billing and reporting only. Label keys and values can be no longer * than 63 characters (Unicode codepoints) and can only contain lowercase * letters, numeric characters, underscores, and dashes. International * characters are allowed. Label values are optional. Label keys must start * with a letter. */ labels?: { [key: string]: string }; /** * Optional. Per request settings for blocking unsafe content. Enforced on * GenerateContentResponse.candidates. */ safetySettings?: GoogleCloudAiplatformV1SafetySetting[]; /** * Optional. The user provided system instructions for the model. Note: only * text should be used in parts and content in each part will be in a separate * paragraph. */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Optional. Tool config. This config is shared for all tools provided in the * request. */ toolConfig?: GoogleCloudAiplatformV1ToolConfig; /** * Optional. A list of `Tools` the model may use to generate the next * response. A `Tool` is a piece of code that enables the system to interact * with external systems to perform an action, or set of actions, outside of * knowledge and scope of the model. */ tools?: GoogleCloudAiplatformV1Tool[]; } function serializeGoogleCloudAiplatformV1GenerateContentRequest(data: any): GoogleCloudAiplatformV1GenerateContentRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? serializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1GenerateContentRequest(data: any): GoogleCloudAiplatformV1GenerateContentRequest { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } /** * Response message for [PredictionService.GenerateContent]. */ export interface GoogleCloudAiplatformV1GenerateContentResponse { /** * Output only. Generated candidates. */ readonly candidates?: GoogleCloudAiplatformV1Candidate[]; /** * Output only. Timestamp when the request is made to the server. */ readonly createTime?: Date; /** * Output only. The model version used to generate the response. */ readonly modelVersion?: string; /** * Output only. Content filter results for a prompt sent in the request. * Note: Sent only in the first stream chunk. Only happens when no candidates * were generated due to content violations. */ readonly promptFeedback?: GoogleCloudAiplatformV1GenerateContentResponsePromptFeedback; /** * Output only. response_id is used to identify each response. It is the * encoding of the event_id. 
*/ readonly responseId?: string; /** * Usage metadata about the response(s). */ usageMetadata?: GoogleCloudAiplatformV1GenerateContentResponseUsageMetadata; } /** * Content filter results for a prompt sent in the request. */ export interface GoogleCloudAiplatformV1GenerateContentResponsePromptFeedback { /** * Output only. Blocked reason. */ readonly blockReason?: | "BLOCKED_REASON_UNSPECIFIED" | "SAFETY" | "OTHER" | "BLOCKLIST" | "PROHIBITED_CONTENT"; /** * Output only. A readable block reason message. */ readonly blockReasonMessage?: string; /** * Output only. Safety ratings. */ readonly safetyRatings?: GoogleCloudAiplatformV1SafetyRating[]; } /** * Usage metadata about response(s). */ export interface GoogleCloudAiplatformV1GenerateContentResponseUsageMetadata { /** * Output only. Number of tokens in the cached part in the input (the cached * content). */ readonly cachedContentTokenCount?: number; /** * Output only. List of modalities of the cached content in the request * input. */ readonly cacheTokensDetails?: GoogleCloudAiplatformV1ModalityTokenCount[]; /** * Number of tokens in the response(s). */ candidatesTokenCount?: number; /** * Output only. List of modalities that were returned in the response. */ readonly candidatesTokensDetails?: GoogleCloudAiplatformV1ModalityTokenCount[]; /** * Number of tokens in the request. When `cached_content` is set, this is * still the total effective prompt size meaning this includes the number of * tokens in the cached content. */ promptTokenCount?: number; /** * Output only. List of modalities that were processed in the request input. */ readonly promptTokensDetails?: GoogleCloudAiplatformV1ModalityTokenCount[]; /** * Output only. Number of tokens present in thoughts output. */ readonly thoughtsTokenCount?: number; /** * Output only. Number of tokens present in tool-use prompt(s). */ readonly toolUsePromptTokenCount?: number; /** * Output only. List of modalities that were processed for tool-use request * inputs. */ readonly toolUsePromptTokensDetails?: GoogleCloudAiplatformV1ModalityTokenCount[]; /** * Total token count for prompt, response candidates, and tool-use prompts * (if present). */ totalTokenCount?: number; /** * Output only. Traffic type. This shows whether a request consumes * Pay-As-You-Go or Provisioned Throughput quota. */ readonly trafficType?: | "TRAFFIC_TYPE_UNSPECIFIED" | "ON_DEMAND" | "PROVISIONED_THROUGHPUT"; } /** * Generate video response. */ export interface GoogleCloudAiplatformV1GenerateVideoResponse { /** * The cloud storage uris of the generated videos. */ generatedSamples?: string[]; /** * Returns if any videos were filtered due to RAI policies. */ raiMediaFilteredCount?: number; /** * Returns rai failure reasons if any. */ raiMediaFilteredReasons?: string[]; /** * List of video bytes or Cloud Storage URIs of the generated videos. */ videos?: GoogleCloudAiplatformV1GenerateVideoResponseVideo[]; } /** * A generated video. */ export interface GoogleCloudAiplatformV1GenerateVideoResponseVideo { /** * Base64 encoded bytes string representing the video. */ bytesBase64Encoded?: string; /** * Cloud Storage URI where the generated video is written. */ gcsUri?: string; /** * The MIME type of the content of the video. - video/mp4 */ mimeType?: string; } /** * Generation config. */ export interface GoogleCloudAiplatformV1GenerationConfig { /** * Optional. If enabled, audio timestamp will be included in the request to * the model. */ audioTimestamp?: boolean; /** * Optional. Number of candidates to generate. 
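   * For example, a candidateCount of 2 asks the model to return two alternative
   * candidates for the same request; 2 is an illustrative value, not a documented
   * default.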
*/ candidateCount?: number; /** * Optional. Frequency penalties. */ frequencyPenalty?: number; /** * Optional. Logit probabilities. */ logprobs?: number; /** * Optional. The maximum number of output tokens to generate per message. */ maxOutputTokens?: number; /** * Optional. If specified, the media resolution specified will be used. */ mediaResolution?: | "MEDIA_RESOLUTION_UNSPECIFIED" | "MEDIA_RESOLUTION_LOW" | "MEDIA_RESOLUTION_MEDIUM" | "MEDIA_RESOLUTION_HIGH"; /** * Optional. Positive penalties. */ presencePenalty?: number; /** * Optional. If true, export the logprobs results in response. */ responseLogprobs?: boolean; /** * Optional. Output response mimetype of the generated candidate text. * Supported mimetype: - `text/plain`: (default) Text output. - * `application/json`: JSON response in the candidates. The model needs to be * prompted to output the appropriate response type, otherwise the behavior is * undefined. This is a preview feature. */ responseMimeType?: string; /** * Optional. The modalities of the response. */ responseModalities?: | "MODALITY_UNSPECIFIED" | "TEXT" | "IMAGE" | "AUDIO"[]; /** * Optional. The `Schema` object allows the definition of input and output * data types. These types can be objects, but also primitives and arrays. * Represents a select subset of an [OpenAPI 3.0 schema * object](https://spec.openapis.org/oas/v3.0.3#schema). If set, a compatible * response_mime_type must also be set. Compatible mimetypes: * `application/json`: Schema for JSON response. */ responseSchema?: GoogleCloudAiplatformV1Schema; /** * Optional. Routing configuration. */ routingConfig?: GoogleCloudAiplatformV1GenerationConfigRoutingConfig; /** * Optional. Seed. */ seed?: number; /** * Optional. The speech generation config. */ speechConfig?: GoogleCloudAiplatformV1SpeechConfig; /** * Optional. Stop sequences. */ stopSequences?: string[]; /** * Optional. Controls the randomness of predictions. */ temperature?: number; /** * Optional. Config for thinking features. An error will be returned if this * field is set for models that don't support thinking. */ thinkingConfig?: GoogleCloudAiplatformV1GenerationConfigThinkingConfig; /** * Optional. If specified, top-k sampling will be used. */ topK?: number; /** * Optional. If specified, nucleus sampling will be used. */ topP?: number; } function serializeGoogleCloudAiplatformV1GenerationConfig(data: any): GoogleCloudAiplatformV1GenerationConfig { return { ...data, responseSchema: data["responseSchema"] !== undefined ? serializeGoogleCloudAiplatformV1Schema(data["responseSchema"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1GenerationConfig(data: any): GoogleCloudAiplatformV1GenerationConfig { return { ...data, responseSchema: data["responseSchema"] !== undefined ? deserializeGoogleCloudAiplatformV1Schema(data["responseSchema"]) : undefined, }; } /** * The configuration for routing the request to a specific model. */ export interface GoogleCloudAiplatformV1GenerationConfigRoutingConfig { /** * Automated routing. */ autoMode?: GoogleCloudAiplatformV1GenerationConfigRoutingConfigAutoRoutingMode; /** * Manual routing. */ manualMode?: GoogleCloudAiplatformV1GenerationConfigRoutingConfigManualRoutingMode; } /** * When automated routing is specified, the routing will be determined by the * pretrained routing model and customer provided model routing preference. */ export interface GoogleCloudAiplatformV1GenerationConfigRoutingConfigAutoRoutingMode { /** * The model routing preference. 
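   * Read literally, PRIORITIZE_QUALITY favors the strongest available model,
   * PRIORITIZE_COST the cheapest, and BALANCED a trade-off between the two; this
   * reading is inferred from the enum names rather than documented here.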
*/ modelRoutingPreference?: | "UNKNOWN" | "PRIORITIZE_QUALITY" | "BALANCED" | "PRIORITIZE_COST"; } /** * When manual routing is set, the specified model will be used directly. */ export interface GoogleCloudAiplatformV1GenerationConfigRoutingConfigManualRoutingMode { /** * The model name to use. Only the public LLM models are accepted. See * [Supported * models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models). */ modelName?: string; } /** * Config for thinking features. */ export interface GoogleCloudAiplatformV1GenerationConfigThinkingConfig { /** * Optional. Indicates the thinking budget in tokens. This is only applied * when enable_thinking is true. */ thinkingBudget?: number; } /** * Generic Metadata shared by all operations. */ export interface GoogleCloudAiplatformV1GenericOperationMetadata { /** * Output only. Time when the operation was created. */ readonly createTime?: Date; /** * Output only. Partial failures encountered. E.g. single files that couldn't * be read. This field should never exceed 20 entries. Status details field * will contain standard Google Cloud error details. */ readonly partialFailures?: GoogleRpcStatus[]; /** * Output only. Time when the operation was updated for the last time. If the * operation has finished (successfully or not), this is the finish time. */ readonly updateTime?: Date; } /** * Contains information about the source of the models generated from * Generative AI Studio. */ export interface GoogleCloudAiplatformV1GenieSource { /** * Required. The public base model URI. */ baseModelUri?: string; } /** * The Google Drive location for the input content. */ export interface GoogleCloudAiplatformV1GoogleDriveSource { /** * Required. Google Drive resource IDs. */ resourceIds?: GoogleCloudAiplatformV1GoogleDriveSourceResourceId[]; } /** * The type and ID of the Google Drive resource. */ export interface GoogleCloudAiplatformV1GoogleDriveSourceResourceId { /** * Required. The ID of the Google Drive resource. */ resourceId?: string; /** * Required. The type of the Google Drive resource. */ resourceType?: | "RESOURCE_TYPE_UNSPECIFIED" | "RESOURCE_TYPE_FILE" | "RESOURCE_TYPE_FOLDER"; } /** * Tool to retrieve public web data for grounding, powered by Google. */ export interface GoogleCloudAiplatformV1GoogleSearchRetrieval { /** * Specifies the dynamic retrieval configuration for the given source. */ dynamicRetrievalConfig?: GoogleCloudAiplatformV1DynamicRetrievalConfig; } /** * Input for groundedness metric. */ export interface GoogleCloudAiplatformV1GroundednessInput { /** * Required. Groundedness instance. */ instance?: GoogleCloudAiplatformV1GroundednessInstance; /** * Required. Spec for groundedness metric. */ metricSpec?: GoogleCloudAiplatformV1GroundednessSpec; } /** * Spec for groundedness instance. */ export interface GoogleCloudAiplatformV1GroundednessInstance { /** * Required. Background information provided in context used to compare * against the prediction. */ context?: string; /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Spec for groundedness result. */ export interface GoogleCloudAiplatformV1GroundednessResult { /** * Output only. Confidence for groundedness score. */ readonly confidence?: number; /** * Output only. Explanation for groundedness score. */ readonly explanation?: string; /** * Output only. Groundedness score. */ readonly score?: number; } /** * Spec for groundedness metric. 
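 * Groundedness evaluates whether the model's prediction is supported by the
 * background context supplied in GroundednessInstance.context.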
*/ export interface GoogleCloudAiplatformV1GroundednessSpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Grounding chunk. */ export interface GoogleCloudAiplatformV1GroundingChunk { /** * Grounding chunk from context retrieved by the retrieval tools. */ retrievedContext?: GoogleCloudAiplatformV1GroundingChunkRetrievedContext; /** * Grounding chunk from the web. */ web?: GoogleCloudAiplatformV1GroundingChunkWeb; } /** * Chunk from context retrieved by the retrieval tools. */ export interface GoogleCloudAiplatformV1GroundingChunkRetrievedContext { /** * Additional context for the RAG retrieval result. This is only populated * when using the RAG retrieval tool. */ ragChunk?: GoogleCloudAiplatformV1RagChunk; /** * Text of the attribution. */ text?: string; /** * Title of the attribution. */ title?: string; /** * URI reference of the attribution. */ uri?: string; } /** * Chunk from the web. */ export interface GoogleCloudAiplatformV1GroundingChunkWeb { /** * Domain of the (original) URI. */ domain?: string; /** * Title of the chunk. */ title?: string; /** * URI reference of the chunk. */ uri?: string; } /** * Metadata returned to client when grounding is enabled. */ export interface GoogleCloudAiplatformV1GroundingMetadata { /** * List of supporting references retrieved from specified grounding source. */ groundingChunks?: GoogleCloudAiplatformV1GroundingChunk[]; /** * Optional. List of grounding support. */ groundingSupports?: GoogleCloudAiplatformV1GroundingSupport[]; /** * Optional. Output only. Retrieval metadata. */ readonly retrievalMetadata?: GoogleCloudAiplatformV1RetrievalMetadata; /** * Optional. Google search entry for the following-up web searches. */ searchEntryPoint?: GoogleCloudAiplatformV1SearchEntryPoint; /** * Optional. Web search queries for the following-up web search. */ webSearchQueries?: string[]; } function serializeGoogleCloudAiplatformV1GroundingMetadata(data: any): GoogleCloudAiplatformV1GroundingMetadata { return { ...data, searchEntryPoint: data["searchEntryPoint"] !== undefined ? serializeGoogleCloudAiplatformV1SearchEntryPoint(data["searchEntryPoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1GroundingMetadata(data: any): GoogleCloudAiplatformV1GroundingMetadata { return { ...data, searchEntryPoint: data["searchEntryPoint"] !== undefined ? deserializeGoogleCloudAiplatformV1SearchEntryPoint(data["searchEntryPoint"]) : undefined, }; } /** * Grounding support. */ export interface GoogleCloudAiplatformV1GroundingSupport { /** * Confidence score of the support references. Ranges from 0 to 1. 1 is the * most confident. This list must have the same size as the * grounding_chunk_indices. */ confidenceScores?: number[]; /** * A list of indices (into 'grounding_chunk') specifying the citations * associated with the claim. For instance [1,3,4] means that * grounding_chunk[1], grounding_chunk[3], grounding_chunk[4] are the * retrieved content attributed to the claim. */ groundingChunkIndices?: number[]; /** * Segment of the content this support belongs to. */ segment?: GoogleCloudAiplatformV1Segment; } /** * Represents a HyperparameterTuningJob. A HyperparameterTuningJob has a Study * specification and multiple CustomJobs with identical CustomJob specification. */ export interface GoogleCloudAiplatformV1HyperparameterTuningJob { /** * Output only. Time when the HyperparameterTuningJob was created. */ readonly createTime?: Date; /** * Required. The display name of the HyperparameterTuningJob. 
The name can be * up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key options for a HyperparameterTuningJob. If * this is set, then all resources created by the HyperparameterTuningJob will * be encrypted with the provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the HyperparameterTuningJob entered any of the * following states: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, * `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is JOB_STATE_FAILED or * JOB_STATE_CANCELLED. */ readonly error?: GoogleRpcStatus; /** * The labels with user-defined metadata to organize * HyperparameterTuningJobs. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * The number of failed Trials that need to be seen before failing the * HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials * must fail before the whole job fails. */ maxFailedTrialCount?: number; /** * Required. The desired total number of Trials. */ maxTrialCount?: number; /** * Output only. Resource name of the HyperparameterTuningJob. */ readonly name?: string; /** * Required. The desired number of Trials to run in parallel. */ parallelTrialCount?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the HyperparameterTuningJob for the first time * entered the `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Required. Study configuration of the HyperparameterTuningJob. */ studySpec?: GoogleCloudAiplatformV1StudySpec; /** * Required. The spec of a trial job. The same spec applies to the CustomJobs * created in all the trials. */ trialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; /** * Output only. Trials of the HyperparameterTuningJob. */ readonly trials?: GoogleCloudAiplatformV1Trial[]; /** * Output only. Time when the HyperparameterTuningJob was most recently * updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1HyperparameterTuningJob(data: any): GoogleCloudAiplatformV1HyperparameterTuningJob { return { ...data, studySpec: data["studySpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, trialJobSpec: data["trialJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["trialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(data: any): GoogleCloudAiplatformV1HyperparameterTuningJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? 
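    // Timestamp strings returned by the API are converted to JavaScript Date
    // objects in this deserializer; studySpec and trialJobSpec are handed to
    // their own deserializers below.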
new Date(data["startTime"]) : undefined, studySpec: data["studySpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, trialJobSpec: data["trialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["trialJobSpec"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Matcher for Features of an EntityType by Feature ID. */ export interface GoogleCloudAiplatformV1IdMatcher { /** * Required. The following are accepted as `ids`: * A single-element list * containing only `*`, which selects all Features in the target EntityType, * or * A list containing only Feature IDs, which selects only Features with * those IDs in the target EntityType. */ ids?: string[]; } /** * Describes the location from where we import data into a Dataset, together * with the labels that will be applied to the DataItems and the Annotations. */ export interface GoogleCloudAiplatformV1ImportDataConfig { /** * Labels that will be applied to newly imported Annotations. If two * Annotations are identical, one of them will be deduped. Two Annotations are * considered identical if their payload, payload_schema_uri and all of their * labels are the same. These labels will be overridden by Annotation labels * specified inside index file referenced by import_schema_uri, e.g. jsonl * file. */ annotationLabels?: { [key: string]: string }; /** * Labels that will be applied to newly imported DataItems. If an identical * DataItem as one being imported already exists in the Dataset, then these * labels will be appended to these of the already existing one, and if labels * with identical key is imported before, the old label value will be * overwritten. If two DataItems are identical in the same import data * operation, the labels will be combined and if key collision happens in this * case, one of the values will be picked randomly. Two DataItems are * considered identical if their content bytes are identical (e.g. image bytes * or pdf bytes). These labels will be overridden by Annotation labels * specified inside index file referenced by import_schema_uri, e.g. jsonl * file. */ dataItemLabels?: { [key: string]: string }; /** * The Google Cloud Storage location for the input content. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Required. Points to a YAML file stored on Google Cloud Storage describing * the import format. Validation will be done against the schema. The schema * is defined as an [OpenAPI 3.0.2 Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ importSchemaUri?: string; } /** * Runtime operation information for DatasetService.ImportData. */ export interface GoogleCloudAiplatformV1ImportDataOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for DatasetService.ImportData. */ export interface GoogleCloudAiplatformV1ImportDataRequest { /** * Required. The desired input locations. The contents of all input locations * will be imported in one batch. */ importConfigs?: GoogleCloudAiplatformV1ImportDataConfig[]; } /** * Response message for DatasetService.ImportData. */ export interface GoogleCloudAiplatformV1ImportDataResponse { } /** * Details of operations that perform import Feature values. 
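 * The row and entity counters below are int64 values on the wire; after
 * deserialization they are exposed as `bigint`.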
*/ export interface GoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata { /** * List of ImportFeatureValues operations running under a single EntityType * that are blocking this operation. */ blockingOperationIds?: bigint[]; /** * Operation metadata for Featurestore import Feature values. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Number of entities that have been imported by the operation. */ importedEntityCount?: bigint; /** * Number of Feature values that have been imported by the operation. */ importedFeatureValueCount?: bigint; /** * The number of rows in input source that weren't imported due to either * * Not having any featureValues. * Having a null entityId. * Having a null * timestamp. * Not being parsable (applicable for CSV sources). */ invalidRowCount?: bigint; /** * The source URI from where Feature values are imported. */ sourceUris?: string[]; /** * The number rows that weren't ingested due to having timestamps outside the * retention boundary. */ timestampOutsideRetentionRowsCount?: bigint; } function serializeGoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata(data: any): GoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata { return { ...data, blockingOperationIds: data["blockingOperationIds"] !== undefined ? data["blockingOperationIds"].map((item: any) => (String(item))) : undefined, importedEntityCount: data["importedEntityCount"] !== undefined ? String(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? String(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? String(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? String(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata(data: any): GoogleCloudAiplatformV1ImportFeatureValuesOperationMetadata { return { ...data, blockingOperationIds: data["blockingOperationIds"] !== undefined ? data["blockingOperationIds"].map((item: any) => (BigInt(item))) : undefined, importedEntityCount: data["importedEntityCount"] !== undefined ? BigInt(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? BigInt(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? BigInt(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? BigInt(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } /** * Request message for FeaturestoreService.ImportFeatureValues. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesRequest { avroSource?: GoogleCloudAiplatformV1AvroSource; bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; csvSource?: GoogleCloudAiplatformV1CsvSource; /** * If true, API doesn't start ingestion analysis pipeline. */ disableIngestionAnalysis?: boolean; /** * If set, data will not be imported for online serving. This is typically * used for backfilling, where Feature generation timestamps are not in the * timestamp range needed for online serving. */ disableOnlineServing?: boolean; /** * Source column that holds entity IDs. If not provided, entity IDs are * extracted from the column named entity_id. */ entityIdField?: string; /** * Required. 
Specifications defining which Feature values to import from the * entity. The request fails if no feature_specs are provided, and having * multiple feature_specs for one Feature is not allowed. */ featureSpecs?: GoogleCloudAiplatformV1ImportFeatureValuesRequestFeatureSpec[]; /** * Single Feature timestamp for all entities being imported. The timestamp * must not have higher than millisecond precision. */ featureTime?: Date; /** * Source column that holds the Feature timestamp for all Feature values in * each entity. */ featureTimeField?: string; /** * Specifies the number of workers that are used to write data to the * Featurestore. Consider the online serving capacity that you require to * achieve the desired import throughput without interfering with online * serving. The value must be positive, and less than or equal to 100. If not * set, defaults to using 1 worker. The low count ensures minimal impact on * online serving performance. */ workerCount?: number; } function serializeGoogleCloudAiplatformV1ImportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ImportFeatureValuesRequest { return { ...data, featureTime: data["featureTime"] !== undefined ? data["featureTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportFeatureValuesRequest(data: any): GoogleCloudAiplatformV1ImportFeatureValuesRequest { return { ...data, featureTime: data["featureTime"] !== undefined ? new Date(data["featureTime"]) : undefined, }; } /** * Defines the Feature value(s) to import. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesRequestFeatureSpec { /** * Required. ID of the Feature to import values of. This Feature must exist * in the target EntityType, or the request will fail. */ id?: string; /** * Source column to get the Feature values from. If not set, uses the column * with the same name as the Feature ID. */ sourceField?: string; } /** * Response message for FeaturestoreService.ImportFeatureValues. */ export interface GoogleCloudAiplatformV1ImportFeatureValuesResponse { /** * Number of entities that have been imported by the operation. */ importedEntityCount?: bigint; /** * Number of Feature values that have been imported by the operation. */ importedFeatureValueCount?: bigint; /** * The number of rows in input source that weren't imported due to either * * Not having any featureValues. * Having a null entityId. * Having a null * timestamp. * Not being parsable (applicable for CSV sources). */ invalidRowCount?: bigint; /** * The number rows that weren't ingested due to having feature timestamps * outside the retention boundary. */ timestampOutsideRetentionRowsCount?: bigint; } function serializeGoogleCloudAiplatformV1ImportFeatureValuesResponse(data: any): GoogleCloudAiplatformV1ImportFeatureValuesResponse { return { ...data, importedEntityCount: data["importedEntityCount"] !== undefined ? String(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? String(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? String(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? String(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportFeatureValuesResponse(data: any): GoogleCloudAiplatformV1ImportFeatureValuesResponse { return { ...data, importedEntityCount: data["importedEntityCount"] !== undefined ? 
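    // int64 counters arrive as decimal strings in the JSON payload and are
    // converted to bigint here; the corresponding serializer performs the
    // reverse String() mapping.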
BigInt(data["importedEntityCount"]) : undefined, importedFeatureValueCount: data["importedFeatureValueCount"] !== undefined ? BigInt(data["importedFeatureValueCount"]) : undefined, invalidRowCount: data["invalidRowCount"] !== undefined ? BigInt(data["invalidRowCount"]) : undefined, timestampOutsideRetentionRowsCount: data["timestampOutsideRetentionRowsCount"] !== undefined ? BigInt(data["timestampOutsideRetentionRowsCount"]) : undefined, }; } /** * Request message for ModelService.ImportModelEvaluation */ export interface GoogleCloudAiplatformV1ImportModelEvaluationRequest { /** * Required. Model evaluation resource to be imported. */ modelEvaluation?: GoogleCloudAiplatformV1ModelEvaluation; } /** * Config for importing RagFiles. */ export interface GoogleCloudAiplatformV1ImportRagFilesConfig { /** * Google Cloud Storage location. Supports importing individual files as well * as entire Google Cloud Storage directories. Sample formats: - * `gs://bucket_name/my_directory/object_name/my_file.txt` - * `gs://bucket_name/my_directory` */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Google Drive location. Supports importing individual files as well as * Google Drive folders. */ googleDriveSource?: GoogleCloudAiplatformV1GoogleDriveSource; /** * The BigQuery destination to write import result to. It should be a * bigquery table resource name (e.g. "bq://projectId.bqDatasetId.bqTableId"). * The dataset must exist. If the table does not exist, it will be created * with the expected schema. If the table exists, the schema will be validated * and data will be added to this existing table. */ importResultBigquerySink?: GoogleCloudAiplatformV1BigQueryDestination; /** * The Cloud Storage path to write import result to. */ importResultGcsSink?: GoogleCloudAiplatformV1GcsDestination; /** * Jira queries with their corresponding authentication. */ jiraSource?: GoogleCloudAiplatformV1JiraSource; /** * Optional. The max number of queries per minute that this job is allowed to * make to the embedding model specified on the corpus. This value is specific * to this job and not shared across other import jobs. Consult the Quotas * page on the project to set an appropriate value here. If unspecified, a * default value of 1,000 QPM would be used. */ maxEmbeddingRequestsPerMin?: number; /** * The BigQuery destination to write partial failures to. It should be a * bigquery table resource name (e.g. "bq://projectId.bqDatasetId.bqTableId"). * The dataset must exist. If the table does not exist, it will be created * with the expected schema. If the table exists, the schema will be validated * and data will be added to this existing table. Deprecated. Prefer to use * `import_result_bq_sink`. */ partialFailureBigquerySink?: GoogleCloudAiplatformV1BigQueryDestination; /** * The Cloud Storage path to write partial failures to. Deprecated. Prefer to * use `import_result_gcs_sink`. */ partialFailureGcsSink?: GoogleCloudAiplatformV1GcsDestination; /** * Optional. Specifies the parsing config for RagFiles. RAG will use the * default parser if this field is not set. */ ragFileParsingConfig?: GoogleCloudAiplatformV1RagFileParsingConfig; /** * Specifies the transformation config for RagFiles. */ ragFileTransformationConfig?: GoogleCloudAiplatformV1RagFileTransformationConfig; /** * SharePoint sources. */ sharePointSources?: GoogleCloudAiplatformV1SharePointSources; /** * Slack channels with their corresponding access tokens. 
*/ slackSource?: GoogleCloudAiplatformV1SlackSource; } function serializeGoogleCloudAiplatformV1ImportRagFilesConfig(data: any): GoogleCloudAiplatformV1ImportRagFilesConfig { return { ...data, slackSource: data["slackSource"] !== undefined ? serializeGoogleCloudAiplatformV1SlackSource(data["slackSource"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportRagFilesConfig(data: any): GoogleCloudAiplatformV1ImportRagFilesConfig { return { ...data, slackSource: data["slackSource"] !== undefined ? deserializeGoogleCloudAiplatformV1SlackSource(data["slackSource"]) : undefined, }; } /** * Request message for VertexRagDataService.ImportRagFiles. */ export interface GoogleCloudAiplatformV1ImportRagFilesRequest { /** * Required. The config for the RagFiles to be synced and imported into the * RagCorpus. VertexRagDataService.ImportRagFiles. */ importRagFilesConfig?: GoogleCloudAiplatformV1ImportRagFilesConfig; } function serializeGoogleCloudAiplatformV1ImportRagFilesRequest(data: any): GoogleCloudAiplatformV1ImportRagFilesRequest { return { ...data, importRagFilesConfig: data["importRagFilesConfig"] !== undefined ? serializeGoogleCloudAiplatformV1ImportRagFilesConfig(data["importRagFilesConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ImportRagFilesRequest(data: any): GoogleCloudAiplatformV1ImportRagFilesRequest { return { ...data, importRagFilesConfig: data["importRagFilesConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ImportRagFilesConfig(data["importRagFilesConfig"]) : undefined, }; } /** * A representation of a collection of database items organized in a way that * allows for approximate nearest neighbor (a.k.a ANN) algorithms search. */ export interface GoogleCloudAiplatformV1Index { /** * Output only. Timestamp when this Index was created. */ readonly createTime?: Date; /** * Output only. The pointers to DeployedIndexes created from this Index. An * Index can be only deleted if all its DeployedIndexes had been undeployed * first. */ readonly deployedIndexes?: GoogleCloudAiplatformV1DeployedIndexRef[]; /** * The description of the Index. */ description?: string; /** * Required. The display name of the Index. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Immutable. Customer-managed encryption key spec for an Index. If set, this * Index and all sub-resources of this Index will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * Output only. Stats of the index resource. */ readonly indexStats?: GoogleCloudAiplatformV1IndexStats; /** * Immutable. The update method to use with this Index. If not set, * BATCH_UPDATE will be used by default. */ indexUpdateMethod?: | "INDEX_UPDATE_METHOD_UNSPECIFIED" | "BATCH_UPDATE" | "STREAM_UPDATE"; /** * The labels with user-defined metadata to organize your Indexes. Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * An additional information about the Index; the schema of the metadata can * be found in metadata_schema. */ metadata?: any; /** * Immutable. 
Points to a YAML file stored on Google Cloud Storage describing * additional information about the Index, that is specific to it. Unset if * the Index does not have any additional information. The schema is defined * as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * Note: The URI given on output will be immutable and probably different, * including the URI scheme, than the one given on input. The output URI will * point to a location where the user only has a read access. */ metadataSchemaUri?: string; /** * Output only. The resource name of the Index. */ readonly name?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this Index was most recently updated. This * also includes any update to the contents of the Index. Note that Operations * working on this Index may have their * Operations.metadata.generic_metadata.update_time a little after the value * of this timestamp, yet that does not mean their results are not already * reflected in the Index. Result of any successfully completed Operation on * the Index is reflected in it. */ readonly updateTime?: Date; } /** * A datapoint of Index. */ export interface GoogleCloudAiplatformV1IndexDatapoint { /** * Optional. CrowdingTag of the datapoint, the number of neighbors to return * in each crowding can be configured during query. */ crowdingTag?: GoogleCloudAiplatformV1IndexDatapointCrowdingTag; /** * Required. Unique identifier of the datapoint. */ datapointId?: string; /** * Required. Feature embedding vector for dense index. An array of numbers * with the length of [NearestNeighborSearchConfig.dimensions]. */ featureVector?: number[]; /** * Optional. List of Restrict of the datapoint, used to perform "restricted * searches" where boolean rule are used to filter the subset of the database * eligible for matching. This uses numeric comparisons. */ numericRestricts?: GoogleCloudAiplatformV1IndexDatapointNumericRestriction[]; /** * Optional. List of Restrict of the datapoint, used to perform "restricted * searches" where boolean rule are used to filter the subset of the database * eligible for matching. This uses categorical tokens. See: * https://cloud.google.com/vertex-ai/docs/matching-engine/filtering */ restricts?: GoogleCloudAiplatformV1IndexDatapointRestriction[]; /** * Optional. Feature embedding vector for sparse index. */ sparseEmbedding?: GoogleCloudAiplatformV1IndexDatapointSparseEmbedding; } function serializeGoogleCloudAiplatformV1IndexDatapoint(data: any): GoogleCloudAiplatformV1IndexDatapoint { return { ...data, numericRestricts: data["numericRestricts"] !== undefined ? data["numericRestricts"].map((item: any) => (serializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(item))) : undefined, sparseEmbedding: data["sparseEmbedding"] !== undefined ? serializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data["sparseEmbedding"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1IndexDatapoint(data: any): GoogleCloudAiplatformV1IndexDatapoint { return { ...data, numericRestricts: data["numericRestricts"] !== undefined ? data["numericRestricts"].map((item: any) => (deserializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(item))) : undefined, sparseEmbedding: data["sparseEmbedding"] !== undefined ? 
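    // Sparse embeddings carry int64 dimension indexes, so they need their own
    // deserializer; the numericRestricts above were likewise routed through
    // their per-item deserializer.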
deserializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data["sparseEmbedding"]) : undefined, }; } /** * Crowding tag is a constraint on a neighbor list produced by nearest neighbor * search requiring that no more than some value k' of the k neighbors returned * have the same value of crowding_attribute. */ export interface GoogleCloudAiplatformV1IndexDatapointCrowdingTag { /** * The attribute value used for crowding. The maximum number of neighbors to * return per crowding attribute value (per_crowding_attribute_num_neighbors) * is configured per-query. This field is ignored if * per_crowding_attribute_num_neighbors is larger than the total number of * neighbors to return for a given query. */ crowdingAttribute?: string; } /** * This field allows restricts to be based on numeric comparisons rather than * categorical tokens. */ export interface GoogleCloudAiplatformV1IndexDatapointNumericRestriction { /** * The namespace of this restriction. e.g.: cost. */ namespace?: string; /** * This MUST be specified for queries and must NOT be specified for * datapoints. */ op?: | "OPERATOR_UNSPECIFIED" | "LESS" | "LESS_EQUAL" | "EQUAL" | "GREATER_EQUAL" | "GREATER" | "NOT_EQUAL"; /** * Represents 64 bit float. */ valueDouble?: number; /** * Represents 32 bit float. */ valueFloat?: number; /** * Represents 64 bit integer. */ valueInt?: bigint; } function serializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(data: any): GoogleCloudAiplatformV1IndexDatapointNumericRestriction { return { ...data, valueInt: data["valueInt"] !== undefined ? String(data["valueInt"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1IndexDatapointNumericRestriction(data: any): GoogleCloudAiplatformV1IndexDatapointNumericRestriction { return { ...data, valueInt: data["valueInt"] !== undefined ? BigInt(data["valueInt"]) : undefined, }; } /** * Restriction of a datapoint which describe its attributes(tokens) from each * of several attribute categories(namespaces). */ export interface GoogleCloudAiplatformV1IndexDatapointRestriction { /** * The attributes to allow in this namespace. e.g.: 'red' */ allowList?: string[]; /** * The attributes to deny in this namespace. e.g.: 'blue' */ denyList?: string[]; /** * The namespace of this restriction. e.g.: color. */ namespace?: string; } /** * Feature embedding vector for sparse index. An array of numbers whose values * are located in the specified dimensions. */ export interface GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { /** * Required. The list of indexes for the embedding values of the sparse * vector. */ dimensions?: bigint[]; /** * Required. The list of embedding values of the sparse vector. */ values?: number[]; } function serializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data: any): GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { return { ...data, dimensions: data["dimensions"] !== undefined ? data["dimensions"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1IndexDatapointSparseEmbedding(data: any): GoogleCloudAiplatformV1IndexDatapointSparseEmbedding { return { ...data, dimensions: data["dimensions"] !== undefined ? data["dimensions"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Indexes are deployed into it. An IndexEndpoint can have multiple * DeployedIndexes. */ export interface GoogleCloudAiplatformV1IndexEndpoint { /** * Output only. Timestamp when this IndexEndpoint was created. */ readonly createTime?: Date; /** * Output only. 
The indexes deployed in this endpoint. */ readonly deployedIndexes?: GoogleCloudAiplatformV1DeployedIndex[]; /** * The description of the IndexEndpoint. */ description?: string; /** * Required. The display name of the IndexEndpoint. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Deprecated: If true, expose the IndexEndpoint via private * service connect. Only one of the fields, network or * enable_private_service_connect, can be set. */ enablePrivateServiceConnect?: boolean; /** * Immutable. Customer-managed encryption key spec for an IndexEndpoint. If * set, this IndexEndpoint and all sub-resources of this IndexEndpoint will be * secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your IndexEndpoints. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. The resource name of the IndexEndpoint. */ readonly name?: string; /** * Optional. The full name of the Google Compute Engine * [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) * to which the IndexEndpoint should be peered. Private services access must * already be configured for the network. If left unspecified, the Endpoint is * not peered with any network. network and private_service_connect_config are * mutually exclusive. * [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in '12345', and {network} is network name. */ network?: string; /** * Optional. Configuration for private service connect. network and * private_service_connect_config are mutually exclusive. */ privateServiceConnectConfig?: GoogleCloudAiplatformV1PrivateServiceConnectConfig; /** * Output only. If public_endpoint_enabled is true, this field will be * populated with the domain name to use for this index endpoint. */ readonly publicEndpointDomainName?: string; /** * Optional. If true, the deployed index will be accessible through public * endpoint. */ publicEndpointEnabled?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this IndexEndpoint was last updated. This * timestamp is not updated when the endpoint's DeployedIndexes are updated, * e.g. due to updates of the original Indexes they are the deployments of. */ readonly updateTime?: Date; } /** * IndexPrivateEndpoints proto is used to provide paths for users to send * requests via private endpoints (e.g. private service access, private service * connect). To send request via private service access, use match_grpc_address. * To send request via private service connect, use service_attachment. */ export interface GoogleCloudAiplatformV1IndexPrivateEndpoints { /** * Output only. The ip address used to send match gRPC requests. */ readonly matchGrpcAddress?: string; /** * Output only. 
PscAutomatedEndpoints is populated if private service connect * is enabled and PscAutomatedConfig is set. */ readonly pscAutomatedEndpoints?: GoogleCloudAiplatformV1PscAutomatedEndpoints[]; /** * Output only. The name of the service attachment resource. Populated if * private service connect is enabled. */ readonly serviceAttachment?: string; } /** * Stats of the Index. */ export interface GoogleCloudAiplatformV1IndexStats { /** * Output only. The number of shards in the Index. */ readonly shardsCount?: number; /** * Output only. The number of sparse vectors in the Index. */ readonly sparseVectorsCount?: bigint; /** * Output only. The number of dense vectors in the Index. */ readonly vectorsCount?: bigint; } /** * Specifies Vertex AI owned input data to be used for training, and possibly * evaluating, the Model. */ export interface GoogleCloudAiplatformV1InputDataConfig { /** * Applicable only to custom training with Datasets that have DataItems and * Annotations. Cloud Storage URI that points to a YAML file describing the * annotation schema. The schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * The schema files that can be used here are found in * gs://google-cloud-aiplatform/schema/dataset/annotation/ , note that the * chosen schema must be consistent with metadata of the Dataset specified by * dataset_id. Only Annotations that both match this schema and belong to * DataItems not ignored by the split method are used in respectively * training, validation or test role, depending on the role of the DataItem * they are on. When used in conjunction with annotations_filter, the * Annotations used for training are filtered by both annotations_filter and * annotation_schema_uri. */ annotationSchemaUri?: string; /** * Applicable only to Datasets that have DataItems and Annotations. A filter * on Annotations of the Dataset. Only Annotations that both match this filter * and belong to DataItems not ignored by the split method are used in * respectively training, validation or test role, depending on the role of * the DataItem they are on (for the auto-assigned that role is decided by * Vertex AI). A filter with the same syntax as the one used in ListAnnotations * may be used, but note here it filters across all Annotations of the * Dataset, and not just within a single DataItem. */ annotationsFilter?: string; /** * Only applicable to custom training with tabular Dataset with BigQuery * source. The BigQuery project location where the training data is to be * written to. In the given project a new dataset is created with name * `dataset___` where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All * training input data is written into that dataset. In the dataset three * tables are created, `training`, `validation` and `test`. * AIP_DATA_FORMAT * = "bigquery". * AIP_TRAINING_DATA_URI = * "bigquery_destination.dataset___.training" * AIP_VALIDATION_DATA_URI = * "bigquery_destination.dataset___.validation" * AIP_TEST_DATA_URI = * "bigquery_destination.dataset___.test" */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * Required. The ID of the Dataset in the same Project and Location whose * data will be used to train the Model. The Dataset must use a schema * compatible with the Model being trained, and what is compatible should be * described in the used TrainingPipeline's training_task_definition. For * tabular Datasets, all their data is exported to training, to pick and * choose from. 
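 *
 * A minimal illustrative sketch (not part of the generated API surface): an
 * InputDataConfig that combines datasetId with a fraction split and a Cloud
 * Storage destination. The values are placeholders, and the split and
 * destination field names are assumed from the FractionSplit and
 * GcsDestination messages defined elsewhere in this module.
 *
 *     const inputDataConfig: GoogleCloudAiplatformV1InputDataConfig = {
 *       // Hypothetical Dataset ID used only for illustration.
 *       datasetId: "1234567890",
 *       fractionSplit: {
 *         trainingFraction: 0.8,
 *         validationFraction: 0.1,
 *         testFraction: 0.1,
 *       },
 *       gcsDestination: { outputUriPrefix: "gs://my-bucket/training-output/" },
 *     };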
*/ datasetId?: string; /** * Split based on the provided filters for each set. */ filterSplit?: GoogleCloudAiplatformV1FilterSplit; /** * Split based on fractions defining the size of each set. */ fractionSplit?: GoogleCloudAiplatformV1FractionSplit; /** * The Cloud Storage location where the training data is to be written to. In * the given directory a new directory is created with name: `dataset---` * where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All * training input data is written into that directory. The Vertex AI * environment variables representing Cloud Storage data URIs are represented * in the Cloud Storage wildcard format to support sharded data. e.g.: * "gs://.../training-*.jsonl" * AIP_DATA_FORMAT = "jsonl" for non-tabular * data, "csv" for tabular data * AIP_TRAINING_DATA_URI = * "gcs_destination/dataset---/training-*.${AIP_DATA_FORMAT}" * * AIP_VALIDATION_DATA_URI = * "gcs_destination/dataset---/validation-*.${AIP_DATA_FORMAT}" * * AIP_TEST_DATA_URI = "gcs_destination/dataset---/test-*.${AIP_DATA_FORMAT}" */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; /** * Whether to persist the ML use assignment to data item system labels. */ persistMlUseAssignment?: boolean; /** * Supported only for tabular Datasets. Split based on a predefined key. */ predefinedSplit?: GoogleCloudAiplatformV1PredefinedSplit; /** * Only applicable to Datasets that have SavedQueries. The ID of a SavedQuery * (annotation set) under the Dataset specified by dataset_id used for * filtering Annotations for training. Only Annotations that are associated * with this SavedQuery are used in respectively training. When used in * conjunction with annotations_filter, the Annotations used for training are * filtered by both saved_query_id and annotations_filter. Only one of * saved_query_id and annotation_schema_uri should be specified as both of * them represent the same thing: problem type. */ savedQueryId?: string; /** * Supported only for tabular Datasets. Split based on the distribution of * the specified column. */ stratifiedSplit?: GoogleCloudAiplatformV1StratifiedSplit; /** * Supported only for tabular Datasets. Split based on the timestamp of the * input data pieces. */ timestampSplit?: GoogleCloudAiplatformV1TimestampSplit; } /** * A list of int64 values. */ export interface GoogleCloudAiplatformV1Int64Array { /** * A list of int64 values. */ values?: bigint[]; } function serializeGoogleCloudAiplatformV1Int64Array(data: any): GoogleCloudAiplatformV1Int64Array { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Int64Array(data: any): GoogleCloudAiplatformV1Int64Array { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (BigInt(item))) : undefined, }; } /** * An attribution method that computes the Aumann-Shapley value taking * advantage of the model's fully differentiable structure. Refer to this paper * for more details: https://arxiv.org/abs/1703.01365 */ export interface GoogleCloudAiplatformV1IntegratedGradientsAttribution { /** * Config for IG with blur baseline. When enabled, a linear path from the * maximally blurred image to the input image is created. Using a blurred * baseline instead of zero (black image) is motivated by the BlurIG approach * explained here: https://arxiv.org/abs/2004.03383 */ blurBaselineConfig?: GoogleCloudAiplatformV1BlurBaselineConfig; /** * Config for SmoothGrad approximation of gradients. 
When enabled, the * gradients are approximated by averaging the gradients from noisy samples in * the vicinity of the inputs. Adding noise can help improve the computed * gradients. Refer to this paper for more details: * https://arxiv.org/pdf/1706.03825.pdf */ smoothGradConfig?: GoogleCloudAiplatformV1SmoothGradConfig; /** * Required. The number of steps for approximating the path integral. A good * value to start is 50 and gradually increase until the sum to diff property * is within the desired error range. Valid range of its value is [1, 100], * inclusively. */ stepCount?: number; } /** * The Jira source for the ImportRagFilesRequest. */ export interface GoogleCloudAiplatformV1JiraSource { /** * Required. The Jira queries. */ jiraQueries?: GoogleCloudAiplatformV1JiraSourceJiraQueries[]; } /** * JiraQueries contains the Jira queries and corresponding authentication. */ export interface GoogleCloudAiplatformV1JiraSourceJiraQueries { /** * Required. The SecretManager secret version resource name (e.g. * projects/{project}/secrets/{secret}/versions/{version}) storing the Jira * API key. See [Manage API tokens for your Atlassian * account](https://support.atlassian.com/atlassian-account/docs/manage-api-tokens-for-your-atlassian-account/). */ apiKeyConfig?: GoogleCloudAiplatformV1ApiAuthApiKeyConfig; /** * A list of custom Jira queries to import. For information about JQL (Jira * Query Language), see * https://support.atlassian.com/jira-service-management-cloud/docs/use-advanced-search-with-jira-query-language-jql/ */ customQueries?: string[]; /** * Required. The Jira email address. */ email?: string; /** * A list of Jira projects to import in their entirety. */ projects?: string[]; /** * Required. The Jira server URI. */ serverUri?: string; } /** * Contains information about the Large Model. */ export interface GoogleCloudAiplatformV1LargeModelReference { /** * Required. The unique name of the large Foundation or pre-built model. Like * "chat-bison", "text-bison". Or model name with version ID, like * "chat-bison@001", "text-bison@005", etc. */ name?: string; } /** * A subgraph of the overall lineage graph. Event edges connect Artifact and * Execution nodes. */ export interface GoogleCloudAiplatformV1LineageSubgraph { /** * The Artifact nodes in the subgraph. */ artifacts?: GoogleCloudAiplatformV1Artifact[]; /** * The Event edges between Artifacts and Executions in the subgraph. */ events?: GoogleCloudAiplatformV1Event[]; /** * The Execution nodes in the subgraph. */ executions?: GoogleCloudAiplatformV1Execution[]; } /** * Response message for DatasetService.ListAnnotations. */ export interface GoogleCloudAiplatformV1ListAnnotationsResponse { /** * A list of Annotations that matches the specified filter in the request. */ annotations?: GoogleCloudAiplatformV1Annotation[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for MetadataService.ListArtifacts. */ export interface GoogleCloudAiplatformV1ListArtifactsResponse { /** * The Artifacts retrieved from the MetadataStore. */ artifacts?: GoogleCloudAiplatformV1Artifact[]; /** * A token, which can be sent as ListArtifactsRequest.page_token to retrieve * the next page. If this field is not populated, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for JobService.ListBatchPredictionJobs */ export interface GoogleCloudAiplatformV1ListBatchPredictionJobsResponse { /** * List of BatchPredictionJobs in the requested page. 
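 *
 * A minimal pagination sketch, assuming the AIplatform client and its
 * batchPredictionJobsList method defined earlier in this module; the parent
 * value below is a placeholder:
 *
 *     const client = new AIplatform();
 *     let pageToken: string | undefined;
 *     do {
 *       // Each call returns one page plus an optional nextPageToken.
 *       const page = await client.batchPredictionJobsList({
 *         parent: "projects/my-project/locations/us-central1",
 *         pageSize: 100,
 *         pageToken,
 *       });
 *       for (const job of page.batchPredictionJobs ?? []) {
 *         console.log(job.displayName);
 *       }
 *       pageToken = page.nextPageToken;
 *     } while (pageToken);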
*/ batchPredictionJobs?: GoogleCloudAiplatformV1BatchPredictionJob[]; /** * A token to retrieve the next page of results. Pass to * ListBatchPredictionJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data: any): GoogleCloudAiplatformV1ListBatchPredictionJobsResponse { return { ...data, batchPredictionJobs: data["batchPredictionJobs"] !== undefined ? data["batchPredictionJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1BatchPredictionJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListBatchPredictionJobsResponse(data: any): GoogleCloudAiplatformV1ListBatchPredictionJobsResponse { return { ...data, batchPredictionJobs: data["batchPredictionJobs"] !== undefined ? data["batchPredictionJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1BatchPredictionJob(item))) : undefined, }; } /** * Response with a list of CachedContents. */ export interface GoogleCloudAiplatformV1ListCachedContentsResponse { /** * List of cached contents. */ cachedContents?: GoogleCloudAiplatformV1CachedContent[]; /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListCachedContentsResponse(data: any): GoogleCloudAiplatformV1ListCachedContentsResponse { return { ...data, cachedContents: data["cachedContents"] !== undefined ? data["cachedContents"].map((item: any) => (serializeGoogleCloudAiplatformV1CachedContent(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListCachedContentsResponse(data: any): GoogleCloudAiplatformV1ListCachedContentsResponse { return { ...data, cachedContents: data["cachedContents"] !== undefined ? data["cachedContents"].map((item: any) => (deserializeGoogleCloudAiplatformV1CachedContent(item))) : undefined, }; } /** * Response message for MetadataService.ListContexts. */ export interface GoogleCloudAiplatformV1ListContextsResponse { /** * The Contexts retrieved from the MetadataStore. */ contexts?: GoogleCloudAiplatformV1Context[]; /** * A token, which can be sent as ListContextsRequest.page_token to retrieve * the next page. If this field is not populated, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for JobService.ListCustomJobs */ export interface GoogleCloudAiplatformV1ListCustomJobsResponse { /** * List of CustomJobs in the requested page. */ customJobs?: GoogleCloudAiplatformV1CustomJob[]; /** * A token to retrieve the next page of results. Pass to * ListCustomJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListCustomJobsResponse(data: any): GoogleCloudAiplatformV1ListCustomJobsResponse { return { ...data, customJobs: data["customJobs"] !== undefined ? data["customJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1CustomJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListCustomJobsResponse(data: any): GoogleCloudAiplatformV1ListCustomJobsResponse { return { ...data, customJobs: data["customJobs"] !== undefined ? data["customJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1CustomJob(item))) : undefined, }; } /** * Response message for DatasetService.ListDataItems. */ export interface GoogleCloudAiplatformV1ListDataItemsResponse { /** * A list of DataItems that matches the specified filter in the request. 
*/ dataItems?: GoogleCloudAiplatformV1DataItem[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for JobService.ListDataLabelingJobs. */ export interface GoogleCloudAiplatformV1ListDataLabelingJobsResponse { /** * A list of DataLabelingJobs that matches the specified filter in the * request. */ dataLabelingJobs?: GoogleCloudAiplatformV1DataLabelingJob[]; /** * The standard List next-page token. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListDataLabelingJobsResponse(data: any): GoogleCloudAiplatformV1ListDataLabelingJobsResponse { return { ...data, dataLabelingJobs: data["dataLabelingJobs"] !== undefined ? data["dataLabelingJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1DataLabelingJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListDataLabelingJobsResponse(data: any): GoogleCloudAiplatformV1ListDataLabelingJobsResponse { return { ...data, dataLabelingJobs: data["dataLabelingJobs"] !== undefined ? data["dataLabelingJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1DataLabelingJob(item))) : undefined, }; } /** * Response message for DatasetService.ListDatasets. */ export interface GoogleCloudAiplatformV1ListDatasetsResponse { /** * A list of Datasets that matches the specified filter in the request. */ datasets?: GoogleCloudAiplatformV1Dataset[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for DatasetService.ListDatasetVersions. */ export interface GoogleCloudAiplatformV1ListDatasetVersionsResponse { /** * A list of DatasetVersions that matches the specified filter in the * request. */ datasetVersions?: GoogleCloudAiplatformV1DatasetVersion[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * Response message for ListDeploymentResourcePools method. */ export interface GoogleCloudAiplatformV1ListDeploymentResourcePoolsResponse { /** * The DeploymentResourcePools from the specified location. */ deploymentResourcePools?: GoogleCloudAiplatformV1DeploymentResourcePool[]; /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } /** * Response message for EndpointService.ListEndpoints. */ export interface GoogleCloudAiplatformV1ListEndpointsResponse { /** * List of Endpoints in the requested page. */ endpoints?: GoogleCloudAiplatformV1Endpoint[]; /** * A token to retrieve the next page of results. Pass to * ListEndpointsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListEndpointsResponse(data: any): GoogleCloudAiplatformV1ListEndpointsResponse { return { ...data, endpoints: data["endpoints"] !== undefined ? data["endpoints"].map((item: any) => (serializeGoogleCloudAiplatformV1Endpoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListEndpointsResponse(data: any): GoogleCloudAiplatformV1ListEndpointsResponse { return { ...data, endpoints: data["endpoints"] !== undefined ? data["endpoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1Endpoint(item))) : undefined, }; } /** * Response message for FeaturestoreService.ListEntityTypes. */ export interface GoogleCloudAiplatformV1ListEntityTypesResponse { /** * The EntityTypes matching the request. */ entityTypes?: GoogleCloudAiplatformV1EntityType[]; /** * A token, which can be sent as ListEntityTypesRequest.page_token to * retrieve the next page. 
If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for MetadataService.ListExecutions. */ export interface GoogleCloudAiplatformV1ListExecutionsResponse { /** * The Executions retrieved from the MetadataStore. */ executions?: GoogleCloudAiplatformV1Execution[]; /** * A token, which can be sent as ListExecutionsRequest.page_token to retrieve * the next page. If this field is not populated, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeatureRegistryService.ListFeatureGroups. */ export interface GoogleCloudAiplatformV1ListFeatureGroupsResponse { /** * The FeatureGroups matching the request. */ featureGroups?: GoogleCloudAiplatformV1FeatureGroup[]; /** * A token, which can be sent as ListFeatureGroupsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeatureOnlineStoreAdminService.ListFeatureOnlineStores. */ export interface GoogleCloudAiplatformV1ListFeatureOnlineStoresResponse { /** * The FeatureOnlineStores matching the request. */ featureOnlineStores?: GoogleCloudAiplatformV1FeatureOnlineStore[]; /** * A token, which can be sent as ListFeatureOnlineStoresRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeaturestoreService.ListFeatures. Response message for * FeatureRegistryService.ListFeatures. */ export interface GoogleCloudAiplatformV1ListFeaturesResponse { /** * The Features matching the request. */ features?: GoogleCloudAiplatformV1Feature[]; /** * A token, which can be sent as ListFeaturesRequest.page_token to retrieve * the next page. If this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } /** * Response message for FeaturestoreService.ListFeaturestores. */ export interface GoogleCloudAiplatformV1ListFeaturestoresResponse { /** * The Featurestores matching the request. */ featurestores?: GoogleCloudAiplatformV1Featurestore[]; /** * A token, which can be sent as ListFeaturestoresRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for FeatureOnlineStoreAdminService.ListFeatureViews. */ export interface GoogleCloudAiplatformV1ListFeatureViewsResponse { /** * The FeatureViews matching the request. */ featureViews?: GoogleCloudAiplatformV1FeatureView[]; /** * A token, which can be sent as ListFeatureViewsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListFeatureViewsResponse(data: any): GoogleCloudAiplatformV1ListFeatureViewsResponse { return { ...data, featureViews: data["featureViews"] !== undefined ? data["featureViews"].map((item: any) => (serializeGoogleCloudAiplatformV1FeatureView(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListFeatureViewsResponse(data: any): GoogleCloudAiplatformV1ListFeatureViewsResponse { return { ...data, featureViews: data["featureViews"] !== undefined ? data["featureViews"].map((item: any) => (deserializeGoogleCloudAiplatformV1FeatureView(item))) : undefined, }; } /** * Response message for FeatureOnlineStoreAdminService.ListFeatureViewSyncs. 
*/ export interface GoogleCloudAiplatformV1ListFeatureViewSyncsResponse { /** * The FeatureViewSyncs matching the request. */ featureViewSyncs?: GoogleCloudAiplatformV1FeatureViewSync[]; /** * A token, which can be sent as ListFeatureViewSyncsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; } /** * Response message for JobService.ListHyperparameterTuningJobs */ export interface GoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse { /** * List of HyperparameterTuningJobs in the requested page. * HyperparameterTuningJob.trials of the jobs will not be returned. */ hyperparameterTuningJobs?: GoogleCloudAiplatformV1HyperparameterTuningJob[]; /** * A token to retrieve the next page of results. Pass to * ListHyperparameterTuningJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse { return { ...data, hyperparameterTuningJobs: data["hyperparameterTuningJobs"] !== undefined ? data["hyperparameterTuningJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1HyperparameterTuningJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListHyperparameterTuningJobsResponse { return { ...data, hyperparameterTuningJobs: data["hyperparameterTuningJobs"] !== undefined ? data["hyperparameterTuningJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1HyperparameterTuningJob(item))) : undefined, }; } /** * Response message for IndexEndpointService.ListIndexEndpoints. */ export interface GoogleCloudAiplatformV1ListIndexEndpointsResponse { /** * List of IndexEndpoints in the requested page. */ indexEndpoints?: GoogleCloudAiplatformV1IndexEndpoint[]; /** * A token to retrieve next page of results. Pass to * ListIndexEndpointsRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for IndexService.ListIndexes. */ export interface GoogleCloudAiplatformV1ListIndexesResponse { /** * List of indexes in the requested page. */ indexes?: GoogleCloudAiplatformV1Index[]; /** * A token to retrieve next page of results. Pass to * ListIndexesRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for MetadataService.ListMetadataSchemas. */ export interface GoogleCloudAiplatformV1ListMetadataSchemasResponse { /** * The MetadataSchemas found for the MetadataStore. */ metadataSchemas?: GoogleCloudAiplatformV1MetadataSchema[]; /** * A token, which can be sent as ListMetadataSchemasRequest.page_token to * retrieve the next page. If this field is not populated, there are no * subsequent pages. */ nextPageToken?: string; } /** * Response message for MetadataService.ListMetadataStores. */ export interface GoogleCloudAiplatformV1ListMetadataStoresResponse { /** * The MetadataStores found for the Location. */ metadataStores?: GoogleCloudAiplatformV1MetadataStore[]; /** * A token, which can be sent as ListMetadataStoresRequest.page_token to * retrieve the next page. If this field is not populated, there are no * subsequent pages. */ nextPageToken?: string; } /** * Response message for JobService.ListModelDeploymentMonitoringJobs. */ export interface GoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse { /** * A list of ModelDeploymentMonitoringJobs that matches the specified filter * in the request. 
*/ modelDeploymentMonitoringJobs?: GoogleCloudAiplatformV1ModelDeploymentMonitoringJob[]; /** * The standard List next-page token. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse(data: any): GoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse { return { ...data, modelDeploymentMonitoringJobs: data["modelDeploymentMonitoringJobs"] !== undefined ? data["modelDeploymentMonitoringJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse(data: any): GoogleCloudAiplatformV1ListModelDeploymentMonitoringJobsResponse { return { ...data, modelDeploymentMonitoringJobs: data["modelDeploymentMonitoringJobs"] !== undefined ? data["modelDeploymentMonitoringJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(item))) : undefined, }; } /** * Response message for ModelService.ListModelEvaluationSlices. */ export interface GoogleCloudAiplatformV1ListModelEvaluationSlicesResponse { /** * List of ModelEvaluations in the requested page. */ modelEvaluationSlices?: GoogleCloudAiplatformV1ModelEvaluationSlice[]; /** * A token to retrieve next page of results. Pass to * ListModelEvaluationSlicesRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for ModelService.ListModelEvaluations. */ export interface GoogleCloudAiplatformV1ListModelEvaluationsResponse { /** * List of ModelEvaluations in the requested page. */ modelEvaluations?: GoogleCloudAiplatformV1ModelEvaluation[]; /** * A token to retrieve next page of results. Pass to * ListModelEvaluationsRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for ModelService.ListModels */ export interface GoogleCloudAiplatformV1ListModelsResponse { /** * List of Models in the requested page. */ models?: GoogleCloudAiplatformV1Model[]; /** * A token to retrieve next page of results. Pass to * ListModelsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelsResponse(data: any): GoogleCloudAiplatformV1ListModelsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (serializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelsResponse(data: any): GoogleCloudAiplatformV1ListModelsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (deserializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } /** * Response message for ModelService.ListModelVersionCheckpoints */ export interface GoogleCloudAiplatformV1ListModelVersionCheckpointsResponse { /** * List of Model Version checkpoints. */ checkpoints?: GoogleCloudAiplatformV1ModelVersionCheckpoint[]; /** * A token to retrieve the next page of results. Pass to * ListModelVersionCheckpointsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelVersionCheckpointsResponse(data: any): GoogleCloudAiplatformV1ListModelVersionCheckpointsResponse { return { ...data, checkpoints: data["checkpoints"] !== undefined ? 
data["checkpoints"].map((item: any) => (serializeGoogleCloudAiplatformV1ModelVersionCheckpoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelVersionCheckpointsResponse(data: any): GoogleCloudAiplatformV1ListModelVersionCheckpointsResponse { return { ...data, checkpoints: data["checkpoints"] !== undefined ? data["checkpoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1ModelVersionCheckpoint(item))) : undefined, }; } /** * Response message for ModelService.ListModelVersions */ export interface GoogleCloudAiplatformV1ListModelVersionsResponse { /** * List of Model versions in the requested page. In the returned Model name * field, version ID instead of regvision tag will be included. */ models?: GoogleCloudAiplatformV1Model[]; /** * A token to retrieve the next page of results. Pass to * ListModelVersionsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListModelVersionsResponse(data: any): GoogleCloudAiplatformV1ListModelVersionsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (serializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListModelVersionsResponse(data: any): GoogleCloudAiplatformV1ListModelVersionsResponse { return { ...data, models: data["models"] !== undefined ? data["models"].map((item: any) => (deserializeGoogleCloudAiplatformV1Model(item))) : undefined, }; } /** * Response message for JobService.ListNasJobs */ export interface GoogleCloudAiplatformV1ListNasJobsResponse { /** * List of NasJobs in the requested page. NasJob.nas_job_output of the jobs * will not be returned. */ nasJobs?: GoogleCloudAiplatformV1NasJob[]; /** * A token to retrieve the next page of results. Pass to * ListNasJobsRequest.page_token to obtain that page. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1ListNasJobsResponse(data: any): GoogleCloudAiplatformV1ListNasJobsResponse { return { ...data, nasJobs: data["nasJobs"] !== undefined ? data["nasJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1NasJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListNasJobsResponse(data: any): GoogleCloudAiplatformV1ListNasJobsResponse { return { ...data, nasJobs: data["nasJobs"] !== undefined ? data["nasJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1NasJob(item))) : undefined, }; } /** * Response message for JobService.ListNasTrialDetails */ export interface GoogleCloudAiplatformV1ListNasTrialDetailsResponse { /** * List of top NasTrials in the requested page. */ nasTrialDetails?: GoogleCloudAiplatformV1NasTrialDetail[]; /** * A token to retrieve the next page of results. Pass to * ListNasTrialDetailsRequest.page_token to obtain that page. */ nextPageToken?: string; } /** * Response message for [NotebookService.CreateNotebookExecutionJob] */ export interface GoogleCloudAiplatformV1ListNotebookExecutionJobsResponse { /** * A token to retrieve next page of results. Pass to * ListNotebookExecutionJobsRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of NotebookExecutionJobs in the requested page. */ notebookExecutionJobs?: GoogleCloudAiplatformV1NotebookExecutionJob[]; } function serializeGoogleCloudAiplatformV1ListNotebookExecutionJobsResponse(data: any): GoogleCloudAiplatformV1ListNotebookExecutionJobsResponse { return { ...data, notebookExecutionJobs: data["notebookExecutionJobs"] !== undefined ? 
data["notebookExecutionJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1NotebookExecutionJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListNotebookExecutionJobsResponse(data: any): GoogleCloudAiplatformV1ListNotebookExecutionJobsResponse { return { ...data, notebookExecutionJobs: data["notebookExecutionJobs"] !== undefined ? data["notebookExecutionJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1NotebookExecutionJob(item))) : undefined, }; } /** * Response message for NotebookService.ListNotebookRuntimes. */ export interface GoogleCloudAiplatformV1ListNotebookRuntimesResponse { /** * A token to retrieve next page of results. Pass to * ListNotebookRuntimesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of NotebookRuntimes in the requested page. */ notebookRuntimes?: GoogleCloudAiplatformV1NotebookRuntime[]; } /** * Response message for NotebookService.ListNotebookRuntimeTemplates. */ export interface GoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse { /** * A token to retrieve next page of results. Pass to * ListNotebookRuntimeTemplatesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of NotebookRuntimeTemplates in the requested page. */ notebookRuntimeTemplates?: GoogleCloudAiplatformV1NotebookRuntimeTemplate[]; } function serializeGoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse(data: any): GoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse { return { ...data, notebookRuntimeTemplates: data["notebookRuntimeTemplates"] !== undefined ? data["notebookRuntimeTemplates"].map((item: any) => (serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse(data: any): GoogleCloudAiplatformV1ListNotebookRuntimeTemplatesResponse { return { ...data, notebookRuntimeTemplates: data["notebookRuntimeTemplates"] !== undefined ? data["notebookRuntimeTemplates"].map((item: any) => (deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(item))) : undefined, }; } /** * Request message for VizierService.ListOptimalTrials. */ export interface GoogleCloudAiplatformV1ListOptimalTrialsRequest { } /** * Response message for VizierService.ListOptimalTrials. */ export interface GoogleCloudAiplatformV1ListOptimalTrialsResponse { /** * The pareto-optimal Trials for multiple objective Study or the optimal * trial for single objective Study. The definition of pareto-optimal can be * checked in wiki page. https://en.wikipedia.org/wiki/Pareto_efficiency */ optimalTrials?: GoogleCloudAiplatformV1Trial[]; } /** * Response message for PersistentResourceService.ListPersistentResources */ export interface GoogleCloudAiplatformV1ListPersistentResourcesResponse { /** * A token to retrieve next page of results. Pass to * ListPersistentResourcesRequest.page_token to obtain that page. */ nextPageToken?: string; persistentResources?: GoogleCloudAiplatformV1PersistentResource[]; } function serializeGoogleCloudAiplatformV1ListPersistentResourcesResponse(data: any): GoogleCloudAiplatformV1ListPersistentResourcesResponse { return { ...data, persistentResources: data["persistentResources"] !== undefined ? 
data["persistentResources"].map((item: any) => (serializeGoogleCloudAiplatformV1PersistentResource(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListPersistentResourcesResponse(data: any): GoogleCloudAiplatformV1ListPersistentResourcesResponse { return { ...data, persistentResources: data["persistentResources"] !== undefined ? data["persistentResources"].map((item: any) => (deserializeGoogleCloudAiplatformV1PersistentResource(item))) : undefined, }; } /** * Response message for PipelineService.ListPipelineJobs */ export interface GoogleCloudAiplatformV1ListPipelineJobsResponse { /** * A token to retrieve the next page of results. Pass to * ListPipelineJobsRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of PipelineJobs in the requested page. */ pipelineJobs?: GoogleCloudAiplatformV1PipelineJob[]; } function serializeGoogleCloudAiplatformV1ListPipelineJobsResponse(data: any): GoogleCloudAiplatformV1ListPipelineJobsResponse { return { ...data, pipelineJobs: data["pipelineJobs"] !== undefined ? data["pipelineJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1PipelineJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListPipelineJobsResponse(data: any): GoogleCloudAiplatformV1ListPipelineJobsResponse { return { ...data, pipelineJobs: data["pipelineJobs"] !== undefined ? data["pipelineJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1PipelineJob(item))) : undefined, }; } /** * Response message for VertexRagDataService.ListRagCorpora. */ export interface GoogleCloudAiplatformV1ListRagCorporaResponse { /** * A token to retrieve the next page of results. Pass to * ListRagCorporaRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of RagCorpora in the requested page. */ ragCorpora?: GoogleCloudAiplatformV1RagCorpus[]; } /** * Response message for VertexRagDataService.ListRagFiles. */ export interface GoogleCloudAiplatformV1ListRagFilesResponse { /** * A token to retrieve the next page of results. Pass to * ListRagFilesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of RagFiles in the requested page. */ ragFiles?: GoogleCloudAiplatformV1RagFile[]; } function serializeGoogleCloudAiplatformV1ListRagFilesResponse(data: any): GoogleCloudAiplatformV1ListRagFilesResponse { return { ...data, ragFiles: data["ragFiles"] !== undefined ? data["ragFiles"].map((item: any) => (serializeGoogleCloudAiplatformV1RagFile(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListRagFilesResponse(data: any): GoogleCloudAiplatformV1ListRagFilesResponse { return { ...data, ragFiles: data["ragFiles"] !== undefined ? data["ragFiles"].map((item: any) => (deserializeGoogleCloudAiplatformV1RagFile(item))) : undefined, }; } /** * Response message for ReasoningEngineService.ListReasoningEngines */ export interface GoogleCloudAiplatformV1ListReasoningEnginesResponse { /** * A token to retrieve the next page of results. Pass to * ListReasoningEnginesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of ReasoningEngines in the requested page. */ reasoningEngines?: GoogleCloudAiplatformV1ReasoningEngine[]; } /** * Response message for DatasetService.ListSavedQueries. */ export interface GoogleCloudAiplatformV1ListSavedQueriesResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of SavedQueries that match the specified filter in the request. 
*/ savedQueries?: GoogleCloudAiplatformV1SavedQuery[]; } /** * Response message for ScheduleService.ListSchedules */ export interface GoogleCloudAiplatformV1ListSchedulesResponse { /** * A token to retrieve the next page of results. Pass to * ListSchedulesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of Schedules in the requested page. */ schedules?: GoogleCloudAiplatformV1Schedule[]; } function serializeGoogleCloudAiplatformV1ListSchedulesResponse(data: any): GoogleCloudAiplatformV1ListSchedulesResponse { return { ...data, schedules: data["schedules"] !== undefined ? data["schedules"].map((item: any) => (serializeGoogleCloudAiplatformV1Schedule(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListSchedulesResponse(data: any): GoogleCloudAiplatformV1ListSchedulesResponse { return { ...data, schedules: data["schedules"] !== undefined ? data["schedules"].map((item: any) => (deserializeGoogleCloudAiplatformV1Schedule(item))) : undefined, }; } /** * Response message for SpecialistPoolService.ListSpecialistPools. */ export interface GoogleCloudAiplatformV1ListSpecialistPoolsResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of SpecialistPools that matches the specified filter in the * request. */ specialistPools?: GoogleCloudAiplatformV1SpecialistPool[]; } /** * Response message for VizierService.ListStudies. */ export interface GoogleCloudAiplatformV1ListStudiesResponse { /** * Pass this token as the `page_token` field of the request for a * subsequent call. If this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * The studies associated with the project. */ studies?: GoogleCloudAiplatformV1Study[]; } function serializeGoogleCloudAiplatformV1ListStudiesResponse(data: any): GoogleCloudAiplatformV1ListStudiesResponse { return { ...data, studies: data["studies"] !== undefined ? data["studies"].map((item: any) => (serializeGoogleCloudAiplatformV1Study(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListStudiesResponse(data: any): GoogleCloudAiplatformV1ListStudiesResponse { return { ...data, studies: data["studies"] !== undefined ? data["studies"].map((item: any) => (deserializeGoogleCloudAiplatformV1Study(item))) : undefined, }; } /** * Response message for TensorboardService.ListTensorboardExperiments. */ export interface GoogleCloudAiplatformV1ListTensorboardExperimentsResponse { /** * A token, which can be sent as ListTensorboardExperimentsRequest.page_token * to retrieve the next page. If this field is omitted, there are no * subsequent pages. */ nextPageToken?: string; /** * The TensorboardExperiments matching the request. */ tensorboardExperiments?: GoogleCloudAiplatformV1TensorboardExperiment[]; } /** * Response message for TensorboardService.ListTensorboardRuns. */ export interface GoogleCloudAiplatformV1ListTensorboardRunsResponse { /** * A token, which can be sent as ListTensorboardRunsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. */ nextPageToken?: string; /** * The TensorboardRuns matching the request. */ tensorboardRuns?: GoogleCloudAiplatformV1TensorboardRun[]; } /** * Response message for TensorboardService.ListTensorboards. */ export interface GoogleCloudAiplatformV1ListTensorboardsResponse { /** * A token, which can be sent as ListTensorboardsRequest.page_token to * retrieve the next page. If this field is omitted, there are no subsequent * pages. 
*/ nextPageToken?: string; /** * The Tensorboards matching the request. */ tensorboards?: GoogleCloudAiplatformV1Tensorboard[]; } /** * Response message for TensorboardService.ListTensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse { /** * A token, which can be sent as ListTensorboardTimeSeriesRequest.page_token * to retrieve the next page. If this field is omitted, there are no * subsequent pages. */ nextPageToken?: string; /** * The TensorboardTimeSeries matching the request. */ tensorboardTimeSeries?: GoogleCloudAiplatformV1TensorboardTimeSeries[]; } function serializeGoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (serializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse(data: any): GoogleCloudAiplatformV1ListTensorboardTimeSeriesResponse { return { ...data, tensorboardTimeSeries: data["tensorboardTimeSeries"] !== undefined ? data["tensorboardTimeSeries"].map((item: any) => (deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(item))) : undefined, }; } /** * Response message for PipelineService.ListTrainingPipelines */ export interface GoogleCloudAiplatformV1ListTrainingPipelinesResponse { /** * A token to retrieve the next page of results. Pass to * ListTrainingPipelinesRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of TrainingPipelines in the requested page. */ trainingPipelines?: GoogleCloudAiplatformV1TrainingPipeline[]; } function serializeGoogleCloudAiplatformV1ListTrainingPipelinesResponse(data: any): GoogleCloudAiplatformV1ListTrainingPipelinesResponse { return { ...data, trainingPipelines: data["trainingPipelines"] !== undefined ? data["trainingPipelines"].map((item: any) => (serializeGoogleCloudAiplatformV1TrainingPipeline(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListTrainingPipelinesResponse(data: any): GoogleCloudAiplatformV1ListTrainingPipelinesResponse { return { ...data, trainingPipelines: data["trainingPipelines"] !== undefined ? data["trainingPipelines"].map((item: any) => (deserializeGoogleCloudAiplatformV1TrainingPipeline(item))) : undefined, }; } /** * Response message for VizierService.ListTrials. */ export interface GoogleCloudAiplatformV1ListTrialsResponse { /** * Pass this token as the `page_token` field of the request for a subsequent * call. If this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * The Trials associated with the Study. */ trials?: GoogleCloudAiplatformV1Trial[]; } /** * Response message for GenAiTuningService.ListTuningJobs */ export interface GoogleCloudAiplatformV1ListTuningJobsResponse { /** * A token to retrieve the next page of results. Pass to * ListTuningJobsRequest.page_token to obtain that page. */ nextPageToken?: string; /** * List of TuningJobs in the requested page. */ tuningJobs?: GoogleCloudAiplatformV1TuningJob[]; } function serializeGoogleCloudAiplatformV1ListTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListTuningJobsResponse { return { ...data, tuningJobs: data["tuningJobs"] !== undefined ? 
data["tuningJobs"].map((item: any) => (serializeGoogleCloudAiplatformV1TuningJob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ListTuningJobsResponse(data: any): GoogleCloudAiplatformV1ListTuningJobsResponse { return { ...data, tuningJobs: data["tuningJobs"] !== undefined ? data["tuningJobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1TuningJob(item))) : undefined, }; } /** * Logprobs Result */ export interface GoogleCloudAiplatformV1LogprobsResult { /** * Length = total number of decoding steps. The chosen candidates may or may * not be in top_candidates. */ chosenCandidates?: GoogleCloudAiplatformV1LogprobsResultCandidate[]; /** * Length = total number of decoding steps. */ topCandidates?: GoogleCloudAiplatformV1LogprobsResultTopCandidates[]; } /** * Candidate for the logprobs token and score. */ export interface GoogleCloudAiplatformV1LogprobsResultCandidate { /** * The candidate's log probability. */ logProbability?: number; /** * The candidate's token string value. */ token?: string; /** * The candidate's token id value. */ tokenId?: number; } /** * Candidates with top log probabilities at each decoding step. */ export interface GoogleCloudAiplatformV1LogprobsResultTopCandidates { /** * Sorted by log probability in descending order. */ candidates?: GoogleCloudAiplatformV1LogprobsResultCandidate[]; } /** * Request message for VizierService.LookupStudy. */ export interface GoogleCloudAiplatformV1LookupStudyRequest { /** * Required. The user-defined display name of the Study */ displayName?: string; } /** * Specification of a single machine. */ export interface GoogleCloudAiplatformV1MachineSpec { /** * The number of accelerators to attach to the machine. */ acceleratorCount?: number; /** * Immutable. The type of accelerator(s) that may be attached to the machine * as per accelerator_count. */ acceleratorType?: | "ACCELERATOR_TYPE_UNSPECIFIED" | "NVIDIA_TESLA_K80" | "NVIDIA_TESLA_P100" | "NVIDIA_TESLA_V100" | "NVIDIA_TESLA_P4" | "NVIDIA_TESLA_T4" | "NVIDIA_TESLA_A100" | "NVIDIA_A100_80GB" | "NVIDIA_L4" | "NVIDIA_H100_80GB" | "NVIDIA_H100_MEGA_80GB" | "NVIDIA_H200_141GB" | "TPU_V2" | "TPU_V3" | "TPU_V4_POD" | "TPU_V5_LITEPOD"; /** * Immutable. The type of the machine. See the [list of machine types * supported for * prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) * See the [list of machine types supported for custom * training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). * For DeployedModel this field is optional, and the default value is * `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this * field is required. */ machineType?: string; /** * Optional. Immutable. Configuration controlling how this resource pool * consumes reservation. */ reservationAffinity?: GoogleCloudAiplatformV1ReservationAffinity; /** * Immutable. The topology of the TPUs. Corresponds to the TPU topologies * available from GKE. (Example: tpu_topology: "2x2x1"). */ tpuTopology?: string; } /** * Manual batch tuning parameters. */ export interface GoogleCloudAiplatformV1ManualBatchTuningParameters { /** * Immutable. The number of the records (e.g. instances) of the operation * given in each batch to a machine replica. 
The machine type and the size of a * single record should be considered when setting this parameter: a higher * value speeds up the batch operation's execution, but too high a value will * result in a whole batch not fitting in a machine's memory, and the whole * operation will fail. The default value is 64. */ batchSize?: number; } /** * A message representing a Measurement of a Trial. A Measurement contains the * Metrics obtained by executing a Trial using suggested hyperparameter values. */ export interface GoogleCloudAiplatformV1Measurement { /** * Output only. Time that the Trial has been running at the point of this * Measurement. */ readonly elapsedDuration?: number /* Duration */; /** * Output only. A list of metrics obtained by evaluating the objective functions * using suggested Parameter values. */ readonly metrics?: GoogleCloudAiplatformV1MeasurementMetric[]; /** * Output only. The number of steps the machine learning model has been * trained for. Must be non-negative. */ readonly stepCount?: bigint; } /** * A message representing a metric in the measurement. */ export interface GoogleCloudAiplatformV1MeasurementMetric { /** * Output only. The ID of the Metric. The Metric should be defined in * StudySpec's Metrics. */ readonly metricId?: string; /** * Output only. The value for this metric. */ readonly value?: number; } /** * Request message for ModelService.MergeVersionAliases. */ export interface GoogleCloudAiplatformV1MergeVersionAliasesRequest { /** * Required. The set of version aliases to merge. The alias should be at most * 128 characters, and match `a-z{0,126}[a-z-0-9]`. Adding the `-` prefix to an * alias means removing that alias from the version. `-` is NOT counted in the * 128 characters. Example: `-golden` means removing the `golden` alias from * the version. There is NO ordering in aliases, which means 1) The aliases * returned from the GetModel API might not have exactly the same order as this * MergeVersionAliases API. 2) Adding and deleting the same alias in the * request is not recommended, and the two operations will cancel out. */ versionAliases?: string[]; } /** * Instance of a general MetadataSchema. */ export interface GoogleCloudAiplatformV1MetadataSchema { /** * Output only. Timestamp when this MetadataSchema was created. */ readonly createTime?: Date; /** * Description of the Metadata Schema */ description?: string; /** * Output only. The resource name of the MetadataSchema. */ readonly name?: string; /** * Required. The raw YAML string representation of the MetadataSchema. The * combination of [MetadataSchema.version] and the schema name given by * `title` in [MetadataSchema.schema] must be unique within a MetadataStore. * The schema is defined as an OpenAPI 3.0.2 [MetadataSchema * Object](https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.2.md#schemaObject) */ schema?: string; /** * The type of the MetadataSchema. This is a property that identifies which * metadata types will use the MetadataSchema. */ schemaType?: | "METADATA_SCHEMA_TYPE_UNSPECIFIED" | "ARTIFACT_TYPE" | "EXECUTION_TYPE" | "CONTEXT_TYPE"; /** * The version of the MetadataSchema. The version's format must match the * following regular expression: `^[0-9]+.+.+$`, which allows * ordering/comparing different versions. Example: 1.0.0, 1.0.1, etc. */ schemaVersion?: string; } /** * Instance of a metadata store. Contains a set of metadata that can be * queried. */ export interface GoogleCloudAiplatformV1MetadataStore { /** * Output only. Timestamp when this MetadataStore was created. 
*/ readonly createTime?: Date; /** * Optional. Dataplex integration settings. */ dataplexConfig?: GoogleCloudAiplatformV1MetadataStoreDataplexConfig; /** * Description of the MetadataStore. */ description?: string; /** * Customer-managed encryption key spec for a Metadata Store. If set, this * Metadata Store and all sub-resources of this Metadata Store are secured * using this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. The resource name of the MetadataStore instance. */ readonly name?: string; /** * Output only. State information of the MetadataStore. */ readonly state?: GoogleCloudAiplatformV1MetadataStoreMetadataStoreState; /** * Output only. Timestamp when this MetadataStore was last updated. */ readonly updateTime?: Date; } /** * Represents Dataplex integration settings. */ export interface GoogleCloudAiplatformV1MetadataStoreDataplexConfig { /** * Optional. Whether or not Data Lineage synchronization is enabled for * Vertex Pipelines. */ enabledPipelinesLineage?: boolean; } /** * Represents state information for a MetadataStore. */ export interface GoogleCloudAiplatformV1MetadataStoreMetadataStoreState { /** * The disk utilization of the MetadataStore in bytes. */ diskUtilizationBytes?: bigint; } function serializeGoogleCloudAiplatformV1MetadataStoreMetadataStoreState(data: any): GoogleCloudAiplatformV1MetadataStoreMetadataStoreState { return { ...data, diskUtilizationBytes: data["diskUtilizationBytes"] !== undefined ? String(data["diskUtilizationBytes"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1MetadataStoreMetadataStoreState(data: any): GoogleCloudAiplatformV1MetadataStoreMetadataStoreState { return { ...data, diskUtilizationBytes: data["diskUtilizationBytes"] !== undefined ? BigInt(data["diskUtilizationBytes"]) : undefined, }; } /** * The metric used for dataset level evaluation. */ export interface GoogleCloudAiplatformV1Metric { /** * Optional. The aggregation metrics to use. */ aggregationMetrics?: ( | "AGGREGATION_METRIC_UNSPECIFIED" | "AVERAGE" | "MODE" | "STANDARD_DEVIATION" | "VARIANCE" | "MINIMUM" | "MAXIMUM" | "MEDIAN" | "PERCENTILE_P90" | "PERCENTILE_P95" | "PERCENTILE_P99")[]; /** * Spec for bleu metric. */ bleuSpec?: GoogleCloudAiplatformV1BleuSpec; /** * Spec for exact match metric. */ exactMatchSpec?: GoogleCloudAiplatformV1ExactMatchSpec; /** * Spec for pairwise metric. */ pairwiseMetricSpec?: GoogleCloudAiplatformV1PairwiseMetricSpec; /** * Spec for pointwise metric. */ pointwiseMetricSpec?: GoogleCloudAiplatformV1PointwiseMetricSpec; /** * Spec for rouge metric. */ rougeSpec?: GoogleCloudAiplatformV1RougeSpec; } /** * Input for MetricX metric. */ export interface GoogleCloudAiplatformV1MetricxInput { /** * Required. Metricx instance. */ instance?: GoogleCloudAiplatformV1MetricxInstance; /** * Required. Spec for Metricx metric. */ metricSpec?: GoogleCloudAiplatformV1MetricxSpec; } /** * Spec for MetricX instance - The fields used for evaluation are dependent on * the MetricX version. */ export interface GoogleCloudAiplatformV1MetricxInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; /** * Optional. Source text in original language. */ source?: string; } /** * Spec for MetricX result - calculates the MetricX score for the given * instance using the version specified in the spec. */ export interface GoogleCloudAiplatformV1MetricxResult { /** * Output only. MetricX score. 
Range depends on version. */ readonly score?: number; } /** * Spec for MetricX metric. */ export interface GoogleCloudAiplatformV1MetricxSpec { /** * Optional. Source language in BCP-47 format. */ sourceLanguage?: string; /** * Optional. Target language in BCP-47 format. Covers both prediction and * reference. */ targetLanguage?: string; /** * Required. Which version to use for evaluation. */ version?: | "METRICX_VERSION_UNSPECIFIED" | "METRICX_24_REF" | "METRICX_24_SRC" | "METRICX_24_SRC_REF"; } /** * Represents one resource that exists in automl.googleapis.com, * datalabeling.googleapis.com or ml.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResource { /** * Output only. Represents one Dataset in automl.googleapis.com. */ readonly automlDataset?: GoogleCloudAiplatformV1MigratableResourceAutomlDataset; /** * Output only. Represents one Model in automl.googleapis.com. */ readonly automlModel?: GoogleCloudAiplatformV1MigratableResourceAutomlModel; /** * Output only. Represents one Dataset in datalabeling.googleapis.com. */ readonly dataLabelingDataset?: GoogleCloudAiplatformV1MigratableResourceDataLabelingDataset; /** * Output only. Timestamp when the last migration attempt on this * MigratableResource started. Will not be set if there's no migration attempt * on this MigratableResource. */ readonly lastMigrateTime?: Date; /** * Output only. Timestamp when this MigratableResource was last updated. */ readonly lastUpdateTime?: Date; /** * Output only. Represents one Version in ml.googleapis.com. */ readonly mlEngineModelVersion?: GoogleCloudAiplatformV1MigratableResourceMlEngineModelVersion; } /** * Represents one Dataset in automl.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceAutomlDataset { /** * Full resource name of automl Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}`. */ dataset?: string; /** * The Dataset's display name in automl.googleapis.com. */ datasetDisplayName?: string; } /** * Represents one Model in automl.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceAutomlModel { /** * Full resource name of automl Model. Format: * `projects/{project}/locations/{location}/models/{model}`. */ model?: string; /** * The Model's display name in automl.googleapis.com. */ modelDisplayName?: string; } /** * Represents one Dataset in datalabeling.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceDataLabelingDataset { /** * The migratable AnnotatedDataset in datalabeling.googleapis.com belongs to * the data labeling Dataset. */ dataLabelingAnnotatedDatasets?: GoogleCloudAiplatformV1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset[]; /** * Full resource name of data labeling Dataset. Format: * `projects/{project}/datasets/{dataset}`. */ dataset?: string; /** * The Dataset's display name in datalabeling.googleapis.com. */ datasetDisplayName?: string; } /** * Represents one AnnotatedDataset in datalabeling.googleapis.com. */ export interface GoogleCloudAiplatformV1MigratableResourceDataLabelingDatasetDataLabelingAnnotatedDataset { /** * Full resource name of data labeling AnnotatedDataset. Format: * `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`. */ annotatedDataset?: string; /** * The AnnotatedDataset's display name in datalabeling.googleapis.com. */ annotatedDatasetDisplayName?: string; } /** * Represents one model Version in ml.googleapis.com. 
*/ export interface GoogleCloudAiplatformV1MigratableResourceMlEngineModelVersion { /** * The ml.googleapis.com endpoint that this model Version currently lives in. * Example values: * ml.googleapis.com * us-centrall-ml.googleapis.com * * europe-west4-ml.googleapis.com * asia-east1-ml.googleapis.com */ endpoint?: string; /** * Full resource name of ml engine model Version. Format: * `projects/{project}/models/{model}/versions/{version}`. */ version?: string; } /** * Config of migrating one resource from automl.googleapis.com, * datalabeling.googleapis.com and ml.googleapis.com to Vertex AI. */ export interface GoogleCloudAiplatformV1MigrateResourceRequest { /** * Config for migrating Dataset in automl.googleapis.com to Vertex AI's * Dataset. */ migrateAutomlDatasetConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlDatasetConfig; /** * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. */ migrateAutomlModelConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlModelConfig; /** * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's * Dataset. */ migrateDataLabelingDatasetConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfig; /** * Config for migrating Version in ml.googleapis.com to Vertex AI's Model. */ migrateMlEngineModelVersionConfig?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateMlEngineModelVersionConfig; } /** * Config for migrating Dataset in automl.googleapis.com to Vertex AI's * Dataset. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlDatasetConfig { /** * Required. Full resource name of automl Dataset. Format: * `projects/{project}/locations/{location}/datasets/{dataset}`. */ dataset?: string; /** * Required. Display name of the Dataset in Vertex AI. System will pick a * display name if unspecified. */ datasetDisplayName?: string; } /** * Config for migrating Model in automl.googleapis.com to Vertex AI's Model. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateAutomlModelConfig { /** * Required. Full resource name of automl Model. Format: * `projects/{project}/locations/{location}/models/{model}`. */ model?: string; /** * Optional. Display name of the model in Vertex AI. System will pick a * display name if unspecified. */ modelDisplayName?: string; } /** * Config for migrating Dataset in datalabeling.googleapis.com to Vertex AI's * Dataset. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfig { /** * Required. Full resource name of data labeling Dataset. Format: * `projects/{project}/datasets/{dataset}`. */ dataset?: string; /** * Optional. Display name of the Dataset in Vertex AI. System will pick a * display name if unspecified. */ datasetDisplayName?: string; /** * Optional. Configs for migrating AnnotatedDataset in * datalabeling.googleapis.com to Vertex AI's SavedQuery. The specified * AnnotatedDatasets have to belong to the datalabeling Dataset. */ migrateDataLabelingAnnotatedDatasetConfigs?: GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig[]; } /** * Config for migrating AnnotatedDataset in datalabeling.googleapis.com to * Vertex AI's SavedQuery. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateDataLabelingDatasetConfigMigrateDataLabelingAnnotatedDatasetConfig { /** * Required. Full resource name of data labeling AnnotatedDataset. 
Format: * `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`. */ annotatedDataset?: string; } /** * Config for migrating version in ml.googleapis.com to Vertex AI's Model. */ export interface GoogleCloudAiplatformV1MigrateResourceRequestMigrateMlEngineModelVersionConfig { /** * Required. The ml.googleapis.com endpoint that this model version should be * migrated from. Example values: * ml.googleapis.com * * us-centrall-ml.googleapis.com * europe-west4-ml.googleapis.com * * asia-east1-ml.googleapis.com */ endpoint?: string; /** * Required. Display name of the model in Vertex AI. System will pick a * display name if unspecified. */ modelDisplayName?: string; /** * Required. Full resource name of ml engine model version. Format: * `projects/{project}/models/{model}/versions/{version}`. */ modelVersion?: string; } /** * Describes a successfully migrated resource. */ export interface GoogleCloudAiplatformV1MigrateResourceResponse { /** * Migrated Dataset's resource name. */ dataset?: string; /** * Before migration, the identifier in ml.googleapis.com, * automl.googleapis.com or datalabeling.googleapis.com. */ migratableResource?: GoogleCloudAiplatformV1MigratableResource; /** * Migrated Model's resource name. */ model?: string; } /** * Represents token counting info for a single modality. */ export interface GoogleCloudAiplatformV1ModalityTokenCount { /** * The modality associated with this token count. */ modality?: | "MODALITY_UNSPECIFIED" | "TEXT" | "IMAGE" | "VIDEO" | "AUDIO" | "DOCUMENT"; /** * Number of tokens. */ tokenCount?: number; } /** * A trained machine learning Model. */ export interface GoogleCloudAiplatformV1Model { /** * Immutable. The path to the directory containing the Model artifact and any * of its supporting files. Not required for AutoML Models. */ artifactUri?: string; /** * Optional. User input field to specify the base model source. Currently it * only supports specifing the Model Garden models and Genie models. */ baseModelSource?: GoogleCloudAiplatformV1ModelBaseModelSource; /** * Input only. The specification of the container that is to be used when * deploying this Model. The specification is ingested upon * ModelService.UploadModel, and all binaries it contains are copied and * stored internally by Vertex AI. Not required for AutoML Models. */ containerSpec?: GoogleCloudAiplatformV1ModelContainerSpec; /** * Output only. Timestamp when this Model was uploaded into Vertex AI. */ readonly createTime?: Date; /** * Stats of data used for training or evaluating the Model. Only populated * when the Model is trained by a TrainingPipeline with data_input_config. */ dataStats?: GoogleCloudAiplatformV1ModelDataStats; /** * The default checkpoint id of a model version. */ defaultCheckpointId?: string; /** * Output only. The pointers to DeployedModels created from this Model. Note * that Model could have been deployed to Endpoints in different Locations. */ readonly deployedModels?: GoogleCloudAiplatformV1DeployedModelRef[]; /** * The description of the Model. */ description?: string; /** * Required. The display name of the Model. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for a Model. If set, this Model and * all sub-resources of this Model will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. 
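 * A minimal read-modify-write sketch (method names such as `modelsGet` and
 * `modelsPatch` are assumed for illustration and are not confirmed by this
 * excerpt; the point is that the fetched `etag` travels back unchanged):
 *
 * ```ts
 * // Hypothetical usage with the AIplatform client in this module.
 * const client = new AIplatform();
 * const model = await client.modelsGet(name);   // response carries `etag`
 * model.description = "updated description";    // local modification
 * await client.modelsPatch(name, model);        // echoing `etag` lets the
 *                                               // service reject concurrent writes
 * ```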
*/ etag?: string; /** * The default explanation specification for this Model. The Model can be * used for requesting explanation after being deployed if it is populated. * The Model can be used for batch explanation if it is populated. All fields * of the explanation_spec can be overridden by explanation_spec of * DeployModelRequest.deployed_model, or explanation_spec of * BatchPredictionJob. If the default explanation specification is not set for * this Model, this Model can still be used for requesting explanation by * setting explanation_spec of DeployModelRequest.deployed_model and for batch * explanation by setting explanation_spec of BatchPredictionJob. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * The labels with user-defined metadata to organize your Models. Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Immutable. An additional information about the Model; the schema of the * metadata can be found in metadata_schema. Unset if the Model does not have * any additional information. */ metadata?: any; /** * Output only. The resource name of the Artifact that was created in * MetadataStore when creating the Model. The Artifact resource name pattern * is * `projects/{project}/locations/{location}/metadataStores/{metadata_store}/artifacts/{artifact}`. */ readonly metadataArtifact?: string; /** * Immutable. Points to a YAML file stored on Google Cloud Storage describing * additional information about the Model, that is specific to it. Unset if * the Model does not have any additional information. The schema is defined * as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * AutoML Models always have this field populated by Vertex AI, if no * additional metadata is needed, this field is set to an empty string. Note: * The URI given on output will be immutable and probably different, including * the URI scheme, than the one given on input. The output URI will point to a * location where the user only has a read access. */ metadataSchemaUri?: string; /** * Output only. Source of a model. It can either be automl training pipeline, * custom training pipeline, BigQuery ML, or saved and tuned from Genie or * Model Garden. */ readonly modelSourceInfo?: GoogleCloudAiplatformV1ModelSourceInfo; /** * The resource name of the Model. */ name?: string; /** * Output only. If this Model is a copy of another Model, this contains info * about the original. */ readonly originalModelInfo?: GoogleCloudAiplatformV1ModelOriginalModelInfo; /** * Optional. This field is populated if the model is produced by a pipeline * job. */ pipelineJob?: string; /** * The schemata that describe formats of the Model's predictions and * explanations as given and returned via PredictionService.Predict and * PredictionService.Explain. */ predictSchemata?: GoogleCloudAiplatformV1PredictSchemata; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. When this Model is deployed, its prediction resources are * described by the `prediction_resources` field of the * Endpoint.deployed_models object. 
Because not all Models support all * resource configuration types, the configuration types this Model supports * are listed here. If no configuration types are listed, the Model cannot be * deployed to an Endpoint and does not support online predictions * (PredictionService.Predict or PredictionService.Explain). Such a Model can * serve predictions by using a BatchPredictionJob, if it has at least one * entry each in supported_input_storage_formats and * supported_output_storage_formats. */ readonly supportedDeploymentResourcesTypes?: | "DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED" | "DEDICATED_RESOURCES" | "AUTOMATIC_RESOURCES" | "SHARED_RESOURCES"[]; /** * Output only. The formats in which this Model may be exported. If empty, * this Model is not available for export. */ readonly supportedExportFormats?: GoogleCloudAiplatformV1ModelExportFormat[]; /** * Output only. The formats this Model supports in * BatchPredictionJob.input_config. If PredictSchemata.instance_schema_uri * exists, the instances should be given as per that schema. The possible * formats are: * `jsonl` The JSON Lines format, where each instance is a * single line. Uses GcsSource. * `csv` The CSV format, where each instance is * a single comma-separated line. The first line in the file is the header, * containing comma-separated field names. Uses GcsSource. * `tf-record` The * TFRecord format, where each instance is a single record in tfrecord syntax. * Uses GcsSource. * `tf-record-gzip` Similar to `tf-record`, but the file is * gzipped. Uses GcsSource. * `bigquery` Each instance is a single row in * BigQuery. Uses BigQuerySource. * `file-list` Each line of the file is the * location of an instance to process, uses `gcs_source` field of the * InputConfig object. If this Model doesn't support any of these formats it * means it cannot be used with a BatchPredictionJob. However, if it has * supported_deployment_resources_types, it could serve online predictions by * using PredictionService.Predict or PredictionService.Explain. */ readonly supportedInputStorageFormats?: string[]; /** * Output only. The formats this Model supports in * BatchPredictionJob.output_config. If both * PredictSchemata.instance_schema_uri and * PredictSchemata.prediction_schema_uri exist, the predictions are returned * together with their instances. In other words, the prediction has the * original instance data first, followed by the actual prediction content (as * per the schema). The possible formats are: * `jsonl` The JSON Lines format, * where each prediction is a single line. Uses GcsDestination. * `csv` The * CSV format, where each prediction is a single comma-separated line. The * first line in the file is the header, containing comma-separated field * names. Uses GcsDestination. * `bigquery` Each prediction is a single row in * a BigQuery table, uses BigQueryDestination . If this Model doesn't support * any of these formats it means it cannot be used with a BatchPredictionJob. * However, if it has supported_deployment_resources_types, it could serve * online predictions by using PredictionService.Predict or * PredictionService.Explain. */ readonly supportedOutputStorageFormats?: string[]; /** * Output only. The resource name of the TrainingPipeline that uploaded this * Model, if any. */ readonly trainingPipeline?: string; /** * Output only. Timestamp when this Model was most recently updated. */ readonly updateTime?: Date; /** * User provided version aliases so that a model version can be referenced * via alias (i.e. 
* `projects/{project}/locations/{location}/models/{model_id}@{version_alias}` * instead of auto-generated version id (i.e. * `projects/{project}/locations/{location}/models/{model_id}@{version_id})`. * The format is a-z{0,126}[a-z0-9] to distinguish from version_id. A default * version alias will be created for the first version of the model, and there * must be exactly one default version alias for a model. */ versionAliases?: string[]; /** * Output only. Timestamp when this version was created. */ readonly versionCreateTime?: Date; /** * The description of this version. */ versionDescription?: string; /** * Output only. Immutable. The version ID of the model. A new version is * committed when a new model version is uploaded or trained under an existing * model id. It is an auto-incrementing decimal number in string * representation. */ readonly versionId?: string; /** * Output only. Timestamp when this version was most recently updated. */ readonly versionUpdateTime?: Date; } function serializeGoogleCloudAiplatformV1Model(data: any): GoogleCloudAiplatformV1Model { return { ...data, containerSpec: data["containerSpec"] !== undefined ? serializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, dataStats: data["dataStats"] !== undefined ? serializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Model(data: any): GoogleCloudAiplatformV1Model { return { ...data, containerSpec: data["containerSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, dataStats: data["dataStats"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDataStats(data["dataStats"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, versionCreateTime: data["versionCreateTime"] !== undefined ? new Date(data["versionCreateTime"]) : undefined, versionUpdateTime: data["versionUpdateTime"] !== undefined ? new Date(data["versionUpdateTime"]) : undefined, }; } /** * User input field to specify the base model source. Currently it only * supports specifing the Model Garden models and Genie models. */ export interface GoogleCloudAiplatformV1ModelBaseModelSource { /** * Information about the base model of Genie models. */ genieSource?: GoogleCloudAiplatformV1GenieSource; /** * Source information of Model Garden models. */ modelGardenSource?: GoogleCloudAiplatformV1ModelGardenSource; } /** * Specification of a container for serving predictions. Some fields in this * message correspond to fields in the [Kubernetes Container v1 core * specification](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ export interface GoogleCloudAiplatformV1ModelContainerSpec { /** * Immutable. Specifies arguments for the command that runs when the * container starts. This overrides the container's * [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify * this field as an array of executable and arguments, similar to a Docker * `CMD`'s "default parameters" form. If you don't specify this field but do * specify the command field, then the command from the `command` field runs * without any additional arguments. 
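 * For example (illustrative), `args: ["--model_dir=$(AIP_STORAGE_URI)",
 * "--workers=2"]` passes default parameters to the entrypoint, with the
 * Vertex-provided `AIP_STORAGE_URI` variable expanded before the container
 * starts.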
See the [Kubernetes documentation about * how the `command` and `args` fields interact with a container's * `ENTRYPOINT` and * `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). * If you don't specify this field and don't specify the `command` field, then * the container's * [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and * `CMD` determine what runs based on their default behavior. See the Docker * documentation about [how `CMD` and `ENTRYPOINT` * interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). * In this field, you can reference [environment variables set by Vertex * AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) * and environment variables set in the env field. You cannot reference * environment variables set in the Docker image. In order for environment * variables to be expanded, reference them by using the following syntax: $( * VARIABLE_NAME) Note that this differs from Bash variable expansion, which * does not use parentheses. If a variable cannot be resolved, the reference * in the input string is used unchanged. To avoid variable expansion, you can * escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field * corresponds to the `args` field of the Kubernetes Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ args?: string[]; /** * Immutable. Specifies the command that runs when the container starts. This * overrides the container's * [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). * Specify this field as an array of executable and arguments, similar to a * Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. If you do not * specify this field, then the container's `ENTRYPOINT` runs, in conjunction * with the args field or the container's * [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), if either * exists. If this field is not specified and the container does not have an * `ENTRYPOINT`, then refer to the Docker documentation about [how `CMD` and * `ENTRYPOINT` * interact](https://docs.docker.com/engine/reference/builder/#understand-how-cmd-and-entrypoint-interact). * If you specify this field, then you can also specify the `args` field to * provide additional arguments for this command. However, if you specify this * field, then the container's `CMD` is ignored. See the [Kubernetes * documentation about how the `command` and `args` fields interact with a * container's `ENTRYPOINT` and * `CMD`](https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#notes). * In this field, you can reference [environment variables set by Vertex * AI](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables) * and environment variables set in the env field. You cannot reference * environment variables set in the Docker image. In order for environment * variables to be expanded, reference them by using the following syntax: $( * VARIABLE_NAME) Note that this differs from Bash variable expansion, which * does not use parentheses. If a variable cannot be resolved, the reference * in the input string is used unchanged. 
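 * For example (illustrative), `command: ["python", "server.py", "--port",
 * "$(AIP_HTTP_PORT)"]` runs a custom server on the port that Vertex AI
 * injects through the `AIP_HTTP_PORT` environment variable.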
To avoid variable expansion, you can * escape this syntax with `$$`; for example: $$(VARIABLE_NAME) This field * corresponds to the `command` field of the Kubernetes Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ command?: string[]; /** * Immutable. Deployment timeout. Limit for deployment timeout is 2 hours. */ deploymentTimeout?: number /* Duration */; /** * Immutable. List of environment variables to set in the container. After * the container starts running, code running in the container can read these * environment variables. Additionally, the command and args fields can * reference these variables. Later entries in this list can also reference * earlier entries. For example, the following example sets the variable * `VAR_2` to have the value `foo bar`: ```json [ { "name": "VAR_1", "value": * "foo" }, { "name": "VAR_2", "value": "$(VAR_1) bar" } ] ``` If you switch * the order of the variables in the example, then the expansion does not * occur. This field corresponds to the `env` field of the Kubernetes * Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Immutable. List of ports to expose from the container. Vertex AI sends * gRPC prediction requests that it receives to the first port on this list. * Vertex AI also sends liveness and health checks to this port. If you do not * specify this field, gRPC requests to the container will be disabled. Vertex * AI does not use ports other than the first one listed. This field * corresponds to the `ports` field of the Kubernetes Containers v1 core API. */ grpcPorts?: GoogleCloudAiplatformV1Port[]; /** * Immutable. Specification for Kubernetes readiness probe. */ healthProbe?: GoogleCloudAiplatformV1Probe; /** * Immutable. HTTP path on the container to send health checks to. Vertex AI * intermittently sends GET requests to this path on the container's IP * address and port to check that the container is healthy. Read more about * [health * checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#health). * For example, if you set this field to `/bar`, then Vertex AI intermittently * sends a GET request to the `/bar` path on the port of your container * specified by the first value of this `ModelContainerSpec`'s ports field. If * you don't specify this field, it defaults to the following value when you * deploy this Model to an Endpoint: /v1/endpoints/ENDPOINT/deployedModels/ * DEPLOYED_MODEL:predict The placeholders in this value are replaced as * follows: * ENDPOINT: The last segment (following `endpoints/`)of the * Endpoint.name][] field of the Endpoint where this Model has been deployed. * (Vertex AI makes this value available to your container code as the * [`AIP_ENDPOINT_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes * this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` * environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ healthRoute?: string; /** * Required. Immutable. URI of the Docker image to be used as the custom * container for serving predictions. This URI must identify an image in * Artifact Registry or Container Registry. 
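 * A typical Artifact Registry value looks like
 * `{region}-docker.pkg.dev/{project}/{repository}/{image}:{tag}`, e.g.
 * `us-central1-docker.pkg.dev/my-project/serving/my-model:v1` (illustrative).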
Learn more about the [container * publishing * requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#publishing), * including permissions requirements for the Vertex AI Service Agent. The * container image is ingested upon ModelService.UploadModel, stored * internally, and this original path is afterwards not used. To learn about * the requirements for the Docker image itself, see [Custom container * requirements](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#). * You can use the URI to one of Vertex AI's [pre-built container images for * prediction](https://cloud.google.com/vertex-ai/docs/predictions/pre-built-containers) * in this field. */ imageUri?: string; /** * Immutable. Specification for Kubernetes liveness probe. */ livenessProbe?: GoogleCloudAiplatformV1Probe; /** * Immutable. List of ports to expose from the container. Vertex AI sends any * prediction requests that it receives to the first port on this list. Vertex * AI also sends [liveness and health * checks](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#liveness) * to this port. If you do not specify this field, it defaults to following * value: ```json [ { "containerPort": 8080 } ] ``` Vertex AI does not use * ports other than the first one listed. This field corresponds to the * `ports` field of the Kubernetes Containers [v1 core * API](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#container-v1-core). */ ports?: GoogleCloudAiplatformV1Port[]; /** * Immutable. HTTP path on the container to send prediction requests to. * Vertex AI forwards requests sent using projects.locations.endpoints.predict * to this path on the container's IP address and port. Vertex AI then returns * the container's response in the API response. For example, if you set this * field to `/foo`, then when Vertex AI receives a prediction request, it * forwards the request body in a POST request to the `/foo` path on the port * of your container specified by the first value of this * `ModelContainerSpec`'s ports field. If you don't specify this field, it * defaults to the following value when you deploy this Model to an Endpoint: * /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict The * placeholders in this value are replaced as follows: * ENDPOINT: The last * segment (following `endpoints/`)of the Endpoint.name][] field of the * Endpoint where this Model has been deployed. (Vertex AI makes this value * available to your container code as the [`AIP_ENDPOINT_ID` environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) * * DEPLOYED_MODEL: DeployedModel.id of the `DeployedModel`. (Vertex AI makes * this value available to your container code as the [`AIP_DEPLOYED_MODEL_ID` * environment * variable](https://cloud.google.com/vertex-ai/docs/predictions/custom-container-requirements#aip-variables).) */ predictRoute?: string; /** * Immutable. The amount of the VM memory to reserve as the shared memory for * the model in megabytes. */ sharedMemorySizeMb?: bigint; /** * Immutable. Specification for Kubernetes startup probe. */ startupProbe?: GoogleCloudAiplatformV1Probe; } function serializeGoogleCloudAiplatformV1ModelContainerSpec(data: any): GoogleCloudAiplatformV1ModelContainerSpec { return { ...data, deploymentTimeout: data["deploymentTimeout"] !== undefined ? 
data["deploymentTimeout"] : undefined, sharedMemorySizeMb: data["sharedMemorySizeMb"] !== undefined ? String(data["sharedMemorySizeMb"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelContainerSpec(data: any): GoogleCloudAiplatformV1ModelContainerSpec { return { ...data, deploymentTimeout: data["deploymentTimeout"] !== undefined ? data["deploymentTimeout"] : undefined, sharedMemorySizeMb: data["sharedMemorySizeMb"] !== undefined ? BigInt(data["sharedMemorySizeMb"]) : undefined, }; } /** * Stats of data used for train or evaluate the Model. */ export interface GoogleCloudAiplatformV1ModelDataStats { /** * Number of Annotations that are used for evaluating this Model. If the * Model is evaluated multiple times, this will be the number of test * Annotations used by the first evaluation. If the Model is not evaluated, * the number is 0. */ testAnnotationsCount?: bigint; /** * Number of DataItems that were used for evaluating this Model. If the Model * is evaluated multiple times, this will be the number of test DataItems used * by the first evaluation. If the Model is not evaluated, the number is 0. */ testDataItemsCount?: bigint; /** * Number of Annotations that are used for training this Model. */ trainingAnnotationsCount?: bigint; /** * Number of DataItems that were used for training this Model. */ trainingDataItemsCount?: bigint; /** * Number of Annotations that are used for validating this Model during * training. */ validationAnnotationsCount?: bigint; /** * Number of DataItems that were used for validating this Model during * training. */ validationDataItemsCount?: bigint; } function serializeGoogleCloudAiplatformV1ModelDataStats(data: any): GoogleCloudAiplatformV1ModelDataStats { return { ...data, testAnnotationsCount: data["testAnnotationsCount"] !== undefined ? String(data["testAnnotationsCount"]) : undefined, testDataItemsCount: data["testDataItemsCount"] !== undefined ? String(data["testDataItemsCount"]) : undefined, trainingAnnotationsCount: data["trainingAnnotationsCount"] !== undefined ? String(data["trainingAnnotationsCount"]) : undefined, trainingDataItemsCount: data["trainingDataItemsCount"] !== undefined ? String(data["trainingDataItemsCount"]) : undefined, validationAnnotationsCount: data["validationAnnotationsCount"] !== undefined ? String(data["validationAnnotationsCount"]) : undefined, validationDataItemsCount: data["validationDataItemsCount"] !== undefined ? String(data["validationDataItemsCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDataStats(data: any): GoogleCloudAiplatformV1ModelDataStats { return { ...data, testAnnotationsCount: data["testAnnotationsCount"] !== undefined ? BigInt(data["testAnnotationsCount"]) : undefined, testDataItemsCount: data["testDataItemsCount"] !== undefined ? BigInt(data["testDataItemsCount"]) : undefined, trainingAnnotationsCount: data["trainingAnnotationsCount"] !== undefined ? BigInt(data["trainingAnnotationsCount"]) : undefined, trainingDataItemsCount: data["trainingDataItemsCount"] !== undefined ? BigInt(data["trainingDataItemsCount"]) : undefined, validationAnnotationsCount: data["validationAnnotationsCount"] !== undefined ? BigInt(data["validationAnnotationsCount"]) : undefined, validationDataItemsCount: data["validationDataItemsCount"] !== undefined ? BigInt(data["validationDataItemsCount"]) : undefined, }; } /** * ModelDeploymentMonitoringBigQueryTable specifies the BigQuery table name as * well as some information of the logs stored in this table. 
*/ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringBigQueryTable { /** * The created BigQuery table to store logs. Customer could do their own * query & analysis. Format: `bq://.model_deployment_monitoring_._` */ bigqueryTablePath?: string; /** * The source of log. */ logSource?: | "LOG_SOURCE_UNSPECIFIED" | "TRAINING" | "SERVING"; /** * The type of log. */ logType?: | "LOG_TYPE_UNSPECIFIED" | "PREDICT" | "EXPLAIN"; /** * Output only. The schema version of the request/response logging BigQuery * table. Default to v1 if unset. */ readonly requestResponseLoggingSchemaVersion?: string; } /** * Represents a job that runs periodically to monitor the deployed models in an * endpoint. It will analyze the logged training & prediction data to detect any * abnormal behaviors. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringJob { /** * YAML schema file uri describing the format of a single instance that you * want Tensorflow Data Validation (TFDV) to analyze. If this field is empty, * all the feature data types are inferred from predict_instance_schema_uri, * meaning that TFDV will use the data in the exact format(data type) as * prediction request/response. If there are any data type differences between * predict instance and TFDV instance, this field can be used to override the * schema. For models trained with Vertex AI, this field must be set as all * the fields in predict instance formatted as string. */ analysisInstanceSchemaUri?: string; /** * Output only. The created bigquery tables for the job under customer * project. Customer could do their own query & analysis. There could be 4 log * tables in maximum: 1. Training data logging predict request/response 2. * Serving data logging predict request/response */ readonly bigqueryTables?: GoogleCloudAiplatformV1ModelDeploymentMonitoringBigQueryTable[]; /** * Output only. Timestamp when this ModelDeploymentMonitoringJob was created. */ readonly createTime?: Date; /** * Required. The user-defined name of the ModelDeploymentMonitoringJob. The * name can be up to 128 characters long and can consist of any UTF-8 * characters. Display name of a ModelDeploymentMonitoringJob. */ displayName?: string; /** * If true, the scheduled monitoring pipeline logs are sent to Google Cloud * Logging, including pipeline status and anomalies detected. Please note the * logs incur cost, which are subject to [Cloud Logging * pricing](https://cloud.google.com/logging#pricing). */ enableMonitoringPipelineLogs?: boolean; /** * Customer-managed encryption key spec for a ModelDeploymentMonitoringJob. * If set, this ModelDeploymentMonitoringJob and all sub-resources of this * ModelDeploymentMonitoringJob will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Required. Endpoint resource name. Format: * `projects/{project}/locations/{location}/endpoints/{endpoint}` */ endpoint?: string; /** * Output only. Only populated when the job's state is `JOB_STATE_FAILED` or * `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * The labels with user-defined metadata to organize your * ModelDeploymentMonitoringJob. Label keys and values can be no longer than * 64 characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * Output only. Latest triggered monitoring pipeline metadata. 
*/ readonly latestMonitoringPipelineMetadata?: GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata; /** * Required. Sample Strategy for logging. */ loggingSamplingStrategy?: GoogleCloudAiplatformV1SamplingStrategy; /** * The TTL of BigQuery tables in user projects which stores logs. A day is * the basic unit of the TTL and we take the ceil of TTL/86400(a day). e.g. { * second: 3600} indicates ttl = 1 day. */ logTtl?: number /* Duration */; /** * Required. The config for monitoring objectives. This is a per * DeployedModel config. Each DeployedModel needs to be configured separately. */ modelDeploymentMonitoringObjectiveConfigs?: GoogleCloudAiplatformV1ModelDeploymentMonitoringObjectiveConfig[]; /** * Required. Schedule config for running the monitoring job. */ modelDeploymentMonitoringScheduleConfig?: GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig; /** * Alert config for model monitoring. */ modelMonitoringAlertConfig?: GoogleCloudAiplatformV1ModelMonitoringAlertConfig; /** * Output only. Resource name of a ModelDeploymentMonitoringJob. */ readonly name?: string; /** * Output only. Timestamp when this monitoring pipeline will be scheduled to * run for the next round. */ readonly nextScheduleTime?: Date; /** * YAML schema file uri describing the format of a single instance, which are * given to format this Endpoint's prediction (and explanation). If not set, * we will generate predict schema from collected predict requests. */ predictInstanceSchemaUri?: string; /** * Sample Predict instance, same format as PredictRequest.instances, this can * be set as a replacement of * ModelDeploymentMonitoringJob.predict_instance_schema_uri. If not set, we * will generate predict schema from collected predict requests. */ samplePredictInstance?: any; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Schedule state when the monitoring job is in Running state. */ readonly scheduleState?: | "MONITORING_SCHEDULE_STATE_UNSPECIFIED" | "PENDING" | "OFFLINE" | "RUNNING"; /** * Output only. The detailed state of the monitoring job. When the job is * still creating, the state will be 'PENDING'. Once the job is successfully * created, the state will be 'RUNNING'. Pause the job, the state will be * 'PAUSED'. Resume the job, the state will return to 'RUNNING'. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Stats anomalies base folder path. */ statsAnomaliesBaseDirectory?: GoogleCloudAiplatformV1GcsDestination; /** * Output only. Timestamp when this ModelDeploymentMonitoringJob was updated * most recently. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJob { return { ...data, logTtl: data["logTtl"] !== undefined ? data["logTtl"] : undefined, modelDeploymentMonitoringScheduleConfig: data["modelDeploymentMonitoringScheduleConfig"] !== undefined ? 
serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data["modelDeploymentMonitoringScheduleConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJob(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, latestMonitoringPipelineMetadata: data["latestMonitoringPipelineMetadata"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(data["latestMonitoringPipelineMetadata"]) : undefined, logTtl: data["logTtl"] !== undefined ? data["logTtl"] : undefined, modelDeploymentMonitoringScheduleConfig: data["modelDeploymentMonitoringScheduleConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data["modelDeploymentMonitoringScheduleConfig"]) : undefined, nextScheduleTime: data["nextScheduleTime"] !== undefined ? new Date(data["nextScheduleTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * All metadata of most recent monitoring pipelines. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { /** * The time that most recent monitoring pipelines that is related to this * run. */ runTime?: Date; /** * The status of the most recent monitoring pipeline. */ status?: GoogleRpcStatus; } function serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { return { ...data, runTime: data["runTime"] !== undefined ? data["runTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringJobLatestMonitoringPipelineMetadata { return { ...data, runTime: data["runTime"] !== undefined ? new Date(data["runTime"]) : undefined, }; } /** * ModelDeploymentMonitoringObjectiveConfig contains the pair of * deployed_model_id to ModelMonitoringObjectiveConfig. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringObjectiveConfig { /** * The DeployedModel ID of the objective config. */ deployedModelId?: string; /** * The objective config of for the modelmonitoring job of this deployed * model. */ objectiveConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfig; } /** * The config for scheduling monitoring job. */ export interface GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig { /** * Required. The model monitoring job scheduling interval. It will be rounded * up to next full hour. This defines how often the monitoring jobs are * triggered. */ monitorInterval?: number /* Duration */; /** * The time window of the prediction data being included in each prediction * dataset. This window specifies how long the data should be collected from * historical model results for each run. If not set, * ModelDeploymentMonitoringScheduleConfig.monitor_interval will be used. e.g. * If currently the cutoff time is 2022-01-08 14:30:00 and the monitor_window * is set to be 3600, then data from 2022-01-08 13:30:00 to 2022-01-08 * 14:30:00 will be retrieved and aggregated to calculate the monitoring * statistics. 
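 * For example, leaving this field unset while `monitorInterval` is one hour
 * means each run analyzes roughly the most recent hour of prediction traffic.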
*/ monitorWindow?: number /* Duration */; } function serializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig { return { ...data, monitorInterval: data["monitorInterval"] !== undefined ? data["monitorInterval"] : undefined, monitorWindow: data["monitorWindow"] !== undefined ? data["monitorWindow"] : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig(data: any): GoogleCloudAiplatformV1ModelDeploymentMonitoringScheduleConfig { return { ...data, monitorInterval: data["monitorInterval"] !== undefined ? data["monitorInterval"] : undefined, monitorWindow: data["monitorWindow"] !== undefined ? data["monitorWindow"] : undefined, }; } /** * A collection of metrics calculated by comparing Model's predictions on all * of the test data against annotations from the test data. */ export interface GoogleCloudAiplatformV1ModelEvaluation { /** * Points to a YAML file stored on Google Cloud Storage describing * EvaluatedDataItemView.predictions, EvaluatedDataItemView.ground_truths, * EvaluatedAnnotation.predictions, and EvaluatedAnnotation.ground_truths. The * schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * This field is not populated if there are neither EvaluatedDataItemViews nor * EvaluatedAnnotations under this ModelEvaluation. */ annotationSchemaUri?: string; /** * Output only. Timestamp when this ModelEvaluation was created. */ readonly createTime?: Date; /** * Points to a YAML file stored on Google Cloud Storage describing * EvaluatedDataItemView.data_item_payload and * EvaluatedAnnotation.data_item_payload. The schema is defined as an OpenAPI * 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * This field is not populated if there are neither EvaluatedDataItemViews nor * EvaluatedAnnotations under this ModelEvaluation. */ dataItemSchemaUri?: string; /** * The display name of the ModelEvaluation. */ displayName?: string; /** * Describes the values of ExplanationSpec that are used for explaining the * predicted values on the evaluated data. */ explanationSpecs?: GoogleCloudAiplatformV1ModelEvaluationModelEvaluationExplanationSpec[]; /** * The metadata of the ModelEvaluation. For the ModelEvaluation uploaded from * Managed Pipeline, metadata contains a structured value with keys of * "pipeline_job_id", "evaluation_dataset_type", "evaluation_dataset_path", * "row_based_metrics_path". */ metadata?: any; /** * Evaluation metrics of the Model. The schema of the metrics is stored in * metrics_schema_uri */ metrics?: any; /** * Points to a YAML file stored on Google Cloud Storage describing the * metrics of this ModelEvaluation. The schema is defined as an OpenAPI 3.0.2 * [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ metricsSchemaUri?: string; /** * Aggregated explanation metrics for the Model's prediction output over the * data this ModelEvaluation uses. This field is populated only if the Model * is evaluated with explanations, and only for AutoML tabular Models. */ modelExplanation?: GoogleCloudAiplatformV1ModelExplanation; /** * Output only. The resource name of the ModelEvaluation. */ readonly name?: string; /** * All possible dimensions of ModelEvaluationSlices. 
The dimensions can be * used as the filter of the ModelService.ListModelEvaluationSlices request, * in the form of `slice.dimension = `. */ sliceDimensions?: string[]; } export interface GoogleCloudAiplatformV1ModelEvaluationModelEvaluationExplanationSpec { /** * Explanation spec details. */ explanationSpec?: GoogleCloudAiplatformV1ExplanationSpec; /** * Explanation type. For AutoML Image Classification models, possible values * are: * `image-integrated-gradients` * `image-xrai` */ explanationType?: string; } /** * A collection of metrics calculated by comparing Model's predictions on a * slice of the test data against ground truth annotations. */ export interface GoogleCloudAiplatformV1ModelEvaluationSlice { /** * Output only. Timestamp when this ModelEvaluationSlice was created. */ readonly createTime?: Date; /** * Output only. Sliced evaluation metrics of the Model. The schema of the * metrics is stored in metrics_schema_uri */ readonly metrics?: any; /** * Output only. Points to a YAML file stored on Google Cloud Storage * describing the metrics of this ModelEvaluationSlice. The schema is defined * as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). */ readonly metricsSchemaUri?: string; /** * Output only. Aggregated explanation metrics for the Model's prediction * output over the data this ModelEvaluation uses. This field is populated * only if the Model is evaluated with explanations, and only for tabular * Models. */ readonly modelExplanation?: GoogleCloudAiplatformV1ModelExplanation; /** * Output only. The resource name of the ModelEvaluationSlice. */ readonly name?: string; /** * Output only. The slice of the test data that is used to evaluate the * Model. */ readonly slice?: GoogleCloudAiplatformV1ModelEvaluationSliceSlice; } /** * Definition of a slice. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSlice { /** * Output only. The dimension of the slice. Well-known dimensions are: * * `annotationSpec`: This slice is on the test data that has either ground * truth or prediction with AnnotationSpec.display_name equals to value. * * `slice`: This slice is a user customized slice defined by its SliceSpec. */ readonly dimension?: string; /** * Output only. Specification for how the data was sliced. */ readonly sliceSpec?: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpec; /** * Output only. The value of the dimension in this slice. */ readonly value?: string; } /** * Specification for how the data should be sliced. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpec { /** * Mapping configuration for this SliceSpec. The key is the name of the * feature. By default, the key will be prefixed by "instance" as a dictionary * prefix for Vertex Batch Predictions output format. */ configs?: { [key: string]: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecSliceConfig }; } /** * A range of values for slice(s). `low` is inclusive, `high` is exclusive. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecRange { /** * Exclusive high value for the range. */ high?: number; /** * Inclusive low value for the range. */ low?: number; } /** * Specification message containing the config for this SliceSpec. When `kind` * is selected as `value` and/or `range`, only a single slice will be computed. * When `all_values` is present, a separate slice will be computed for each * possible label/value for the corresponding key in `config`. 
Examples, with * feature zip_code with values 12345, 23334, 88888 and feature country with * values "US", "Canada", "Mexico" in the dataset: Example 1: { "zip_code": { * "value": { "float_value": 12345.0 } } } A single slice for any data with * zip_code 12345 in the dataset. Example 2: { "zip_code": { "range": { "low": * 12345, "high": 20000 } } } A single slice containing data where the zip_codes * between 12345 and 20000 For this example, data with the zip_code of 12345 * will be in this slice. Example 3: { "zip_code": { "range": { "low": 10000, * "high": 20000 } }, "country": { "value": { "string_value": "US" } } } A * single slice containing data where the zip_codes between 10000 and 20000 has * the country "US". For this example, data with the zip_code of 12345 and * country "US" will be in this slice. Example 4: { "country": {"all_values": { * "value": true } } } Three slices are computed, one for each unique country in * the dataset. Example 5: { "country": { "all_values": { "value": true } }, * "zip_code": { "value": { "float_value": 12345.0 } } } Three slices are * computed, one for each unique country in the dataset where the zip_code is * also 12345. For this example, data with zip_code 12345 and country "US" will * be in one slice, zip_code 12345 and country "Canada" in another slice, and * zip_code 12345 and country "Mexico" in another slice, totaling 3 slices. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecSliceConfig { /** * If all_values is set to true, then all possible labels of the keyed * feature will have another slice computed. Example: * `{"all_values":{"value":true}}` */ allValues?: boolean; /** * A range of values for a numerical feature. Example: * `{"range":{"low":10000.0,"high":50000.0}}` will capture 12345 and 23334 in * the slice. */ range?: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecRange; /** * A unique specific value for a given feature. Example: `{ "value": { * "string_value": "12345" } }` */ value?: GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecValue; } /** * Single value that supports strings and floats. */ export interface GoogleCloudAiplatformV1ModelEvaluationSliceSliceSliceSpecValue { /** * Float type. */ floatValue?: number; /** * String type. */ stringValue?: string; } /** * Aggregated explanation metrics for a Model over a set of instances. */ export interface GoogleCloudAiplatformV1ModelExplanation { /** * Output only. Aggregated attributions explaining the Model's prediction * outputs over the set of instances. The attributions are grouped by outputs. * For Models that predict only one output, such as regression Models that * predict only one score, there is only one attibution that explains the * predicted output. For Models that predict multiple outputs, such as * multiclass Models that predict multiple classes, each element explains one * specific item. Attribution.output_index can be used to identify which * output this attribution is explaining. The baselineOutputValue, * instanceOutputValue and featureAttributions fields are averaged over the * test data. NOTE: Currently AutoML tabular classification Models produce * only one attribution, which averages attributions over all the classes it * predicts. Attribution.approximation_error is not populated. */ readonly meanAttributions?: GoogleCloudAiplatformV1Attribution[]; } /** * Represents export format supported by the Model. All formats export to * Google Cloud Storage. 
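 * For example (illustrative), an entry with `id` of `tf-saved-model` and
 * `exportableContents` containing `ARTIFACT` indicates that the Model can be
 * exported as a TensorFlow SavedModel directory to a Cloud Storage location.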
*/ export interface GoogleCloudAiplatformV1ModelExportFormat { /** * Output only. The content of this Model that may be exported. */ readonly exportableContents?: | "EXPORTABLE_CONTENT_UNSPECIFIED" | "ARTIFACT" | "IMAGE"[]; /** * Output only. The ID of the export format. The possible format IDs are: * * `tflite` Used for Android mobile devices. * `edgetpu-tflite` Used for [Edge * TPU](https://cloud.google.com/edge-tpu/) devices. * `tf-saved-model` A * tensorflow model in SavedModel format. * `tf-js` A * [TensorFlow.js](https://www.tensorflow.org/js) model that can be used in * the browser and in Node.js using JavaScript. * `core-ml` Used for iOS * mobile devices. * `custom-trained` A Model that was uploaded or trained by * custom code. * `genie` A tuned Model Garden model. */ readonly id?: string; } /** * Contains information about the source of the models generated from Model * Garden. */ export interface GoogleCloudAiplatformV1ModelGardenSource { /** * Required. The model garden source model resource name. */ publicModelName?: string; /** * Optional. Whether to avoid pulling the model from the HF cache. */ skipHfModelCache?: boolean; /** * Optional. The model garden source model version ID. */ versionId?: string; } /** * The alert config for model monitoring. */ export interface GoogleCloudAiplatformV1ModelMonitoringAlertConfig { /** * Email alert config. */ emailAlertConfig?: GoogleCloudAiplatformV1ModelMonitoringAlertConfigEmailAlertConfig; /** * Dump the anomalies to Cloud Logging. The anomalies will be put to json * payload encoded from proto ModelMonitoringStatsAnomalies. This can be * further synced to Pub/Sub or any other services supported by Cloud Logging. */ enableLogging?: boolean; /** * Resource names of the NotificationChannels to send alert. Must be of the * format `projects//notificationChannels/` */ notificationChannels?: string[]; } /** * The config for email alert. */ export interface GoogleCloudAiplatformV1ModelMonitoringAlertConfigEmailAlertConfig { /** * The email addresses to send the alert. */ userEmails?: string[]; } /** * The objective configuration for model monitoring, including the information * needed to detect anomalies for one particular model. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfig { /** * The config for integrating with Vertex Explainable AI. */ explanationConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfig; /** * The config for drift of prediction data. */ predictionDriftDetectionConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig; /** * Training dataset for models. This field has to be set only if * TrainingPredictionSkewDetectionConfig is specified. */ trainingDataset?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingDataset; /** * The config for skew between training data and prediction data. */ trainingPredictionSkewDetectionConfig?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig; } /** * The config for integrating with Vertex Explainable AI. Only applicable if * the Model has explanation_spec populated. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfig { /** * If want to analyze the Vertex Explainable AI feature attribute scores or * not. If set to true, Vertex AI will log the feature attributions from * explain response and do the skew/drift detection for them. 
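 * A hedged configuration sketch (the `outputUriPrefix` field of GcsDestination
 * and the bucket path are assumptions used only for illustration):
 *
 * ```ts
 * const explanationConfig: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfig = {
 *   enableFeatureAttributes: true, // log attributions and monitor their skew/drift
 *   explanationBaseline: {
 *     gcs: { outputUriPrefix: "gs://my-bucket/monitoring-baseline/" },
 *     predictionFormat: "JSONL",
 *   },
 * };
 * ```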
*/ enableFeatureAttributes?: boolean; /** * Predictions generated by the BatchPredictionJob using baseline dataset. */ explanationBaseline?: GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline; } /** * Output from BatchPredictionJob for Model Monitoring baseline dataset, which * can be used to generate baseline attribution scores. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigExplanationConfigExplanationBaseline { /** * BigQuery location for BatchExplain output. */ bigquery?: GoogleCloudAiplatformV1BigQueryDestination; /** * Cloud Storage location for BatchExplain output. */ gcs?: GoogleCloudAiplatformV1GcsDestination; /** * The storage format of the predictions generated BatchPrediction job. */ predictionFormat?: | "PREDICTION_FORMAT_UNSPECIFIED" | "JSONL" | "BIGQUERY"; } /** * The config for Prediction data drift detection. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigPredictionDriftDetectionConfig { /** * Key is the feature name and value is the threshold. The threshold here is * against attribution score distance between different time windows. */ attributionScoreDriftThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; /** * Drift anomaly detection threshold used by all features. When the * per-feature thresholds are not set, this field can be used to specify a * threshold for all features. */ defaultDriftThreshold?: GoogleCloudAiplatformV1ThresholdConfig; /** * Key is the feature name and value is the threshold. If a feature needs to * be monitored for drift, a value threshold must be configured for that * feature. The threshold here is against feature distribution distance * between different time windws. */ driftThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; } /** * Training Dataset information. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingDataset { /** * The BigQuery table of the unmanaged Dataset used to train this Model. */ bigquerySource?: GoogleCloudAiplatformV1BigQuerySource; /** * Data format of the dataset, only applicable if the input is from Google * Cloud Storage. The possible formats are: "tf-record" The source file is a * TFRecord file. "csv" The source file is a CSV file. "jsonl" The source file * is a JSONL file. */ dataFormat?: string; /** * The resource name of the Dataset used to train this Model. */ dataset?: string; /** * The Google Cloud Storage uri of the unmanaged Dataset used to train this * Model. */ gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Strategy to sample data from Training Dataset. If not set, we process the * whole dataset. */ loggingSamplingStrategy?: GoogleCloudAiplatformV1SamplingStrategy; /** * The target field name the model is to predict. This field will be excluded * when doing Predict and (or) Explain for the training data. */ targetField?: string; } /** * The config for Training & Prediction data skew detection. It specifies the * training dataset sources and the skew detection parameters. */ export interface GoogleCloudAiplatformV1ModelMonitoringObjectiveConfigTrainingPredictionSkewDetectionConfig { /** * Key is the feature name and value is the threshold. The threshold here is * against attribution score distance between the training and prediction * feature. */ attributionScoreSkewThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; /** * Skew anomaly detection threshold used by all features. 
When the * per-feature thresholds are not set, this field can be used to specify a * threshold for all features. */ defaultSkewThreshold?: GoogleCloudAiplatformV1ThresholdConfig; /** * Key is the feature name and value is the threshold. If a feature needs to * be monitored for skew, a value threshold must be configured for that * feature. The threshold here is against feature distribution distance * between the training and prediction feature. */ skewThresholds?: { [key: string]: GoogleCloudAiplatformV1ThresholdConfig }; } /** * Statistics and anomalies generated by Model Monitoring. */ export interface GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies { /** * Number of anomalies within all stats. */ anomalyCount?: number; /** * Deployed Model ID. */ deployedModelId?: string; /** * A list of historical Stats and Anomalies generated for all Features. */ featureStats?: GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies[]; /** * Model Monitoring Objective those stats and anomalies belonging to. */ objective?: | "MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED" | "RAW_FEATURE_SKEW" | "RAW_FEATURE_DRIFT" | "FEATURE_ATTRIBUTION_SKEW" | "FEATURE_ATTRIBUTION_DRIFT"; } function serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies { return { ...data, featureStats: data["featureStats"] !== undefined ? data["featureStats"].map((item: any) => (serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies { return { ...data, featureStats: data["featureStats"] !== undefined ? data["featureStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(item))) : undefined, }; } /** * Historical Stats (and Anomalies) for a specific Feature. */ export interface GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { /** * Display Name of the Feature. */ featureDisplayName?: string; /** * A list of historical stats generated by different time window's Prediction * Dataset. */ predictionStats?: GoogleCloudAiplatformV1FeatureStatsAnomaly[]; /** * Threshold for anomaly detection. */ threshold?: GoogleCloudAiplatformV1ThresholdConfig; /** * Stats calculated for the Training Dataset. */ trainingStats?: GoogleCloudAiplatformV1FeatureStatsAnomaly; } function serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { return { ...data, predictionStats: data["predictionStats"] !== undefined ? data["predictionStats"].map((item: any) => (serializeGoogleCloudAiplatformV1FeatureStatsAnomaly(item))) : undefined, trainingStats: data["trainingStats"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data["trainingStats"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies(data: any): GoogleCloudAiplatformV1ModelMonitoringStatsAnomaliesFeatureHistoricStatsAnomalies { return { ...data, predictionStats: data["predictionStats"] !== undefined ? data["predictionStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1FeatureStatsAnomaly(item))) : undefined, trainingStats: data["trainingStats"] !== undefined ? 
deserializeGoogleCloudAiplatformV1FeatureStatsAnomaly(data["trainingStats"]) : undefined, }; } /** * Contains information about the original Model if this Model is a copy. */ export interface GoogleCloudAiplatformV1ModelOriginalModelInfo { /** * Output only. The resource name of the Model this Model is a copy of, * including the revision. Format: * `projects/{project}/locations/{location}/models/{model_id}@{version_id}` */ readonly model?: string; } /** * Detail description of the source information of the model. */ export interface GoogleCloudAiplatformV1ModelSourceInfo { /** * If this Model is copy of another Model. If true then source_type pertains * to the original. */ copy?: boolean; /** * Type of the model source. */ sourceType?: | "MODEL_SOURCE_TYPE_UNSPECIFIED" | "AUTOML" | "CUSTOM" | "BQML" | "MODEL_GARDEN" | "GENIE" | "CUSTOM_TEXT_EMBEDDING" | "MARKETPLACE"; } /** * Describes the machine learning model version checkpoint. */ export interface GoogleCloudAiplatformV1ModelVersionCheckpoint { /** * The ID of the checkpoint. */ checkpointId?: string; /** * The epoch of the checkpoint. */ epoch?: bigint; /** * Identifier. The resource name of the ModelVersionCheckpoint. Format: * `projects/{project}/locations/{location}/models/{model}/versions/{version}/checkpoints/{checkpoint}` */ name?: string; /** * The step of the checkpoint. */ step?: bigint; } function serializeGoogleCloudAiplatformV1ModelVersionCheckpoint(data: any): GoogleCloudAiplatformV1ModelVersionCheckpoint { return { ...data, epoch: data["epoch"] !== undefined ? String(data["epoch"]) : undefined, step: data["step"] !== undefined ? String(data["step"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ModelVersionCheckpoint(data: any): GoogleCloudAiplatformV1ModelVersionCheckpoint { return { ...data, epoch: data["epoch"] !== undefined ? BigInt(data["epoch"]) : undefined, step: data["step"] !== undefined ? BigInt(data["step"]) : undefined, }; } /** * Runtime operation information for IndexEndpointService.MutateDeployedIndex. */ export interface GoogleCloudAiplatformV1MutateDeployedIndexOperationMetadata { /** * The unique index id specified by user */ deployedIndexId?: string; /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Response message for IndexEndpointService.MutateDeployedIndex. */ export interface GoogleCloudAiplatformV1MutateDeployedIndexResponse { /** * The DeployedIndex that had been updated in the IndexEndpoint. */ deployedIndex?: GoogleCloudAiplatformV1DeployedIndex; } /** * Runtime operation information for EndpointService.MutateDeployedModel. */ export interface GoogleCloudAiplatformV1MutateDeployedModelOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for EndpointService.MutateDeployedModel. */ export interface GoogleCloudAiplatformV1MutateDeployedModelRequest { /** * Required. The DeployedModel to be mutated within the Endpoint. Only the * following fields can be mutated: * `min_replica_count` in either * DedicatedResources or AutomaticResources * `max_replica_count` in either * DedicatedResources or AutomaticResources * autoscaling_metric_specs * * `disable_container_logging` (v1 only) * `enable_container_logging` (v1beta1 * only) */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; /** * Required. The update mask applies to the resource. See * google.protobuf.FieldMask. 
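   *
   * Illustrative sketch only: a request that resizes an already-deployed
   * model. The deployed model id, the replica counts, the
   * `dedicatedResources` shape of the referenced DeployedModel type, and the
   * mask paths shown are example assumptions.
   *
   * ```ts
   * const req: GoogleCloudAiplatformV1MutateDeployedModelRequest = {
   *   deployedModel: {
   *     id: "1234567890",
   *     dedicatedResources: { minReplicaCount: 1, maxReplicaCount: 4 },
   *   },
   *   updateMask: "dedicatedResources.minReplicaCount,dedicatedResources.maxReplicaCount",
   * };
   * ```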
*/ updateMask?: string /* FieldMask */; } function serializeGoogleCloudAiplatformV1MutateDeployedModelRequest(data: any): GoogleCloudAiplatformV1MutateDeployedModelRequest { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeGoogleCloudAiplatformV1MutateDeployedModelRequest(data: any): GoogleCloudAiplatformV1MutateDeployedModelRequest { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Response message for EndpointService.MutateDeployedModel. */ export interface GoogleCloudAiplatformV1MutateDeployedModelResponse { /** * The DeployedModel that's being mutated. */ deployedModel?: GoogleCloudAiplatformV1DeployedModel; } /** * Represents a Neural Architecture Search (NAS) job. */ export interface GoogleCloudAiplatformV1NasJob { /** * Output only. Time when the NasJob was created. */ readonly createTime?: Date; /** * Required. The display name of the NasJob. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Enable a separation of Custom model training and restricted * image training for tenant project. */ enableRestrictedImageTraining?: boolean; /** * Customer-managed encryption key options for a NasJob. If this is set, then * all resources created by the NasJob will be encrypted with the provided * encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the NasJob entered any of the following states: * `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is JOB_STATE_FAILED or * JOB_STATE_CANCELLED. */ readonly error?: GoogleRpcStatus; /** * The labels with user-defined metadata to organize NasJobs. Label keys and * values can be no longer than 64 characters (Unicode codepoints), can only * contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Resource name of the NasJob. */ readonly name?: string; /** * Output only. Output of the NasJob. */ readonly nasJobOutput?: GoogleCloudAiplatformV1NasJobOutput; /** * Required. The specification of a NasJob. */ nasJobSpec?: GoogleCloudAiplatformV1NasJobSpec; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the NasJob for the first time entered the * `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Output only. Time when the NasJob was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1NasJob(data: any): GoogleCloudAiplatformV1NasJob { return { ...data, nasJobSpec: data["nasJobSpec"] !== undefined ? 
serializeGoogleCloudAiplatformV1NasJobSpec(data["nasJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJob(data: any): GoogleCloudAiplatformV1NasJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, nasJobSpec: data["nasJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpec(data["nasJobSpec"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Represents a uCAIP NasJob output. */ export interface GoogleCloudAiplatformV1NasJobOutput { /** * Output only. The output of this multi-trial Neural Architecture Search * (NAS) job. */ readonly multiTrialJobOutput?: GoogleCloudAiplatformV1NasJobOutputMultiTrialJobOutput; } /** * The output of a multi-trial Neural Architecture Search (NAS) jobs. */ export interface GoogleCloudAiplatformV1NasJobOutputMultiTrialJobOutput { /** * Output only. List of NasTrials that were started as part of search stage. */ readonly searchTrials?: GoogleCloudAiplatformV1NasTrial[]; /** * Output only. List of NasTrials that were started as part of train stage. */ readonly trainTrials?: GoogleCloudAiplatformV1NasTrial[]; } /** * Represents the spec of a NasJob. */ export interface GoogleCloudAiplatformV1NasJobSpec { /** * The spec of multi-trial algorithms. */ multiTrialAlgorithmSpec?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec; /** * The ID of the existing NasJob in the same Project and Location which will * be used to resume search. search_space_spec and nas_algorithm_spec are * obtained from previous NasJob hence should not provide them again for this * NasJob. */ resumeNasJobId?: string; /** * It defines the search space for Neural Architecture Search (NAS). */ searchSpaceSpec?: string; } function serializeGoogleCloudAiplatformV1NasJobSpec(data: any): GoogleCloudAiplatformV1NasJobSpec { return { ...data, multiTrialAlgorithmSpec: data["multiTrialAlgorithmSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data["multiTrialAlgorithmSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpec(data: any): GoogleCloudAiplatformV1NasJobSpec { return { ...data, multiTrialAlgorithmSpec: data["multiTrialAlgorithmSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data["multiTrialAlgorithmSpec"]) : undefined, }; } /** * The spec of multi-trial Neural Architecture Search (NAS). */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec { /** * Metric specs for the NAS job. Validation for this field is done at * `multi_trial_algorithm_spec` field. */ metric?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecMetricSpec; /** * The multi-trial Neural Architecture Search (NAS) algorithm type. Defaults * to `REINFORCEMENT_LEARNING`. */ multiTrialAlgorithm?: | "MULTI_TRIAL_ALGORITHM_UNSPECIFIED" | "REINFORCEMENT_LEARNING" | "GRID_SEARCH"; /** * Required. Spec for search trials. */ searchTrialSpec?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec; /** * Spec for train trials. Top N [TrainTrialSpec.max_parallel_trial_count] * search trials will be trained for every M [TrainTrialSpec.frequency] trials * searched. 
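   *
   * Illustrative sketch only: a multi-trial algorithm spec with a search
   * stage. The metric id, trial counts, and the elided job specs are example
   * assumptions.
   *
   * ```ts
   * const algorithmSpec: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec = {
   *   multiTrialAlgorithm: "REINFORCEMENT_LEARNING",
   *   metric: { metricId: "top_1_accuracy", goal: "MAXIMIZE" },
   *   searchTrialSpec: {
   *     maxTrialCount: 100,
   *     maxParallelTrialCount: 5,
   *     // searchTrialJobSpec: a GoogleCloudAiplatformV1CustomJobSpec shared by all search trials
   *   },
   *   // trainTrialSpec: optionally configure the train stage as documented here
   * };
   * ```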
*/ trainTrialSpec?: GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec; } function serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec { return { ...data, searchTrialSpec: data["searchTrialSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data["searchTrialSpec"]) : undefined, trainTrialSpec: data["trainTrialSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data["trainTrialSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpec { return { ...data, searchTrialSpec: data["searchTrialSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data["searchTrialSpec"]) : undefined, trainTrialSpec: data["trainTrialSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data["trainTrialSpec"]) : undefined, }; } /** * Represents a metric to optimize. */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecMetricSpec { /** * Required. The optimization goal of the metric. */ goal?: | "GOAL_TYPE_UNSPECIFIED" | "MAXIMIZE" | "MINIMIZE"; /** * Required. The ID of the metric. Must not contain whitespaces. */ metricId?: string; } /** * Represent spec for search trials. */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { /** * The number of failed trials that need to be seen before failing the * NasJob. If set to 0, Vertex AI decides how many trials must fail before the * whole job fails. */ maxFailedTrialCount?: number; /** * Required. The maximum number of trials to run in parallel. */ maxParallelTrialCount?: number; /** * Required. The maximum number of Neural Architecture Search (NAS) trials to * run. */ maxTrialCount?: number; /** * Required. The spec of a search trial job. The same spec applies to all * search trials. */ searchTrialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; } function serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { return { ...data, searchTrialJobSpec: data["searchTrialJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["searchTrialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecSearchTrialSpec { return { ...data, searchTrialJobSpec: data["searchTrialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["searchTrialJobSpec"]) : undefined, }; } /** * Represent spec for train trials. */ export interface GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { /** * Required. Frequency of search trials to start train stage. Top N * [TrainTrialSpec.max_parallel_trial_count] search trials will be trained for * every M [TrainTrialSpec.frequency] trials searched. */ frequency?: number; /** * Required. The maximum number of trials to run in parallel. */ maxParallelTrialCount?: number; /** * Required. The spec of a train trial job. The same spec applies to all * train trials. 
*/ trainTrialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; } function serializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { return { ...data, trainTrialJobSpec: data["trainTrialJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["trainTrialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec(data: any): GoogleCloudAiplatformV1NasJobSpecMultiTrialAlgorithmSpecTrainTrialSpec { return { ...data, trainTrialJobSpec: data["trainTrialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["trainTrialJobSpec"]) : undefined, }; } /** * Represents a uCAIP NasJob trial. */ export interface GoogleCloudAiplatformV1NasTrial { /** * Output only. Time when the NasTrial's status changed to `SUCCEEDED` or * `INFEASIBLE`. */ readonly endTime?: Date; /** * Output only. The final measurement containing the objective value. */ readonly finalMeasurement?: GoogleCloudAiplatformV1Measurement; /** * Output only. The identifier of the NasTrial assigned by the service. */ readonly id?: string; /** * Output only. Time when the NasTrial was started. */ readonly startTime?: Date; /** * Output only. The detailed state of the NasTrial. */ readonly state?: | "STATE_UNSPECIFIED" | "REQUESTED" | "ACTIVE" | "STOPPING" | "SUCCEEDED" | "INFEASIBLE"; } /** * Represents a NasTrial details along with its parameters. If there is a * corresponding train NasTrial, the train NasTrial is also returned. */ export interface GoogleCloudAiplatformV1NasTrialDetail { /** * Output only. Resource name of the NasTrialDetail. */ readonly name?: string; /** * The parameters for the NasJob NasTrial. */ parameters?: string; /** * The requested search NasTrial. */ searchTrial?: GoogleCloudAiplatformV1NasTrial; /** * The train NasTrial corresponding to search_trial. Only populated if * search_trial is used for training. */ trainTrial?: GoogleCloudAiplatformV1NasTrial; } /** * A query to find a number of similar entities. */ export interface GoogleCloudAiplatformV1NearestNeighborQuery { /** * Optional. The embedding vector that be used for similar search. */ embedding?: GoogleCloudAiplatformV1NearestNeighborQueryEmbedding; /** * Optional. The entity id whose similar entities should be searched for. If * embedding is set, search will use embedding instead of entity_id. */ entityId?: string; /** * Optional. The number of similar entities to be retrieved from feature view * for each query. */ neighborCount?: number; /** * Optional. The list of numeric filters. */ numericFilters?: GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter[]; /** * Optional. Parameters that can be set to tune query on the fly. */ parameters?: GoogleCloudAiplatformV1NearestNeighborQueryParameters; /** * Optional. Crowding is a constraint on a neighbor list produced by nearest * neighbor search requiring that no more than * sper_crowding_attribute_neighbor_count of the k neighbors returned have the * same value of crowding_attribute. It's used for improving result diversity. */ perCrowdingAttributeNeighborCount?: number; /** * Optional. The list of string filters. */ stringFilters?: GoogleCloudAiplatformV1NearestNeighborQueryStringFilter[]; } function serializeGoogleCloudAiplatformV1NearestNeighborQuery(data: any): GoogleCloudAiplatformV1NearestNeighborQuery { return { ...data, numericFilters: data["numericFilters"] !== undefined ? 
data["numericFilters"].map((item: any) => (serializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborQuery(data: any): GoogleCloudAiplatformV1NearestNeighborQuery { return { ...data, numericFilters: data["numericFilters"] !== undefined ? data["numericFilters"].map((item: any) => (deserializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(item))) : undefined, }; } /** * The embedding vector. */ export interface GoogleCloudAiplatformV1NearestNeighborQueryEmbedding { /** * Optional. Individual value in the embedding. */ value?: number[]; } /** * Numeric filter is used to search a subset of the entities by using boolean * rules on numeric columns. For example: Database Point 0: {name: "a" * value_int: 42} {name: "b" value_float: 1.0} Database Point 1: {name: "a" * value_int: 10} {name: "b" value_float: 2.0} Database Point 2: {name: "a" * value_int: -1} {name: "b" value_float: 3.0} Query: {name: "a" value_int: 12 * operator: LESS} // Matches Point 1, 2 {name: "b" value_float: 2.0 operator: * EQUAL} // Matches Point 1 */ export interface GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter { /** * Required. Column name in BigQuery that used as filters. */ name?: string; /** * Optional. This MUST be specified for queries and must NOT be specified for * database points. */ op?: | "OPERATOR_UNSPECIFIED" | "LESS" | "LESS_EQUAL" | "EQUAL" | "GREATER_EQUAL" | "GREATER" | "NOT_EQUAL"; /** * double value type. */ valueDouble?: number; /** * float value type. */ valueFloat?: number; /** * int value type. */ valueInt?: bigint; } function serializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(data: any): GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter { return { ...data, valueInt: data["valueInt"] !== undefined ? String(data["valueInt"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborQueryNumericFilter(data: any): GoogleCloudAiplatformV1NearestNeighborQueryNumericFilter { return { ...data, valueInt: data["valueInt"] !== undefined ? BigInt(data["valueInt"]) : undefined, }; } /** * Parameters that can be overrided in each query to tune query latency and * recall. */ export interface GoogleCloudAiplatformV1NearestNeighborQueryParameters { /** * Optional. The number of neighbors to find via approximate search before * exact reordering is performed; if set, this value must be > neighbor_count. */ approximateNeighborCandidates?: number; /** * Optional. The fraction of the number of leaves to search, set at query * time allows user to tune search performance. This value increase result in * both search accuracy and latency increase. The value should be between 0.0 * and 1.0. */ leafNodesSearchFraction?: number; } /** * String filter is used to search a subset of the entities by using boolean * rules on string columns. For example: if a query specifies string filter with * 'name = color, allow_tokens = {red, blue}, deny_tokens = {purple}',' then * that query will match entities that are red or blue, but if those points are * also purple, then they will be excluded even if they are red/blue. Only * string filter is supported for now, numeric filter will be supported in the * near future. */ export interface GoogleCloudAiplatformV1NearestNeighborQueryStringFilter { /** * Optional. The allowed tokens. */ allowTokens?: string[]; /** * Optional. The denied tokens. */ denyTokens?: string[]; /** * Required. Column names in BigQuery that used as filters. 
*/ name?: string; } /** * Nearest neighbors for one query. */ export interface GoogleCloudAiplatformV1NearestNeighbors { /** * All its neighbors. */ neighbors?: GoogleCloudAiplatformV1NearestNeighborsNeighbor[]; } function serializeGoogleCloudAiplatformV1NearestNeighbors(data: any): GoogleCloudAiplatformV1NearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (serializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighbors(data: any): GoogleCloudAiplatformV1NearestNeighbors { return { ...data, neighbors: data["neighbors"] !== undefined ? data["neighbors"].map((item: any) => (deserializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(item))) : undefined, }; } /** * Runtime operation metadata with regard to Matching Engine Index. */ export interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata { /** * The validation stats of the content (per file) to be inserted or updated * on the Matching Engine Index resource. Populated if contentsDeltaUri is * provided as part of Index.metadata. Please note that, currently for those * files that are broken or has unsupported file format, we will not have the * stats for those files. */ contentValidationStats?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats[]; /** * The ingested data size in bytes. */ dataBytesCount?: bigint; } function serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata { return { ...data, contentValidationStats: data["contentValidationStats"] !== undefined ? data["contentValidationStats"].map((item: any) => (serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(item))) : undefined, dataBytesCount: data["dataBytesCount"] !== undefined ? String(data["dataBytesCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata { return { ...data, contentValidationStats: data["contentValidationStats"] !== undefined ? data["contentValidationStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(item))) : undefined, dataBytesCount: data["dataBytesCount"] !== undefined ? BigInt(data["dataBytesCount"]) : undefined, }; } export interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats { /** * Number of records in this file we skipped due to validate errors. */ invalidRecordCount?: bigint; /** * Number of sparse records in this file we skipped due to validate errors. */ invalidSparseRecordCount?: bigint; /** * The detail information of the partial failures encountered for those * invalid records that couldn't be parsed. Up to 50 partial errors will be * reported. */ partialErrors?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError[]; /** * Cloud Storage URI pointing to the original file in user's bucket. */ sourceGcsUri?: string; /** * Number of records in this file that were successfully processed. */ validRecordCount?: bigint; /** * Number of sparse records in this file that were successfully processed. 
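   *
   * Illustrative sketch only (assumes `metadata` already holds a deserialized
   * GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata value):
   * summing the per-file invalid record counts, which are bigint values after
   * deserialization.
   *
   * ```ts
   * const invalidTotal = (metadata.contentValidationStats ?? [])
   *   .reduce((sum, stats) => sum + (stats.invalidRecordCount ?? 0n), 0n);
   * ```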
*/ validSparseRecordCount?: bigint; } function serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats { return { ...data, invalidRecordCount: data["invalidRecordCount"] !== undefined ? String(data["invalidRecordCount"]) : undefined, invalidSparseRecordCount: data["invalidSparseRecordCount"] !== undefined ? String(data["invalidSparseRecordCount"]) : undefined, validRecordCount: data["validRecordCount"] !== undefined ? String(data["validRecordCount"]) : undefined, validSparseRecordCount: data["validSparseRecordCount"] !== undefined ? String(data["validSparseRecordCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats(data: any): GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataContentValidationStats { return { ...data, invalidRecordCount: data["invalidRecordCount"] !== undefined ? BigInt(data["invalidRecordCount"]) : undefined, invalidSparseRecordCount: data["invalidSparseRecordCount"] !== undefined ? BigInt(data["invalidSparseRecordCount"]) : undefined, validRecordCount: data["validRecordCount"] !== undefined ? BigInt(data["validRecordCount"]) : undefined, validSparseRecordCount: data["validSparseRecordCount"] !== undefined ? BigInt(data["validSparseRecordCount"]) : undefined, }; } export interface GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadataRecordError { /** * Empty if the embedding id is failed to parse. */ embeddingId?: string; /** * A human-readable message that is shown to the user to help them fix the * error. Note that this message may change from time to time, your code * should check against error_type as the source of truth. */ errorMessage?: string; /** * The error type of this record. */ errorType?: | "ERROR_TYPE_UNSPECIFIED" | "EMPTY_LINE" | "INVALID_JSON_SYNTAX" | "INVALID_CSV_SYNTAX" | "INVALID_AVRO_SYNTAX" | "INVALID_EMBEDDING_ID" | "EMBEDDING_SIZE_MISMATCH" | "NAMESPACE_MISSING" | "PARSING_ERROR" | "DUPLICATE_NAMESPACE" | "OP_IN_DATAPOINT" | "MULTIPLE_VALUES" | "INVALID_NUMERIC_VALUE" | "INVALID_ENCODING" | "INVALID_SPARSE_DIMENSIONS" | "INVALID_TOKEN_VALUE" | "INVALID_SPARSE_EMBEDDING" | "INVALID_EMBEDDING" | "INVALID_EMBEDDING_METADATA"; /** * The original content of this record. */ rawRecord?: string; /** * Cloud Storage URI pointing to the original file in user's bucket. */ sourceGcsUri?: string; } /** * A neighbor of the query vector. */ export interface GoogleCloudAiplatformV1NearestNeighborsNeighbor { /** * The distance between the neighbor and the query vector. */ distance?: number; /** * The id of the similar entity. */ entityId?: string; /** * The attributes of the neighbor, e.g. filters, crowding and metadata Note * that full entities are returned only when "return_full_entity" is set to * true. Otherwise, only the "entity_id" and "distance" fields are populated. */ entityKeyValues?: GoogleCloudAiplatformV1FetchFeatureValuesResponse; } function serializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(data: any): GoogleCloudAiplatformV1NearestNeighborsNeighbor { return { ...data, entityKeyValues: data["entityKeyValues"] !== undefined ? 
serializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data["entityKeyValues"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NearestNeighborsNeighbor(data: any): GoogleCloudAiplatformV1NearestNeighborsNeighbor { return { ...data, entityKeyValues: data["entityKeyValues"] !== undefined ? deserializeGoogleCloudAiplatformV1FetchFeatureValuesResponse(data["entityKeyValues"]) : undefined, }; } /** * Neighbors for example-based explanations. */ export interface GoogleCloudAiplatformV1Neighbor { /** * Output only. The neighbor distance. */ readonly neighborDistance?: number; /** * Output only. The neighbor id. */ readonly neighborId?: string; } /** * Network spec. */ export interface GoogleCloudAiplatformV1NetworkSpec { /** * Whether to enable public internet access. Default false. */ enableInternetAccess?: boolean; /** * The full name of the Google Compute Engine * [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) */ network?: string; /** * The name of the subnet that this instance is in. Format: * `projects/{project_id_or_number}/regions/{region}/subnetworks/{subnetwork_id}` */ subnetwork?: string; } /** * Represents a mount configuration for Network File System (NFS) to mount. */ export interface GoogleCloudAiplatformV1NfsMount { /** * Required. Destination mount path. The NFS will be mounted for the user * under /mnt/nfs/ */ mountPoint?: string; /** * Required. Source path exported from NFS server. Has to start with '/', and * combined with the ip address, it indicates the source mount path in the * form of `server:path` */ path?: string; /** * Required. IP address of the NFS server. */ server?: string; } /** * The euc configuration of NotebookRuntimeTemplate. */ export interface GoogleCloudAiplatformV1NotebookEucConfig { /** * Output only. Whether ActAs check is bypassed for service account attached * to the VM. If false, we need ActAs check for the default Compute Engine * Service account. When a Runtime is created, a VM is allocated using Default * Compute Engine Service Account. Any user requesting to use this Runtime * requires Service Account User (ActAs) permission over this SA. If true, * Runtime owner is using EUC and does not require the above permission as VM * no longer use default Compute Engine SA, but a P4SA. */ readonly bypassActasCheck?: boolean; /** * Input only. Whether EUC is disabled in this NotebookRuntimeTemplate. In * proto3, the default value of a boolean is false. In this way, by default * EUC will be enabled for NotebookRuntimeTemplate. */ eucDisabled?: boolean; } /** * NotebookExecutionJob represents an instance of a notebook execution. */ export interface GoogleCloudAiplatformV1NotebookExecutionJob { /** * Output only. Timestamp when this NotebookExecutionJob was created. */ readonly createTime?: Date; /** * The custom compute configuration for an execution job. */ customEnvironmentSpec?: GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec; /** * The Dataform Repository pointing to a single file notebook repository. */ dataformRepositorySource?: GoogleCloudAiplatformV1NotebookExecutionJobDataformRepositorySource; /** * The contents of an input notebook file. */ directNotebookSource?: GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource; /** * The display name of the NotebookExecutionJob. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for the notebook execution job. 
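   *
   * Illustrative sketch only: a minimal NotebookExecutionJob that reads a
   * notebook from Cloud Storage and sources its compute from a runtime
   * template. The bucket paths and the template resource name are placeholder
   * assumptions.
   *
   * ```ts
   * const job: GoogleCloudAiplatformV1NotebookExecutionJob = {
   *   displayName: "nightly-report",
   *   gcsNotebookSource: { uri: "gs://my-bucket/notebooks/report.ipynb" },
   *   gcsOutputUri: "gs://my-bucket/notebook-results",
   *   notebookRuntimeTemplateResourceName:
   *     "projects/my-project/locations/us-central1/notebookRuntimeTemplates/my-template",
   * };
   * ```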
This * field is auto-populated if the NotebookRuntimeTemplate has an encryption * spec. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Max running time of the execution job in seconds (default 86400s / 24 * hrs). */ executionTimeout?: number /* Duration */; /** * The user email to run the execution as. Only supported by Colab runtimes. */ executionUser?: string; /** * The Cloud Storage url pointing to the ipynb file. Format: * `gs://bucket/notebook_file.ipynb` */ gcsNotebookSource?: GoogleCloudAiplatformV1NotebookExecutionJobGcsNotebookSource; /** * The Cloud Storage location to upload the result to. Format: * `gs://bucket-name` */ gcsOutputUri?: string; /** * Output only. The state of the NotebookExecutionJob. */ readonly jobState?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * The name of the kernel to use during notebook execution. If unset, the * default kernel is used. */ kernelName?: string; /** * The labels with user-defined metadata to organize NotebookExecutionJobs. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. */ labels?: { [key: string]: string }; /** * Output only. The resource name of this NotebookExecutionJob. Format: * `projects/{project_id}/locations/{location}/notebookExecutionJobs/{job_id}` */ readonly name?: string; /** * The NotebookRuntimeTemplate to source compute configuration from. */ notebookRuntimeTemplateResourceName?: string; /** * The Schedule resource name if this job is triggered by one. Format: * `projects/{project_id}/locations/{location}/schedules/{schedule_id}` */ scheduleResourceName?: string; /** * The service account to run the execution as. */ serviceAccount?: string; /** * Output only. Populated when the NotebookExecutionJob is completed. When * there is an error during notebook execution, the error details are * populated. */ readonly status?: GoogleRpcStatus; /** * Output only. Timestamp when this NotebookExecutionJob was most recently * updated. */ readonly updateTime?: Date; /** * The Workbench runtime configuration to use for the notebook execution. */ workbenchRuntime?: GoogleCloudAiplatformV1NotebookExecutionJobWorkbenchRuntime; } function serializeGoogleCloudAiplatformV1NotebookExecutionJob(data: any): GoogleCloudAiplatformV1NotebookExecutionJob { return { ...data, customEnvironmentSpec: data["customEnvironmentSpec"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data["customEnvironmentSpec"]) : undefined, directNotebookSource: data["directNotebookSource"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data["directNotebookSource"]) : undefined, executionTimeout: data["executionTimeout"] !== undefined ? data["executionTimeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookExecutionJob(data: any): GoogleCloudAiplatformV1NotebookExecutionJob { return { ...data, createTime: data["createTime"] !== undefined ? 
new Date(data["createTime"]) : undefined, customEnvironmentSpec: data["customEnvironmentSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data["customEnvironmentSpec"]) : undefined, directNotebookSource: data["directNotebookSource"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data["directNotebookSource"]) : undefined, executionTimeout: data["executionTimeout"] !== undefined ? data["executionTimeout"] : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Compute configuration to use for an execution job. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec { /** * The specification of a single machine for the execution job. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * The network configuration to use for the execution job. */ networkSpec?: GoogleCloudAiplatformV1NetworkSpec; /** * The specification of a persistent disk to attach for the execution job. */ persistentDiskSpec?: GoogleCloudAiplatformV1PersistentDiskSpec; } function serializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data: any): GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec { return { ...data, persistentDiskSpec: data["persistentDiskSpec"] !== undefined ? serializeGoogleCloudAiplatformV1PersistentDiskSpec(data["persistentDiskSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec(data: any): GoogleCloudAiplatformV1NotebookExecutionJobCustomEnvironmentSpec { return { ...data, persistentDiskSpec: data["persistentDiskSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1PersistentDiskSpec(data["persistentDiskSpec"]) : undefined, }; } /** * The Dataform Repository containing the input notebook. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobDataformRepositorySource { /** * The commit SHA to read repository with. If unset, the file will be read at * HEAD. */ commitSha?: string; /** * The resource name of the Dataform Repository. Format: * `projects/{project_id}/locations/{location}/repositories/{repository_id}` */ dataformRepositoryResourceName?: string; } /** * The content of the input notebook in ipynb format. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource { /** * The base64-encoded contents of the input notebook file. */ content?: Uint8Array; } function serializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data: any): GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource { return { ...data, content: data["content"] !== undefined ? encodeBase64(data["content"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource(data: any): GoogleCloudAiplatformV1NotebookExecutionJobDirectNotebookSource { return { ...data, content: data["content"] !== undefined ? decodeBase64(data["content"] as string) : undefined, }; } /** * The Cloud Storage uri for the input notebook. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobGcsNotebookSource { /** * The version of the Cloud Storage object to read. If unset, the current * version of the object is read. See * https://cloud.google.com/storage/docs/metadata#generation-number. */ generation?: string; /** * The Cloud Storage uri pointing to the ipynb file. 
Format: * `gs://bucket/notebook_file.ipynb` */ uri?: string; } /** * Configuration for a Workbench Instances-based environment. */ export interface GoogleCloudAiplatformV1NotebookExecutionJobWorkbenchRuntime { } /** * The idle shutdown configuration of NotebookRuntimeTemplate, which contains * the idle_timeout as required field. */ export interface GoogleCloudAiplatformV1NotebookIdleShutdownConfig { /** * Whether Idle Shutdown is disabled in this NotebookRuntimeTemplate. */ idleShutdownDisabled?: boolean; /** * Required. Duration is accurate to the second. In Notebook, Idle Timeout is * accurate to minute so the range of idle_timeout (second) is: 10 * 60 ~ 1440 * * 60. */ idleTimeout?: number /* Duration */; } function serializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data: any): GoogleCloudAiplatformV1NotebookIdleShutdownConfig { return { ...data, idleTimeout: data["idleTimeout"] !== undefined ? data["idleTimeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data: any): GoogleCloudAiplatformV1NotebookIdleShutdownConfig { return { ...data, idleTimeout: data["idleTimeout"] !== undefined ? data["idleTimeout"] : undefined, }; } /** * Notebook Reservation Affinity for consuming Zonal reservation. */ export interface GoogleCloudAiplatformV1NotebookReservationAffinity { /** * Required. Specifies the type of reservation from which this instance can * consume resources: RESERVATION_ANY (default), RESERVATION_SPECIFIC, or * RESERVATION_NONE. See Consuming reserved instances for examples. */ consumeReservationType?: | "RESERVATION_AFFINITY_TYPE_UNSPECIFIED" | "RESERVATION_NONE" | "RESERVATION_ANY" | "RESERVATION_SPECIFIC"; /** * Optional. Corresponds to the label key of a reservation resource. To * target a RESERVATION_SPECIFIC by name, use * compute.googleapis.com/reservation-name as the key and specify the name of * your reservation as its value. */ key?: string; /** * Optional. Corresponds to the label values of a reservation resource. This * must be the full path name of Reservation. */ values?: string[]; } /** * A runtime is a virtual machine allocated to a particular user for a * particular Notebook file on temporary basis with lifetime limited to 24 * hours. */ export interface GoogleCloudAiplatformV1NotebookRuntime { /** * Output only. Timestamp when this NotebookRuntime was created. */ readonly createTime?: Date; /** * Output only. The specification of persistent disk attached to the notebook * runtime as data disk storage. */ readonly dataPersistentDiskSpec?: GoogleCloudAiplatformV1PersistentDiskSpec; /** * The description of the NotebookRuntime. */ description?: string; /** * Required. The display name of the NotebookRuntime. The name can be up to * 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Output only. Customer-managed encryption key spec for the notebook * runtime. */ readonly encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. EUC configuration of the notebook runtime. */ readonly eucConfig?: GoogleCloudAiplatformV1NotebookEucConfig; /** * Output only. Timestamp when this NotebookRuntime will be expired: 1. * System Predefined NotebookRuntime: 24 hours after creation. After * expiration, system predifined runtime will be deleted. 2. User created * NotebookRuntime: 6 months after last upgrade. After expiration, user * created runtime will be stopped and allowed for upgrade. */ readonly expirationTime?: Date; /** * Output only. 
The health state of the NotebookRuntime. */ readonly healthState?: | "HEALTH_STATE_UNSPECIFIED" | "HEALTHY" | "UNHEALTHY"; /** * Output only. The idle shutdown configuration of the notebook runtime. */ readonly idleShutdownConfig?: GoogleCloudAiplatformV1NotebookIdleShutdownConfig; /** * Output only. Whether NotebookRuntime is upgradable. */ readonly isUpgradable?: boolean; /** * The labels with user-defined metadata to organize your NotebookRuntime. * Label keys and values can be no longer than 64 characters (Unicode * codepoints), can only contain lowercase letters, numeric characters, * underscores and dashes. International characters are allowed. No more than * 64 user labels can be associated with one NotebookRuntime (System labels * are excluded). See https://goo.gl/xmQnxf for more information and examples * of labels. System reserved label keys are prefixed with * "aiplatform.googleapis.com/" and are immutable. Following system labels * exist for NotebookRuntime: * * "aiplatform.googleapis.com/notebook_runtime_gce_instance_id": output only, * its value is the Compute Engine instance id. * * "aiplatform.googleapis.com/colab_enterprise_entry_service": its value is * either "bigquery" or "vertex"; if absent, it should be "vertex". This is to * describe the entry service, either BigQuery or Vertex. */ labels?: { [key: string]: string }; /** * Output only. The specification of a single machine used by the notebook * runtime. */ readonly machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Output only. The resource name of the NotebookRuntime. */ readonly name?: string; /** * Output only. Network spec of the notebook runtime. */ readonly networkSpec?: GoogleCloudAiplatformV1NetworkSpec; /** * Optional. The Compute Engine tags to add to runtime (see [Tagging * instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). */ networkTags?: string[]; /** * Output only. The pointer to NotebookRuntimeTemplate this NotebookRuntime * is created from. */ readonly notebookRuntimeTemplateRef?: GoogleCloudAiplatformV1NotebookRuntimeTemplateRef; /** * Output only. The type of the notebook runtime. */ readonly notebookRuntimeType?: | "NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED" | "USER_DEFINED" | "ONE_CLICK"; /** * Output only. The proxy endpoint used to access the NotebookRuntime. */ readonly proxyUri?: string; /** * Output only. Reservation Affinity of the notebook runtime. */ readonly reservationAffinity?: GoogleCloudAiplatformV1NotebookReservationAffinity; /** * Output only. The runtime (instance) state of the NotebookRuntime. */ readonly runtimeState?: | "RUNTIME_STATE_UNSPECIFIED" | "RUNNING" | "BEING_STARTED" | "BEING_STOPPED" | "STOPPED" | "BEING_UPGRADED" | "ERROR" | "INVALID"; /** * Required. The user email of the NotebookRuntime. */ runtimeUser?: string; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Deprecated: This field is no longer used and the "Vertex AI * Notebook Service Account" * (service-PROJECT_NUMBER@gcp-sa-aiplatform-vm.iam.gserviceaccount.com) is * used for the runtime workload identity. See * https://cloud.google.com/iam/docs/service-agents#vertex-ai-notebook-service-account * for more details. The service account that the NotebookRuntime workload * runs as. */ readonly serviceAccount?: string; /** * Output only. Runtime Shielded VM spec. */ readonly shieldedVmConfig?: GoogleCloudAiplatformV1ShieldedVmConfig; /** * Output only. 
Software config of the notebook runtime. */ readonly softwareConfig?: GoogleCloudAiplatformV1NotebookSoftwareConfig; /** * Output only. Timestamp when this NotebookRuntime was most recently * updated. */ readonly updateTime?: Date; /** * Output only. The VM os image version of NotebookRuntime. */ readonly version?: string; } /** * A template that specifies runtime configurations such as machine type, * runtime version, network configurations, etc. Multiple runtimes can be * created from a runtime template. */ export interface GoogleCloudAiplatformV1NotebookRuntimeTemplate { /** * Output only. Timestamp when this NotebookRuntimeTemplate was created. */ readonly createTime?: Date; /** * Optional. The specification of persistent disk attached to the runtime as * data disk storage. */ dataPersistentDiskSpec?: GoogleCloudAiplatformV1PersistentDiskSpec; /** * The description of the NotebookRuntimeTemplate. */ description?: string; /** * Required. The display name of the NotebookRuntimeTemplate. The name can be * up to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for the notebook runtime. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * EUC configuration of the NotebookRuntimeTemplate. */ eucConfig?: GoogleCloudAiplatformV1NotebookEucConfig; /** * The idle shutdown configuration of NotebookRuntimeTemplate. This config * will only be set when idle shutdown is enabled. */ idleShutdownConfig?: GoogleCloudAiplatformV1NotebookIdleShutdownConfig; /** * Output only. Deprecated: This field has no behavior. Use * notebook_runtime_type = 'ONE_CLICK' instead. The default template to use if * not specified. */ readonly isDefault?: boolean; /** * The labels with user-defined metadata to organize the * NotebookRuntimeTemplates. Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * Optional. Immutable. The specification of a single machine for the * template. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * The resource name of the NotebookRuntimeTemplate. */ name?: string; /** * Optional. Network spec. */ networkSpec?: GoogleCloudAiplatformV1NetworkSpec; /** * Optional. The Compute Engine tags to add to runtime (see [Tagging * instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)). */ networkTags?: string[]; /** * Optional. Immutable. The type of the notebook runtime template. */ notebookRuntimeType?: | "NOTEBOOK_RUNTIME_TYPE_UNSPECIFIED" | "USER_DEFINED" | "ONE_CLICK"; /** * Optional. Reservation Affinity of the notebook runtime template. */ reservationAffinity?: GoogleCloudAiplatformV1NotebookReservationAffinity; /** * Deprecated: This field is ignored and the "Vertex AI Notebook Service * Account" * (service-PROJECT_NUMBER@gcp-sa-aiplatform-vm.iam.gserviceaccount.com) is * used for the runtime workload identity. See * https://cloud.google.com/iam/docs/service-agents#vertex-ai-notebook-service-account * for more details. For NotebookExecutionJob, use * NotebookExecutionJob.service_account instead. The service account that the * runtime workload runs as. 
You can use any service account within the same * project, but you must have the service account user permission to use the * instance. If not specified, the [Compute Engine default service * account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account) * is used. */ serviceAccount?: string; /** * Optional. Immutable. Runtime Shielded VM spec. */ shieldedVmConfig?: GoogleCloudAiplatformV1ShieldedVmConfig; /** * Optional. The notebook software configuration of the notebook runtime. */ softwareConfig?: GoogleCloudAiplatformV1NotebookSoftwareConfig; /** * Output only. Timestamp when this NotebookRuntimeTemplate was most recently * updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data: any): GoogleCloudAiplatformV1NotebookRuntimeTemplate { return { ...data, dataPersistentDiskSpec: data["dataPersistentDiskSpec"] !== undefined ? serializeGoogleCloudAiplatformV1PersistentDiskSpec(data["dataPersistentDiskSpec"]) : undefined, idleShutdownConfig: data["idleShutdownConfig"] !== undefined ? serializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data["idleShutdownConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1NotebookRuntimeTemplate(data: any): GoogleCloudAiplatformV1NotebookRuntimeTemplate { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, dataPersistentDiskSpec: data["dataPersistentDiskSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1PersistentDiskSpec(data["dataPersistentDiskSpec"]) : undefined, idleShutdownConfig: data["idleShutdownConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1NotebookIdleShutdownConfig(data["idleShutdownConfig"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Points to a NotebookRuntimeTemplateRef. */ export interface GoogleCloudAiplatformV1NotebookRuntimeTemplateRef { /** * Immutable. A resource name of the NotebookRuntimeTemplate. */ notebookRuntimeTemplate?: string; } /** * Notebook Software Config. */ export interface GoogleCloudAiplatformV1NotebookSoftwareConfig { /** * Optional. Environment variables to be passed to the container. Maximum * limit is 100. */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Optional. Post startup script config. */ postStartupScriptConfig?: GoogleCloudAiplatformV1PostStartupScriptConfig; } /** * Config for evaluation output. */ export interface GoogleCloudAiplatformV1OutputConfig { /** * Cloud storage destination for evaluation output. */ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; } /** * Input for pairwise metric. */ export interface GoogleCloudAiplatformV1PairwiseMetricInput { /** * Required. Pairwise metric instance. */ instance?: GoogleCloudAiplatformV1PairwiseMetricInstance; /** * Required. Spec for pairwise metric. */ metricSpec?: GoogleCloudAiplatformV1PairwiseMetricSpec; } function serializeGoogleCloudAiplatformV1PairwiseMetricInput(data: any): GoogleCloudAiplatformV1PairwiseMetricInput { return { ...data, instance: data["instance"] !== undefined ? serializeGoogleCloudAiplatformV1PairwiseMetricInstance(data["instance"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PairwiseMetricInput(data: any): GoogleCloudAiplatformV1PairwiseMetricInput { return { ...data, instance: data["instance"] !== undefined ? deserializeGoogleCloudAiplatformV1PairwiseMetricInstance(data["instance"]) : undefined, }; } /** * Pairwise metric instance. 
Usually one instance corresponds to one row in an * evaluation dataset. */ export interface GoogleCloudAiplatformV1PairwiseMetricInstance { /** * Key-value contents for the mutlimodality input, including text, image, * video, audio, and pdf, etc. The key is placeholder in metric prompt * template, and the value is the multimodal content. */ contentMapInstance?: GoogleCloudAiplatformV1ContentMap; /** * Instance specified as a json string. String key-value pairs are expected * in the json_instance to render PairwiseMetricSpec.instance_prompt_template. */ jsonInstance?: string; } function serializeGoogleCloudAiplatformV1PairwiseMetricInstance(data: any): GoogleCloudAiplatformV1PairwiseMetricInstance { return { ...data, contentMapInstance: data["contentMapInstance"] !== undefined ? serializeGoogleCloudAiplatformV1ContentMap(data["contentMapInstance"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PairwiseMetricInstance(data: any): GoogleCloudAiplatformV1PairwiseMetricInstance { return { ...data, contentMapInstance: data["contentMapInstance"] !== undefined ? deserializeGoogleCloudAiplatformV1ContentMap(data["contentMapInstance"]) : undefined, }; } /** * Spec for pairwise metric result. */ export interface GoogleCloudAiplatformV1PairwiseMetricResult { /** * Output only. Spec for custom output. */ readonly customOutput?: GoogleCloudAiplatformV1CustomOutput; /** * Output only. Explanation for pairwise metric score. */ readonly explanation?: string; /** * Output only. Pairwise metric choice. */ readonly pairwiseChoice?: | "PAIRWISE_CHOICE_UNSPECIFIED" | "BASELINE" | "CANDIDATE" | "TIE"; } /** * Spec for pairwise metric. */ export interface GoogleCloudAiplatformV1PairwiseMetricSpec { /** * Optional. The field name of the baseline response. */ baselineResponseFieldName?: string; /** * Optional. The field name of the candidate response. */ candidateResponseFieldName?: string; /** * Optional. CustomOutputFormatConfig allows customization of metric output. * When this config is set, the default output is replaced with the raw output * string. If a custom format is chosen, the `pairwise_choice` and * `explanation` fields in the corresponding metric result will be empty. */ customOutputFormatConfig?: GoogleCloudAiplatformV1CustomOutputFormatConfig; /** * Required. Metric prompt template for pairwise metric. */ metricPromptTemplate?: string; /** * Optional. System instructions for pairwise metric. */ systemInstruction?: string; } /** * Input for pairwise question answering quality metric. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInput { /** * Required. Pairwise question answering quality instance. */ instance?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInstance; /** * Required. Spec for pairwise question answering quality score metric. */ metricSpec?: GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualitySpec; } /** * Spec for pairwise question answering quality instance. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityInstance { /** * Required. Output of the baseline model. */ baselinePrediction?: string; /** * Required. Text to answer the question. */ context?: string; /** * Required. Question Answering prompt for LLM. */ instruction?: string; /** * Required. Output of the candidate model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for pairwise question answering quality result. 
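 *
 * Illustrative sketch only (assumes `result` holds a value of this type
 * returned by an evaluation call):
 *
 * ```ts
 * if (result.pairwiseChoice === "CANDIDATE") {
 *   console.log(`Candidate answer preferred: ${result.explanation}`);
 * }
 * ```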
*/ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualityResult { /** * Output only. Confidence for question answering quality score. */ readonly confidence?: number; /** * Output only. Explanation for question answering quality score. */ readonly explanation?: string; /** * Output only. Pairwise question answering prediction choice. */ readonly pairwiseChoice?: | "PAIRWISE_CHOICE_UNSPECIFIED" | "BASELINE" | "CANDIDATE" | "TIE"; } /** * Spec for pairwise question answering quality score metric. */ export interface GoogleCloudAiplatformV1PairwiseQuestionAnsweringQualitySpec { /** * Optional. Whether to use instance.reference to compute question answering * quality. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for pairwise summarization quality metric. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualityInput { /** * Required. Pairwise summarization quality instance. */ instance?: GoogleCloudAiplatformV1PairwiseSummarizationQualityInstance; /** * Required. Spec for pairwise summarization quality score metric. */ metricSpec?: GoogleCloudAiplatformV1PairwiseSummarizationQualitySpec; } /** * Spec for pairwise summarization quality instance. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualityInstance { /** * Required. Output of the baseline model. */ baselinePrediction?: string; /** * Required. Text to be summarized. */ context?: string; /** * Required. Summarization prompt for LLM. */ instruction?: string; /** * Required. Output of the candidate model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for pairwise summarization quality result. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualityResult { /** * Output only. Confidence for summarization quality score. */ readonly confidence?: number; /** * Output only. Explanation for summarization quality score. */ readonly explanation?: string; /** * Output only. Pairwise summarization prediction choice. */ readonly pairwiseChoice?: | "PAIRWISE_CHOICE_UNSPECIFIED" | "BASELINE" | "CANDIDATE" | "TIE"; } /** * Spec for pairwise summarization quality score metric. */ export interface GoogleCloudAiplatformV1PairwiseSummarizationQualitySpec { /** * Optional. Whether to use instance.reference to compute pairwise * summarization quality. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * A datatype containing media that is part of a multi-part `Content` message. * A `Part` consists of data which has an associated datatype. A `Part` can only * contain one of the accepted types in `Part.data`. A `Part` must have a fixed * IANA MIME type identifying the type and subtype of the media if `inline_data` * or `file_data` field is filled with raw bytes. */ export interface GoogleCloudAiplatformV1Part { /** * Optional. Result of executing the [ExecutableCode]. */ codeExecutionResult?: GoogleCloudAiplatformV1CodeExecutionResult; /** * Optional. Code generated by the model that is meant to be executed. */ executableCode?: GoogleCloudAiplatformV1ExecutableCode; /** * Optional. URI based data. */ fileData?: GoogleCloudAiplatformV1FileData; /** * Optional. A predicted [FunctionCall] returned from the model that contains * a string representing the [FunctionDeclaration.name] with the parameters * and their values. */ functionCall?: GoogleCloudAiplatformV1FunctionCall; /** * Optional. 
The result output of a [FunctionCall] that contains a string * representing the [FunctionDeclaration.name] and a structured JSON object * containing any output from the function call. It is used as context to the * model. */ functionResponse?: GoogleCloudAiplatformV1FunctionResponse; /** * Optional. Inlined bytes data. */ inlineData?: GoogleCloudAiplatformV1Blob; /** * Optional. Text part (can be code). */ text?: string; /** * Optional. Video metadata. The metadata should only be specified while the * video data is presented in inline_data or file_data. */ videoMetadata?: GoogleCloudAiplatformV1VideoMetadata; } function serializeGoogleCloudAiplatformV1Part(data: any): GoogleCloudAiplatformV1Part { return { ...data, inlineData: data["inlineData"] !== undefined ? serializeGoogleCloudAiplatformV1Blob(data["inlineData"]) : undefined, videoMetadata: data["videoMetadata"] !== undefined ? serializeGoogleCloudAiplatformV1VideoMetadata(data["videoMetadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Part(data: any): GoogleCloudAiplatformV1Part { return { ...data, inlineData: data["inlineData"] !== undefined ? deserializeGoogleCloudAiplatformV1Blob(data["inlineData"]) : undefined, videoMetadata: data["videoMetadata"] !== undefined ? deserializeGoogleCloudAiplatformV1VideoMetadata(data["videoMetadata"]) : undefined, }; } /** * Request message for JobService.PauseModelDeploymentMonitoringJob. */ export interface GoogleCloudAiplatformV1PauseModelDeploymentMonitoringJobRequest { } /** * Request message for ScheduleService.PauseSchedule. */ export interface GoogleCloudAiplatformV1PauseScheduleRequest { } /** * Represents the spec of persistent disk options. */ export interface GoogleCloudAiplatformV1PersistentDiskSpec { /** * Size in GB of the disk (default is 100GB). */ diskSizeGb?: bigint; /** * Type of the disk (default is "pd-standard"). Valid values: "pd-ssd" * (Persistent Disk Solid State Drive) "pd-standard" (Persistent Disk Hard * Disk Drive) "pd-balanced" (Balanced Persistent Disk) "pd-extreme" (Extreme * Persistent Disk) */ diskType?: string; } function serializeGoogleCloudAiplatformV1PersistentDiskSpec(data: any): GoogleCloudAiplatformV1PersistentDiskSpec { return { ...data, diskSizeGb: data["diskSizeGb"] !== undefined ? String(data["diskSizeGb"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PersistentDiskSpec(data: any): GoogleCloudAiplatformV1PersistentDiskSpec { return { ...data, diskSizeGb: data["diskSizeGb"] !== undefined ? BigInt(data["diskSizeGb"]) : undefined, }; } /** * Represents long-lasting resources that are dedicated to users to run custom * workloads. A PersistentResource can have multiple node pools and each node * pool can have its own machine spec. */ export interface GoogleCloudAiplatformV1PersistentResource { /** * Output only. Time when the PersistentResource was created. */ readonly createTime?: Date; /** * Optional. The display name of the PersistentResource. The name can be up * to 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Customer-managed encryption key spec for a PersistentResource. * If set, this PersistentResource and all sub-resources of this * PersistentResource will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Only populated when persistent resource's state is `STOPPING` * or `ERROR`. */ readonly error?: GoogleRpcStatus; /** * Optional. The labels with user-defined metadata to organize * PersistentResource.
Label keys and values can be no longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. See https://goo.gl/xmQnxf for more information and examples of * labels. */ labels?: { [key: string]: string }; /** * Immutable. Resource name of a PersistentResource. */ name?: string; /** * Optional. The full name of the Compute Engine * [network](/compute/docs/networks-and-firewalls#networks) to peered with * Vertex AI to host the persistent resources. For example, * `projects/12345/global/networks/myVPC`. * [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in `12345`, and {network} is a network name. To specify * this field, you must have already [configured VPC Network Peering for * Vertex AI](https://cloud.google.com/vertex-ai/docs/general/vpc-peering). If * this field is left unspecified, the resources aren't peered with any * network. */ network?: string; /** * Optional. A list of names for the reserved IP ranges under the VPC network * that can be used for this persistent resource. If set, we will deploy the * persistent resource within the provided IP ranges. Otherwise, the * persistent resource is deployed to any IP ranges under the provided VPC * network. Example: ['vertex-ai-ip-range']. */ reservedIpRanges?: string[]; /** * Required. The spec of the pools of different resources. */ resourcePools?: GoogleCloudAiplatformV1ResourcePool[]; /** * Output only. Runtime information of the Persistent Resource. */ readonly resourceRuntime?: GoogleCloudAiplatformV1ResourceRuntime; /** * Optional. Persistent Resource runtime spec. For example, used for Ray * cluster configuration. */ resourceRuntimeSpec?: GoogleCloudAiplatformV1ResourceRuntimeSpec; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Time when the PersistentResource for the first time entered * the `RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of a Study. */ readonly state?: | "STATE_UNSPECIFIED" | "PROVISIONING" | "RUNNING" | "STOPPING" | "ERROR" | "REBOOTING" | "UPDATING"; /** * Output only. Time when the PersistentResource was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1PersistentResource(data: any): GoogleCloudAiplatformV1PersistentResource { return { ...data, resourcePools: data["resourcePools"] !== undefined ? data["resourcePools"].map((item: any) => (serializeGoogleCloudAiplatformV1ResourcePool(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1PersistentResource(data: any): GoogleCloudAiplatformV1PersistentResource { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, resourcePools: data["resourcePools"] !== undefined ? data["resourcePools"].map((item: any) => (deserializeGoogleCloudAiplatformV1ResourcePool(item))) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * An instance of a machine learning PipelineJob. */ export interface GoogleCloudAiplatformV1PipelineJob { /** * Output only. Pipeline creation time. 
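 *
 * On the wire this is an RFC 3339 timestamp string; the client's deserializer
 * (deserializeGoogleCloudAiplatformV1PipelineJob below) converts it to a
 * JavaScript `Date`. For example, with `job` a hypothetical, already-fetched
 * PipelineJob (the value is purely illustrative):
 *
 *     job.createTime?.toISOString(); // e.g. "2024-05-01T12:00:00.000Z"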
*/ readonly createTime?: Date; /** * The display name of the Pipeline. The name can be up to 128 characters * long and can consist of any UTF-8 characters. */ displayName?: string; /** * Customer-managed encryption key spec for a pipelineJob. If set, this * PipelineJob and all of its sub-resources will be secured by this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Pipeline end time. */ readonly endTime?: Date; /** * Output only. The error that occurred during pipeline execution. Only * populated when the pipeline's state is FAILED or CANCELLED. */ readonly error?: GoogleRpcStatus; /** * Output only. The details of pipeline run. Not available in the list view. */ readonly jobDetail?: GoogleCloudAiplatformV1PipelineJobDetail; /** * The labels with user-defined metadata to organize PipelineJob. Label keys * and values can be no longer than 64 characters (Unicode codepoints), can * only contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. Note that there is a reserved label key * for Vertex AI Pipelines: `vertex-ai-pipelines-run-billing-id`; any user-set * value will be overridden. */ labels?: { [key: string]: string }; /** * Output only. The resource name of the PipelineJob. */ readonly name?: string; /** * The full name of the Compute Engine * [network](/compute/docs/networks-and-firewalls#networks) to which the * Pipeline Job's workload should be peered. For example, * `projects/12345/global/networks/myVPC`. * [Format](/compute/docs/reference/rest/v1/networks/insert) is of the form * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in `12345`, and {network} is a network name. Private * services access must already be configured for the network. Pipeline job * will apply the network configuration to the Google Cloud resources being * launched, if applicable, such as Vertex AI Training or Dataflow job. If left * unspecified, the workload is not peered with any network. */ network?: string; /** * The spec of the pipeline. */ pipelineSpec?: { [key: string]: any }; /** * Optional. Whether to do component level validations before job creation. */ preflightValidations?: boolean; /** * A list of names for the reserved ip ranges under the VPC network that can * be used for this Pipeline Job's workload. If set, we will deploy the * Pipeline Job's workload within the provided ip ranges. Otherwise, the job * will be deployed to any ip ranges under the provided VPC network. Example: * ['vertex-ai-ip-range']. */ reservedIpRanges?: string[]; /** * Runtime config of the pipeline. */ runtimeConfig?: GoogleCloudAiplatformV1PipelineJobRuntimeConfig; /** * Output only. The schedule resource name. Only returned if the Pipeline is * created by Schedule API. */ readonly scheduleName?: string; /** * The service account that the pipeline workload runs as. If not specified, * the Compute Engine default service account in the project will be used. See * https://cloud.google.com/compute/docs/access/service-accounts#default_service_account * Users starting the pipeline must have the `iam.serviceAccounts.actAs` * permission on this service account. */ serviceAccount?: string; /** * Output only. Pipeline start time. */ readonly startTime?: Date; /** * Output only. The detailed state of the job.
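 *
 * A minimal sketch of checking for a terminal state, where `job` is a
 * hypothetical, already-fetched GoogleCloudAiplatformV1PipelineJob:
 *
 *     const finished = job.state === "PIPELINE_STATE_SUCCEEDED" ||
 *       job.state === "PIPELINE_STATE_FAILED" ||
 *       job.state === "PIPELINE_STATE_CANCELLED";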
*/ readonly state?: | "PIPELINE_STATE_UNSPECIFIED" | "PIPELINE_STATE_QUEUED" | "PIPELINE_STATE_PENDING" | "PIPELINE_STATE_RUNNING" | "PIPELINE_STATE_SUCCEEDED" | "PIPELINE_STATE_FAILED" | "PIPELINE_STATE_CANCELLING" | "PIPELINE_STATE_CANCELLED" | "PIPELINE_STATE_PAUSED"; /** * Output only. Pipeline template metadata. Will fill up fields if * PipelineJob.template_uri is from supported template registry. */ readonly templateMetadata?: GoogleCloudAiplatformV1PipelineTemplateMetadata; /** * A template uri from where the PipelineJob.pipeline_spec, if empty, will be * downloaded. Currently, only uri from Vertex Template Registry & Gallery is * supported. Reference to * https://cloud.google.com/vertex-ai/docs/pipelines/create-pipeline-template. */ templateUri?: string; /** * Output only. Timestamp when this PipelineJob was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1PipelineJob(data: any): GoogleCloudAiplatformV1PipelineJob { return { ...data, runtimeConfig: data["runtimeConfig"] !== undefined ? serializeGoogleCloudAiplatformV1PipelineJobRuntimeConfig(data["runtimeConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PipelineJob(data: any): GoogleCloudAiplatformV1PipelineJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, runtimeConfig: data["runtimeConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1PipelineJobRuntimeConfig(data["runtimeConfig"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * The runtime detail of PipelineJob. */ export interface GoogleCloudAiplatformV1PipelineJobDetail { /** * Output only. The context of the pipeline. */ readonly pipelineContext?: GoogleCloudAiplatformV1Context; /** * Output only. The context of the current pipeline run. */ readonly pipelineRunContext?: GoogleCloudAiplatformV1Context; /** * Output only. The runtime details of the tasks under the pipeline. */ readonly taskDetails?: GoogleCloudAiplatformV1PipelineTaskDetail[]; } /** * The runtime config of a PipelineJob. */ export interface GoogleCloudAiplatformV1PipelineJobRuntimeConfig { /** * Represents the failure policy of a pipeline. Currently, the default of a * pipeline is that the pipeline will continue to run until no more tasks can * be executed, also known as PIPELINE_FAILURE_POLICY_FAIL_SLOW. However, if a * pipeline is set to PIPELINE_FAILURE_POLICY_FAIL_FAST, it will stop * scheduling any new tasks when a task has failed. Any scheduled tasks will * continue to completion. */ failurePolicy?: | "PIPELINE_FAILURE_POLICY_UNSPECIFIED" | "PIPELINE_FAILURE_POLICY_FAIL_SLOW" | "PIPELINE_FAILURE_POLICY_FAIL_FAST"; /** * Required. A path in a Cloud Storage bucket, which will be treated as the * root output directory of the pipeline. It is used by the system to generate * the paths of output artifacts. The artifact paths are generated with a * sub-path pattern `{job_id}/{task_id}/{output_key}` under the specified * output directory. The service account specified in this pipeline must have * the `storage.objects.get` and `storage.objects.create` permissions for this * bucket. */ gcsOutputDirectory?: string; /** * The runtime artifacts of the PipelineJob. The key will be the input * artifact name and the value would be one of the InputArtifact. 
*/ inputArtifacts?: { [key: string]: GoogleCloudAiplatformV1PipelineJobRuntimeConfigInputArtifact }; /** * Deprecated. Use RuntimeConfig.parameter_values instead. The runtime * parameters of the PipelineJob. The parameters will be passed into * PipelineJob.pipeline_spec to replace the placeholders at runtime. This * field is used by pipelines built using * `PipelineJob.pipeline_spec.schema_version` 2.0.0 or lower, such as * pipelines built using Kubeflow Pipelines SDK 1.8 or lower. */ parameters?: { [key: string]: GoogleCloudAiplatformV1Value }; /** * The runtime parameters of the PipelineJob. The parameters will be passed * into PipelineJob.pipeline_spec to replace the placeholders at runtime. This * field is used by pipelines built using * `PipelineJob.pipeline_spec.schema_version` 2.1.0, such as pipelines built * using Kubeflow Pipelines SDK 1.9 or higher and the v2 DSL. */ parameterValues?: { [key: string]: any }; } function serializeGoogleCloudAiplatformV1PipelineJobRuntimeConfig(data: any): GoogleCloudAiplatformV1PipelineJobRuntimeConfig { return { ...data, parameters: data["parameters"] !== undefined ? Object.fromEntries(Object.entries(data["parameters"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1Value(v)]))) : undefined, }; } function deserializeGoogleCloudAiplatformV1PipelineJobRuntimeConfig(data: any): GoogleCloudAiplatformV1PipelineJobRuntimeConfig { return { ...data, parameters: data["parameters"] !== undefined ? Object.fromEntries(Object.entries(data["parameters"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1Value(v)]))) : undefined, }; } /** * The type of an input artifact. */ export interface GoogleCloudAiplatformV1PipelineJobRuntimeConfigInputArtifact { /** * Artifact resource id from MLMD. Which is the last portion of an artifact * resource name: * `projects/{project}/locations/{location}/metadataStores/default/artifacts/{artifact_id}`. * The artifact must stay within the same project, location and default * metadatastore as the pipeline. */ artifactId?: string; } /** * The runtime detail of a task execution. */ export interface GoogleCloudAiplatformV1PipelineTaskDetail { /** * Output only. Task create time. */ readonly createTime?: Date; /** * Output only. Task end time. */ readonly endTime?: Date; /** * Output only. The error that occurred during task execution. Only populated * when the task's state is FAILED or CANCELLED. */ readonly error?: GoogleRpcStatus; /** * Output only. The execution metadata of the task. */ readonly execution?: GoogleCloudAiplatformV1Execution; /** * Output only. The detailed execution info. */ readonly executorDetail?: GoogleCloudAiplatformV1PipelineTaskExecutorDetail; /** * Output only. The runtime input artifacts of the task. */ readonly inputs?: { [key: string]: GoogleCloudAiplatformV1PipelineTaskDetailArtifactList }; /** * Output only. The runtime output artifacts of the task. */ readonly outputs?: { [key: string]: GoogleCloudAiplatformV1PipelineTaskDetailArtifactList }; /** * Output only. The id of the parent task if the task is within a component * scope. Empty if the task is at the root level. */ readonly parentTaskId?: bigint; /** * Output only. A list of task status. This field keeps a record of task * status evolving over time. */ readonly pipelineTaskStatus?: GoogleCloudAiplatformV1PipelineTaskDetailPipelineTaskStatus[]; /** * Output only. Task start time. */ readonly startTime?: Date; /** * Output only. State of the task. 
*/ readonly state?: | "STATE_UNSPECIFIED" | "PENDING" | "RUNNING" | "SUCCEEDED" | "CANCEL_PENDING" | "CANCELLING" | "CANCELLED" | "FAILED" | "SKIPPED" | "NOT_TRIGGERED"; /** * Output only. The system generated ID of the task. */ readonly taskId?: bigint; /** * Output only. The user specified name of the task that is defined in * pipeline_spec. */ readonly taskName?: string; } /** * A list of artifact metadata. */ export interface GoogleCloudAiplatformV1PipelineTaskDetailArtifactList { /** * Output only. A list of artifact metadata. */ readonly artifacts?: GoogleCloudAiplatformV1Artifact[]; } /** * A single record of the task status. */ export interface GoogleCloudAiplatformV1PipelineTaskDetailPipelineTaskStatus { /** * Output only. The error that occurred during the state. May be set when the * state is any of the non-final state (PENDING/RUNNING/CANCELLING) or FAILED * state. If the state is FAILED, the error here is final and not going to be * retried. If the state is a non-final state, the error indicates a * system-error being retried. */ readonly error?: GoogleRpcStatus; /** * Output only. The state of the task. */ readonly state?: | "STATE_UNSPECIFIED" | "PENDING" | "RUNNING" | "SUCCEEDED" | "CANCEL_PENDING" | "CANCELLING" | "CANCELLED" | "FAILED" | "SKIPPED" | "NOT_TRIGGERED"; /** * Output only. Update time of this status. */ readonly updateTime?: Date; } /** * The runtime detail of a pipeline executor. */ export interface GoogleCloudAiplatformV1PipelineTaskExecutorDetail { /** * Output only. The detailed info for a container executor. */ readonly containerDetail?: GoogleCloudAiplatformV1PipelineTaskExecutorDetailContainerDetail; /** * Output only. The detailed info for a custom job executor. */ readonly customJobDetail?: GoogleCloudAiplatformV1PipelineTaskExecutorDetailCustomJobDetail; } /** * The detail of a container execution. It contains the job names of the * lifecycle of a container execution. */ export interface GoogleCloudAiplatformV1PipelineTaskExecutorDetailContainerDetail { /** * Output only. The names of the previously failed CustomJob for the main * container executions. The list includes the all attempts in chronological * order. */ readonly failedMainJobs?: string[]; /** * Output only. The names of the previously failed CustomJob for the * pre-caching-check container executions. This job will be available if the * PipelineJob.pipeline_spec specifies the `pre_caching_check` hook in the * lifecycle events. The list includes the all attempts in chronological * order. */ readonly failedPreCachingCheckJobs?: string[]; /** * Output only. The name of the CustomJob for the main container execution. */ readonly mainJob?: string; /** * Output only. The name of the CustomJob for the pre-caching-check container * execution. This job will be available if the PipelineJob.pipeline_spec * specifies the `pre_caching_check` hook in the lifecycle events. */ readonly preCachingCheckJob?: string; } /** * The detailed info for a custom job executor. */ export interface GoogleCloudAiplatformV1PipelineTaskExecutorDetailCustomJobDetail { /** * Output only. The names of the previously failed CustomJob. The list * includes the all attempts in chronological order. */ readonly failedJobs?: string[]; /** * Output only. The name of the CustomJob. */ readonly job?: string; } /** * Pipeline template metadata if PipelineJob.template_uri is from supported * template registry. Currently, the only supported registry is Artifact * Registry. 
*/ export interface GoogleCloudAiplatformV1PipelineTemplateMetadata { /** * The version_name in artifact registry. Will always be presented in output * if the PipelineJob.template_uri is from supported template registry. Format * is "sha256:abcdef123456...". */ version?: string; } /** * Input for pointwise metric. */ export interface GoogleCloudAiplatformV1PointwiseMetricInput { /** * Required. Pointwise metric instance. */ instance?: GoogleCloudAiplatformV1PointwiseMetricInstance; /** * Required. Spec for pointwise metric. */ metricSpec?: GoogleCloudAiplatformV1PointwiseMetricSpec; } function serializeGoogleCloudAiplatformV1PointwiseMetricInput(data: any): GoogleCloudAiplatformV1PointwiseMetricInput { return { ...data, instance: data["instance"] !== undefined ? serializeGoogleCloudAiplatformV1PointwiseMetricInstance(data["instance"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PointwiseMetricInput(data: any): GoogleCloudAiplatformV1PointwiseMetricInput { return { ...data, instance: data["instance"] !== undefined ? deserializeGoogleCloudAiplatformV1PointwiseMetricInstance(data["instance"]) : undefined, }; } /** * Pointwise metric instance. Usually one instance corresponds to one row in an * evaluation dataset. */ export interface GoogleCloudAiplatformV1PointwiseMetricInstance { /** * Key-value contents for the multimodality input, including text, image, * video, audio, and pdf, etc. The key is a placeholder in the metric prompt * template, and the value is the multimodal content. */ contentMapInstance?: GoogleCloudAiplatformV1ContentMap; /** * Instance specified as a json string. String key-value pairs are expected * in the json_instance to render * PointwiseMetricSpec.instance_prompt_template. */ jsonInstance?: string; } function serializeGoogleCloudAiplatformV1PointwiseMetricInstance(data: any): GoogleCloudAiplatformV1PointwiseMetricInstance { return { ...data, contentMapInstance: data["contentMapInstance"] !== undefined ? serializeGoogleCloudAiplatformV1ContentMap(data["contentMapInstance"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PointwiseMetricInstance(data: any): GoogleCloudAiplatformV1PointwiseMetricInstance { return { ...data, contentMapInstance: data["contentMapInstance"] !== undefined ? deserializeGoogleCloudAiplatformV1ContentMap(data["contentMapInstance"]) : undefined, }; } /** * Spec for pointwise metric result. */ export interface GoogleCloudAiplatformV1PointwiseMetricResult { /** * Output only. Spec for custom output. */ readonly customOutput?: GoogleCloudAiplatformV1CustomOutput; /** * Output only. Explanation for pointwise metric score. */ readonly explanation?: string; /** * Output only. Pointwise metric score. */ readonly score?: number; } /** * Spec for pointwise metric. */ export interface GoogleCloudAiplatformV1PointwiseMetricSpec { /** * Optional. CustomOutputFormatConfig allows customization of metric output. * By default, metrics return a score and explanation. When this config is * set, the default output is replaced with either: - The raw output string. - * A parsed output based on a user-defined schema. If a custom format is * chosen, the `score` and `explanation` fields in the corresponding metric * result will be empty. */ customOutputFormatConfig?: GoogleCloudAiplatformV1CustomOutputFormatConfig; /** * Required. Metric prompt template for pointwise metric. */ metricPromptTemplate?: string; /** * Optional. System instructions for pointwise metric. */ systemInstruction?: string; } /** * Represents a network port in a container.
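 *
 * For example (the port number is chosen purely for illustration):
 *
 *     const port: GoogleCloudAiplatformV1Port = { containerPort: 8080 };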
*/ export interface GoogleCloudAiplatformV1Port { /** * The number of the port to expose on the pod's IP address. Must be a valid * port number, between 1 and 65535 inclusive. */ containerPort?: number; } /** * Post startup script config. */ export interface GoogleCloudAiplatformV1PostStartupScriptConfig { /** * Optional. Post startup script to run after runtime is started. */ postStartupScript?: string; /** * Optional. Post startup script behavior that defines download and execution * behavior. */ postStartupScriptBehavior?: | "POST_STARTUP_SCRIPT_BEHAVIOR_UNSPECIFIED" | "RUN_ONCE" | "RUN_EVERY_START" | "DOWNLOAD_AND_RUN_EVERY_START"; /** * Optional. Post startup script url to download. Example: * `gs://bucket/script.sh` */ postStartupScriptUrl?: string; } /** * The configuration for the prebuilt speaker to use. */ export interface GoogleCloudAiplatformV1PrebuiltVoiceConfig { /** * The name of the preset voice to use. */ voiceName?: string; } /** * Assigns input data to training, validation, and test sets based on the value * of a provided key. Supported only for tabular Datasets. */ export interface GoogleCloudAiplatformV1PredefinedSplit { /** * Required. The key is a name of one of the Dataset's data columns. The * value of the key (either the label's value or value in the column) must be * one of {`training`, `validation`, `test`}, and it defines to which set the * given piece of data is assigned. If for a piece of data the key is not * present or has an invalid value, that piece is ignored by the pipeline. */ key?: string; } /** * Request message for PredictionService.PredictLongRunning. */ export interface GoogleCloudAiplatformV1PredictLongRunningRequest { /** * Required. The instances that are the input to the prediction call. A * DeployedModel may have an upper limit on the number of instances it * supports per request, and when it is exceeded the prediction call errors in * case of AutoML Models, or, in case of customer created Models, the * behaviour is as documented by that Model. The schema of any single instance * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * instance_schema_uri. */ instances?: any[]; /** * Optional. The parameters that govern the prediction. The schema of the * parameters may be specified via Endpoint's DeployedModels' Model's * PredictSchemata's parameters_schema_uri. */ parameters?: any; } /** * Request message for PredictionService.Predict. */ export interface GoogleCloudAiplatformV1PredictRequest { /** * Required. The instances that are the input to the prediction call. A * DeployedModel may have an upper limit on the number of instances it * supports per request, and when it is exceeded the prediction call errors in * case of AutoML Models, or, in case of customer created Models, the * behaviour is as documented by that Model. The schema of any single instance * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * instance_schema_uri. */ instances?: any[]; /** * The parameters that govern the prediction. The schema of the parameters * may be specified via Endpoint's DeployedModels' Model's PredictSchemata's * parameters_schema_uri. */ parameters?: any; } /** * Configuration for logging request-response to a BigQuery table. */ export interface GoogleCloudAiplatformV1PredictRequestResponseLoggingConfig { /** * BigQuery table for logging. If only given a project, a new dataset will be * created with name `logging__` where will be made BigQuery-dataset-name * compatible (e.g. 
most special characters will become underscores). If no * table name is given, a new table will be created with name * `request_response_logging` */ bigqueryDestination?: GoogleCloudAiplatformV1BigQueryDestination; /** * If logging is enabled or not. */ enabled?: boolean; /** * Percentage of requests to be logged, expressed as a fraction in * range(0,1]. */ samplingRate?: number; } /** * Response message for PredictionService.Predict. */ export interface GoogleCloudAiplatformV1PredictResponse { /** * ID of the Endpoint's DeployedModel that served this prediction. */ deployedModelId?: string; /** * Output only. Request-level metadata returned by the model. The metadata * type will be dependent upon the model implementation. */ readonly metadata?: any; /** * Output only. The resource name of the Model which is deployed as the * DeployedModel that this prediction hits. */ readonly model?: string; /** * Output only. The display name of the Model which is deployed as the * DeployedModel that this prediction hits. */ readonly modelDisplayName?: string; /** * Output only. The version ID of the Model which is deployed as the * DeployedModel that this prediction hits. */ readonly modelVersionId?: string; /** * The predictions that are the output of the predictions call. The schema of * any single prediction may be specified via Endpoint's DeployedModels' * Model's PredictSchemata's prediction_schema_uri. */ predictions?: any[]; } /** * Contains the schemata used in Model's predictions and explanations via * PredictionService.Predict, PredictionService.Explain and BatchPredictionJob. */ export interface GoogleCloudAiplatformV1PredictSchemata { /** * Immutable. Points to a YAML file stored on Google Cloud Storage describing * the format of a single instance, which are used in * PredictRequest.instances, ExplainRequest.instances and * BatchPredictionJob.input_config. The schema is defined as an OpenAPI 3.0.2 * [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * AutoML Models always have this field populated by Vertex AI. Note: The URI * given on output will be immutable and probably different, including the URI * scheme, than the one given on input. The output URI will point to a * location where the user only has a read access. */ instanceSchemaUri?: string; /** * Immutable. Points to a YAML file stored on Google Cloud Storage describing * the parameters of prediction and explanation via PredictRequest.parameters, * ExplainRequest.parameters and BatchPredictionJob.model_parameters. The * schema is defined as an OpenAPI 3.0.2 [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). * AutoML Models always have this field populated by Vertex AI, if no * parameters are supported, then it is set to an empty string. Note: The URI * given on output will be immutable and probably different, including the URI * scheme, than the one given on input. The output URI will point to a * location where the user only has a read access. */ parametersSchemaUri?: string; /** * Immutable. Points to a YAML file stored on Google Cloud Storage describing * the format of a single prediction produced by this Model, which are * returned via PredictResponse.predictions, ExplainResponse.explanations, and * BatchPredictionJob.output_config. The schema is defined as an OpenAPI 3.0.2 * [Schema * Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schemaObject). 
* AutoML Models always have this field populated by Vertex AI. Note: The URI * given on output will be immutable and probably different, including the URI * scheme, than the one given on input. The output URI will point to a * location where the user only has a read access. */ predictionSchemaUri?: string; } /** * Preset configuration for example-based explanations */ export interface GoogleCloudAiplatformV1Presets { /** * The modality of the uploaded model, which automatically configures the * distance measurement and feature normalization for the underlying example * index and queries. If your model does not precisely fit one of these types, * it is okay to choose the closest type. */ modality?: | "MODALITY_UNSPECIFIED" | "IMAGE" | "TEXT" | "TABULAR"; /** * Preset option controlling parameters for speed-precision trade-off when * querying for examples. If omitted, defaults to `PRECISE`. */ query?: | "PRECISE" | "FAST"; } /** * PrivateEndpoints proto is used to provide paths for users to send requests * privately. To send request via private service access, use predict_http_uri, * explain_http_uri or health_http_uri. To send request via private service * connect, use service_attachment. */ export interface GoogleCloudAiplatformV1PrivateEndpoints { /** * Output only. Http(s) path to send explain requests. */ readonly explainHttpUri?: string; /** * Output only. Http(s) path to send health check requests. */ readonly healthHttpUri?: string; /** * Output only. Http(s) path to send prediction requests. */ readonly predictHttpUri?: string; /** * Output only. The name of the service attachment resource. Populated if * private service connect is enabled. */ readonly serviceAttachment?: string; } /** * Represents configuration for private service connect. */ export interface GoogleCloudAiplatformV1PrivateServiceConnectConfig { /** * Required. If true, expose the IndexEndpoint via private service connect. */ enablePrivateServiceConnect?: boolean; /** * A list of Projects from which the forwarding rule will target the service * attachment. */ projectAllowlist?: string[]; /** * Output only. The name of the generated service attachment resource. This * is only populated if the endpoint is deployed with PrivateServiceConnect. */ readonly serviceAttachment?: string; } /** * Probe describes a health check to be performed against a container to * determine whether it is alive or ready to receive traffic. */ export interface GoogleCloudAiplatformV1Probe { /** * ExecAction probes the health of a container by executing a command. */ exec?: GoogleCloudAiplatformV1ProbeExecAction; /** * Number of consecutive failures before the probe is considered failed. * Defaults to 3. Minimum value is 1. Maps to Kubernetes probe argument * 'failureThreshold'. */ failureThreshold?: number; /** * GrpcAction probes the health of a container by sending a gRPC request. */ grpc?: GoogleCloudAiplatformV1ProbeGrpcAction; /** * HttpGetAction probes the health of a container by sending an HTTP GET * request. */ httpGet?: GoogleCloudAiplatformV1ProbeHttpGetAction; /** * Number of seconds to wait before starting the probe. Defaults to 0. * Minimum value is 0. Maps to Kubernetes probe argument * 'initialDelaySeconds'. */ initialDelaySeconds?: number; /** * How often (in seconds) to perform the probe. Default to 10 seconds. * Minimum value is 1. Must be less than timeout_seconds. Maps to Kubernetes * probe argument 'periodSeconds'. 
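 *
 * For context, a minimal probe sketch consistent with the constraints
 * documented on these fields (all values are illustrative assumptions):
 *
 *     const probe: GoogleCloudAiplatformV1Probe = {
 *       httpGet: { path: "/health", port: 8080 },
 *       initialDelaySeconds: 10,
 *       periodSeconds: 5,
 *       timeoutSeconds: 10,
 *       failureThreshold: 3,
 *     };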
*/ periodSeconds?: number; /** * Number of consecutive successes before the probe is considered successful. * Defaults to 1. Minimum value is 1. Maps to Kubernetes probe argument * 'successThreshold'. */ successThreshold?: number; /** * TcpSocketAction probes the health of a container by opening a TCP socket * connection. */ tcpSocket?: GoogleCloudAiplatformV1ProbeTcpSocketAction; /** * Number of seconds after which the probe times out. Defaults to 1 second. * Minimum value is 1. Must be greater or equal to period_seconds. Maps to * Kubernetes probe argument 'timeoutSeconds'. */ timeoutSeconds?: number; } /** * ExecAction specifies a command to execute. */ export interface GoogleCloudAiplatformV1ProbeExecAction { /** * Command is the command line to execute inside the container, the working * directory for the command is root ('/') in the container's filesystem. The * command is simply exec'd, it is not run inside a shell, so traditional * shell instructions ('|', etc) won't work. To use a shell, you need to * explicitly call out to that shell. Exit status of 0 is treated as * live/healthy and non-zero is unhealthy. */ command?: string[]; } /** * GrpcAction checks the health of a container using a gRPC service. */ export interface GoogleCloudAiplatformV1ProbeGrpcAction { /** * Port number of the gRPC service. Number must be in the range 1 to 65535. */ port?: number; /** * Service is the name of the service to place in the gRPC * HealthCheckRequest. See * https://github.com/grpc/grpc/blob/master/doc/health-checking.md. If this is * not specified, the default behavior is defined by gRPC. */ service?: string; } /** * HttpGetAction describes an action based on HTTP Get requests. */ export interface GoogleCloudAiplatformV1ProbeHttpGetAction { /** * Host name to connect to, defaults to the model serving container's IP. You * probably want to set "Host" in httpHeaders instead. */ host?: string; /** * Custom headers to set in the request. HTTP allows repeated headers. */ httpHeaders?: GoogleCloudAiplatformV1ProbeHttpHeader[]; /** * Path to access on the HTTP server. */ path?: string; /** * Number of the port to access on the container. Number must be in the range * 1 to 65535. */ port?: number; /** * Scheme to use for connecting to the host. Defaults to HTTP. Acceptable * values are "HTTP" or "HTTPS". */ scheme?: string; } /** * HttpHeader describes a custom header to be used in HTTP probes */ export interface GoogleCloudAiplatformV1ProbeHttpHeader { /** * The header field name. This will be canonicalized upon output, so * case-variant names will be understood as the same header. */ name?: string; /** * The header field value */ value?: string; } /** * TcpSocketAction probes the health of a container by opening a TCP socket * connection. */ export interface GoogleCloudAiplatformV1ProbeTcpSocketAction { /** * Optional: Host name to connect to, defaults to the model serving * container's IP. */ host?: string; /** * Number of the port to access on the container. Number must be in the range * 1 to 65535. */ port?: number; } /** * PscAutomatedEndpoints defines the output of the forwarding rule * automatically created by each PscAutomationConfig. */ export interface GoogleCloudAiplatformV1PscAutomatedEndpoints { /** * Ip Address created by the automated forwarding rule. */ matchAddress?: string; /** * Corresponding network in pscAutomationConfigs. 
*/ network?: string; /** * Corresponding project_id in pscAutomationConfigs. */ projectId?: string; } /** * PSC config that is used to automatically create forwarding rule via * ServiceConnectionMap. */ export interface GoogleCloudAiplatformV1PSCAutomationConfig { /** * Required. The full name of the Google Compute Engine * [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks). * [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): * `projects/{project}/global/networks/{network}`. Where {project} is a * project number, as in '12345', and {network} is a network name. */ network?: string; /** * Required. Project id used to create forwarding rule. */ projectId?: string; } /** * A Model Garden Publisher Model. */ export interface GoogleCloudAiplatformV1PublisherModel { /** * Optional. Additional information about the model's Frameworks. */ frameworks?: string[]; /** * Optional. Indicates the launch stage of the model. */ launchStage?: | "LAUNCH_STAGE_UNSPECIFIED" | "EXPERIMENTAL" | "PRIVATE_PREVIEW" | "PUBLIC_PREVIEW" | "GA"; /** * Output only. The resource name of the PublisherModel. */ readonly name?: string; /** * Required. Indicates the open source category of the publisher model. */ openSourceCategory?: | "OPEN_SOURCE_CATEGORY_UNSPECIFIED" | "PROPRIETARY" | "GOOGLE_OWNED_OSS_WITH_GOOGLE_CHECKPOINT" | "THIRD_PARTY_OWNED_OSS_WITH_GOOGLE_CHECKPOINT" | "GOOGLE_OWNED_OSS" | "THIRD_PARTY_OWNED_OSS"; /** * Optional. The schemata that describe the formats of the PublisherModel's * predictions and explanations as given and returned via * PredictionService.Predict. */ predictSchemata?: GoogleCloudAiplatformV1PredictSchemata; /** * Optional. Output only. Immutable. Used to indicate this model has a * publisher model and provide the template of the publisher model resource * name. */ readonly publisherModelTemplate?: string; /** * Optional. Supported call-to-action options. */ supportedActions?: GoogleCloudAiplatformV1PublisherModelCallToAction; /** * Output only. Immutable. The version ID of the PublisherModel. A new * version is committed when a new model version is uploaded under an existing * model id. It is an auto-incrementing decimal number in string * representation. */ readonly versionId?: string; /** * Optional. Indicates the state of the model version. */ versionState?: | "VERSION_STATE_UNSPECIFIED" | "VERSION_STATE_STABLE" | "VERSION_STATE_UNSTABLE"; } function serializeGoogleCloudAiplatformV1PublisherModel(data: any): GoogleCloudAiplatformV1PublisherModel { return { ...data, supportedActions: data["supportedActions"] !== undefined ? serializeGoogleCloudAiplatformV1PublisherModelCallToAction(data["supportedActions"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PublisherModel(data: any): GoogleCloudAiplatformV1PublisherModel { return { ...data, supportedActions: data["supportedActions"] !== undefined ? deserializeGoogleCloudAiplatformV1PublisherModelCallToAction(data["supportedActions"]) : undefined, }; } /** * Actions that can be taken on this Publisher Model. */ export interface GoogleCloudAiplatformV1PublisherModelCallToAction { /** * Optional. Create application using the PublisherModel. */ createApplication?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Deploy the PublisherModel to Vertex Endpoint. */ deploy?: GoogleCloudAiplatformV1PublisherModelCallToActionDeploy; /** * Optional. Deploy PublisherModel to Google Kubernetes Engine.
*/ deployGke?: GoogleCloudAiplatformV1PublisherModelCallToActionDeployGke; /** * Optional. Multiple setups to deploy the PublisherModel to Vertex Endpoint. */ multiDeployVertex?: GoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex; /** * Optional. Open evaluation pipeline of the PublisherModel. */ openEvaluationPipeline?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Open fine-tuning pipeline of the PublisherModel. */ openFineTuningPipeline?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Open fine-tuning pipelines of the PublisherModel. */ openFineTuningPipelines?: GoogleCloudAiplatformV1PublisherModelCallToActionOpenFineTuningPipelines; /** * Optional. Open in Generation AI Studio. */ openGenerationAiStudio?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Open Genie / Playground. */ openGenie?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Open notebook of the PublisherModel. */ openNotebook?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Open notebooks of the PublisherModel. */ openNotebooks?: GoogleCloudAiplatformV1PublisherModelCallToActionOpenNotebooks; /** * Optional. Open prompt-tuning pipeline of the PublisherModel. */ openPromptTuningPipeline?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. Request for access. */ requestAccess?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences; /** * Optional. To view Rest API docs. */ viewRestApi?: GoogleCloudAiplatformV1PublisherModelCallToActionViewRestApi; } function serializeGoogleCloudAiplatformV1PublisherModelCallToAction(data: any): GoogleCloudAiplatformV1PublisherModelCallToAction { return { ...data, deploy: data["deploy"] !== undefined ? serializeGoogleCloudAiplatformV1PublisherModelCallToActionDeploy(data["deploy"]) : undefined, multiDeployVertex: data["multiDeployVertex"] !== undefined ? serializeGoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex(data["multiDeployVertex"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PublisherModelCallToAction(data: any): GoogleCloudAiplatformV1PublisherModelCallToAction { return { ...data, deploy: data["deploy"] !== undefined ? deserializeGoogleCloudAiplatformV1PublisherModelCallToActionDeploy(data["deploy"]) : undefined, multiDeployVertex: data["multiDeployVertex"] !== undefined ? deserializeGoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex(data["multiDeployVertex"]) : undefined, }; } /** * Model metadata that is needed for UploadModel or DeployModel/CreateEndpoint * requests. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionDeploy { /** * Optional. The path to the directory containing the Model artifact and any * of its supporting files. */ artifactUri?: string; /** * A description of resources that to large degree are decided by Vertex AI, * and require only a modest additional configuration. */ automaticResources?: GoogleCloudAiplatformV1AutomaticResources; /** * Optional. The specification of the container that is to be used when * deploying this Model in Vertex AI. Not present for Large Models. */ containerSpec?: GoogleCloudAiplatformV1ModelContainerSpec; /** * A description of resources that are dedicated to the DeployedModel, and * that need a higher degree of manual configuration. 
*/ dedicatedResources?: GoogleCloudAiplatformV1DedicatedResources; /** * Optional. Metadata information about this deployment config. */ deployMetadata?: GoogleCloudAiplatformV1PublisherModelCallToActionDeployDeployMetadata; /** * Optional. The name of the deploy task (e.g., "text to image generation"). */ deployTaskName?: string; /** * Optional. Large model reference. When this is set, model_artifact_spec is * not needed. */ largeModelReference?: GoogleCloudAiplatformV1LargeModelReference; /** * Optional. Default model display name. */ modelDisplayName?: string; /** * Optional. The signed URI for ephemeral Cloud Storage access to model * artifact. */ publicArtifactUri?: string; /** * The resource name of the shared DeploymentResourcePool to deploy on. * Format: * `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` */ sharedResources?: string; /** * Required. The title of the regional resource reference. */ title?: string; } function serializeGoogleCloudAiplatformV1PublisherModelCallToActionDeploy(data: any): GoogleCloudAiplatformV1PublisherModelCallToActionDeploy { return { ...data, containerSpec: data["containerSpec"] !== undefined ? serializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PublisherModelCallToActionDeploy(data: any): GoogleCloudAiplatformV1PublisherModelCallToActionDeploy { return { ...data, containerSpec: data["containerSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, }; } /** * Metadata information about the deployment for managing deployment config. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionDeployDeployMetadata { /** * Optional. Labels for the deployment config. For managing deployment config * like verifying, source of deployment config, etc. */ labels?: { [key: string]: string }; /** * Optional. Sample request for deployed endpoint. */ sampleRequest?: string; } /** * Configurations for PublisherModel GKE deployment */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionDeployGke { /** * Optional. GKE deployment configuration in yaml format. */ gkeYamlConfigs?: string[]; } /** * Multiple setups to deploy the PublisherModel. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex { /** * Optional. One click deployment configurations. */ multiDeployVertex?: GoogleCloudAiplatformV1PublisherModelCallToActionDeploy[]; } function serializeGoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex(data: any): GoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex { return { ...data, multiDeployVertex: data["multiDeployVertex"] !== undefined ? data["multiDeployVertex"].map((item: any) => (serializeGoogleCloudAiplatformV1PublisherModelCallToActionDeploy(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex(data: any): GoogleCloudAiplatformV1PublisherModelCallToActionDeployVertex { return { ...data, multiDeployVertex: data["multiDeployVertex"] !== undefined ? data["multiDeployVertex"].map((item: any) => (deserializeGoogleCloudAiplatformV1PublisherModelCallToActionDeploy(item))) : undefined, }; } /** * Open fine tuning pipelines. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionOpenFineTuningPipelines { /** * Required. Regional resource references to fine tuning pipelines. 
*/ fineTuningPipelines?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences[]; } /** * Open notebooks. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionOpenNotebooks { /** * Required. Regional resource references to notebooks. */ notebooks?: GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences[]; } /** * The regional resource name or the URI. Key is region, e.g., us-central1, * europe-west2, global, etc.. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionRegionalResourceReferences { /** * Required. */ references?: { [key: string]: GoogleCloudAiplatformV1PublisherModelResourceReference }; /** * Optional. Description of the resource. */ resourceDescription?: string; /** * Optional. Title of the resource. */ resourceTitle?: string; /** * Optional. Use case (CUJ) of the resource. */ resourceUseCase?: string; /** * Required. */ title?: string; } /** * Rest API docs. */ export interface GoogleCloudAiplatformV1PublisherModelCallToActionViewRestApi { /** * Required. */ documentations?: GoogleCloudAiplatformV1PublisherModelDocumentation[]; /** * Required. The title of the view rest API. */ title?: string; } /** * A named piece of documentation. */ export interface GoogleCloudAiplatformV1PublisherModelDocumentation { /** * Required. Content of this piece of document (in Markdown format). */ content?: string; /** * Required. E.g., OVERVIEW, USE CASES, DOCUMENTATION, SDK & SAMPLES, JAVA, * NODE.JS, etc.. */ title?: string; } /** * Reference to a resource. */ export interface GoogleCloudAiplatformV1PublisherModelResourceReference { /** * Description of the resource. */ description?: string; /** * The resource name of the Google Cloud resource. */ resourceName?: string; /** * The URI of the resource. */ uri?: string; /** * Use case (CUJ) of the resource. */ useCase?: string; } /** * Details of operations that perform MetadataService.PurgeArtifacts. */ export interface GoogleCloudAiplatformV1PurgeArtifactsMetadata { /** * Operation metadata for purging Artifacts. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for MetadataService.PurgeArtifacts. */ export interface GoogleCloudAiplatformV1PurgeArtifactsRequest { /** * Required. A required filter matching the Artifacts to be purged. E.g., * `update_time <= 2020-11-19T11:30:00-04:00`. */ filter?: string; /** * Optional. Flag to indicate to actually perform the purge. If `force` is * set to false, the method will return a sample of Artifact names that would * be deleted. */ force?: boolean; } /** * Response message for MetadataService.PurgeArtifacts. */ export interface GoogleCloudAiplatformV1PurgeArtifactsResponse { /** * The number of Artifacts that this request deleted (or, if `force` is * false, the number of Artifacts that will be deleted). This can be an * estimate. */ purgeCount?: bigint; /** * A sample of the Artifact names that will be deleted. Only populated if * `force` is set to false. The maximum number of samples is 100 (it is * possible to return fewer). */ purgeSample?: string[]; } function serializeGoogleCloudAiplatformV1PurgeArtifactsResponse(data: any): GoogleCloudAiplatformV1PurgeArtifactsResponse { return { ...data, purgeCount: data["purgeCount"] !== undefined ? String(data["purgeCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PurgeArtifactsResponse(data: any): GoogleCloudAiplatformV1PurgeArtifactsResponse { return { ...data, purgeCount: data["purgeCount"] !== undefined ? 
BigInt(data["purgeCount"]) : undefined, }; } /** * Details of operations that perform MetadataService.PurgeContexts. */ export interface GoogleCloudAiplatformV1PurgeContextsMetadata { /** * Operation metadata for purging Contexts. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for MetadataService.PurgeContexts. */ export interface GoogleCloudAiplatformV1PurgeContextsRequest { /** * Required. A required filter matching the Contexts to be purged. E.g., * `update_time <= 2020-11-19T11:30:00-04:00`. */ filter?: string; /** * Optional. Flag to indicate to actually perform the purge. If `force` is * set to false, the method will return a sample of Context names that would * be deleted. */ force?: boolean; } /** * Response message for MetadataService.PurgeContexts. */ export interface GoogleCloudAiplatformV1PurgeContextsResponse { /** * The number of Contexts that this request deleted (or, if `force` is false, * the number of Contexts that will be deleted). This can be an estimate. */ purgeCount?: bigint; /** * A sample of the Context names that will be deleted. Only populated if * `force` is set to false. The maximum number of samples is 100 (it is * possible to return fewer). */ purgeSample?: string[]; } function serializeGoogleCloudAiplatformV1PurgeContextsResponse(data: any): GoogleCloudAiplatformV1PurgeContextsResponse { return { ...data, purgeCount: data["purgeCount"] !== undefined ? String(data["purgeCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PurgeContextsResponse(data: any): GoogleCloudAiplatformV1PurgeContextsResponse { return { ...data, purgeCount: data["purgeCount"] !== undefined ? BigInt(data["purgeCount"]) : undefined, }; } /** * Details of operations that perform MetadataService.PurgeExecutions. */ export interface GoogleCloudAiplatformV1PurgeExecutionsMetadata { /** * Operation metadata for purging Executions. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for MetadataService.PurgeExecutions. */ export interface GoogleCloudAiplatformV1PurgeExecutionsRequest { /** * Required. A required filter matching the Executions to be purged. E.g., * `update_time <= 2020-11-19T11:30:00-04:00`. */ filter?: string; /** * Optional. Flag to indicate to actually perform the purge. If `force` is * set to false, the method will return a sample of Execution names that would * be deleted. */ force?: boolean; } /** * Response message for MetadataService.PurgeExecutions. */ export interface GoogleCloudAiplatformV1PurgeExecutionsResponse { /** * The number of Executions that this request deleted (or, if `force` is * false, the number of Executions that will be deleted). This can be an * estimate. */ purgeCount?: bigint; /** * A sample of the Execution names that will be deleted. Only populated if * `force` is set to false. The maximum number of samples is 100 (it is * possible to return fewer). */ purgeSample?: string[]; } function serializeGoogleCloudAiplatformV1PurgeExecutionsResponse(data: any): GoogleCloudAiplatformV1PurgeExecutionsResponse { return { ...data, purgeCount: data["purgeCount"] !== undefined ? String(data["purgeCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1PurgeExecutionsResponse(data: any): GoogleCloudAiplatformV1PurgeExecutionsResponse { return { ...data, purgeCount: data["purgeCount"] !== undefined ? BigInt(data["purgeCount"]) : undefined, }; } /** * The spec of a Python packaged code. 
*/ export interface GoogleCloudAiplatformV1PythonPackageSpec { /** * Command line arguments to be passed to the Python task. */ args?: string[]; /** * Environment variables to be passed to the python module. Maximum limit is * 100. */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Required. The URI of a container image in Artifact Registry that will run * the provided Python package. Vertex AI provides a wide range of executor * images with pre-installed packages to meet users' various use cases. See * the list of [pre-built containers for * training](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). * You must use an image from this list. */ executorImageUri?: string; /** * Required. The Google Cloud Storage location of the Python package files * which are the training program and its dependent packages. The maximum * number of package URIs is 100. */ packageUris?: string[]; /** * Required. The Python module name to run after installing the packages. */ pythonModule?: string; } /** * Response message for QueryDeployedModels method. */ export interface GoogleCloudAiplatformV1QueryDeployedModelsResponse { /** * References to the DeployedModels that share the specified * deploymentResourcePool. */ deployedModelRefs?: GoogleCloudAiplatformV1DeployedModelRef[]; /** * DEPRECATED Use deployed_model_refs instead. */ deployedModels?: GoogleCloudAiplatformV1DeployedModel[]; /** * A token, which can be sent as `page_token` to retrieve the next page. If * this field is omitted, there are no subsequent pages. */ nextPageToken?: string; /** * The total number of DeployedModels on this DeploymentResourcePool. */ totalDeployedModelCount?: number; /** * The total number of Endpoints that have DeployedModels on this * DeploymentResourcePool. */ totalEndpointCount?: number; } /** * Request message for ReasoningEngineExecutionService.Query. */ export interface GoogleCloudAiplatformV1QueryReasoningEngineRequest { /** * Optional. Class method to be used for the query. It is optional and * defaults to "query" if unspecified. */ classMethod?: string; /** * Optional. Input content provided by users in JSON object format. Examples * include text query, function calling parameters, media bytes, etc. */ input?: { [key: string]: any }; } /** * Response message for ReasoningEngineExecutionService.Query */ export interface GoogleCloudAiplatformV1QueryReasoningEngineResponse { /** * Response provided by users in JSON object format. */ output?: any; } /** * Input for question answering correctness metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringCorrectnessInput { /** * Required. Question answering correctness instance. */ instance?: GoogleCloudAiplatformV1QuestionAnsweringCorrectnessInstance; /** * Required. Spec for question answering correctness score metric. */ metricSpec?: GoogleCloudAiplatformV1QuestionAnsweringCorrectnessSpec; } /** * Spec for question answering correctness instance. */ export interface GoogleCloudAiplatformV1QuestionAnsweringCorrectnessInstance { /** * Optional. Text provided as context to answer the question. */ context?: string; /** * Required. The question asked and other instruction in the inference * prompt. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for question answering correctness result. */ export interface GoogleCloudAiplatformV1QuestionAnsweringCorrectnessResult { /** * Output only. 
Confidence for question answering correctness score. */ readonly confidence?: number; /** * Output only. Explanation for question answering correctness score. */ readonly explanation?: string; /** * Output only. Question Answering Correctness score. */ readonly score?: number; } /** * Spec for question answering correctness metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringCorrectnessSpec { /** * Optional. Whether to use instance.reference to compute question answering * correctness. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for question answering helpfulness metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessInput { /** * Required. Question answering helpfulness instance. */ instance?: GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessInstance; /** * Required. Spec for question answering helpfulness score metric. */ metricSpec?: GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessSpec; } /** * Spec for question answering helpfulness instance. */ export interface GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessInstance { /** * Optional. Text provided as context to answer the question. */ context?: string; /** * Required. The question asked and other instruction in the inference * prompt. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for question answering helpfulness result. */ export interface GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessResult { /** * Output only. Confidence for question answering helpfulness score. */ readonly confidence?: number; /** * Output only. Explanation for question answering helpfulness score. */ readonly explanation?: string; /** * Output only. Question Answering Helpfulness score. */ readonly score?: number; } /** * Spec for question answering helpfulness metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringHelpfulnessSpec { /** * Optional. Whether to use instance.reference to compute question answering * helpfulness. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for question answering quality metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringQualityInput { /** * Required. Question answering quality instance. */ instance?: GoogleCloudAiplatformV1QuestionAnsweringQualityInstance; /** * Required. Spec for question answering quality score metric. */ metricSpec?: GoogleCloudAiplatformV1QuestionAnsweringQualitySpec; } /** * Spec for question answering quality instance. */ export interface GoogleCloudAiplatformV1QuestionAnsweringQualityInstance { /** * Required. Text to answer the question. */ context?: string; /** * Required. Question Answering prompt for LLM. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for question answering quality result. */ export interface GoogleCloudAiplatformV1QuestionAnsweringQualityResult { /** * Output only. Confidence for question answering quality score. */ readonly confidence?: number; /** * Output only. Explanation for question answering quality score. */ readonly explanation?: string; /** * Output only. Question Answering Quality score. 
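 *
 * For orientation, a hypothetical input that would produce a result like this
 * one can be assembled from the interfaces above; the strings below are
 * placeholders, not values from the API definition:
 *
 *     const qaQualityInput: GoogleCloudAiplatformV1QuestionAnsweringQualityInput = {
 *       instance: {
 *         instruction: "Answer the question using the context.",
 *         context: "The Gateway Arch is 630 feet tall.",
 *         prediction: "It is 630 feet tall.",
 *       },
 *       // When true, instance.reference is used to compute the metric (see the spec below).
 *       metricSpec: { useReference: false },
 *     };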
*/ readonly score?: number; } /** * Spec for question answering quality score metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringQualitySpec { /** * Optional. Whether to use instance.reference to compute question answering * quality. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for question answering relevance metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringRelevanceInput { /** * Required. Question answering relevance instance. */ instance?: GoogleCloudAiplatformV1QuestionAnsweringRelevanceInstance; /** * Required. Spec for question answering relevance score metric. */ metricSpec?: GoogleCloudAiplatformV1QuestionAnsweringRelevanceSpec; } /** * Spec for question answering relevance instance. */ export interface GoogleCloudAiplatformV1QuestionAnsweringRelevanceInstance { /** * Optional. Text provided as context to answer the question. */ context?: string; /** * Required. The question asked and other instruction in the inference * prompt. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for question answering relevance result. */ export interface GoogleCloudAiplatformV1QuestionAnsweringRelevanceResult { /** * Output only. Confidence for question answering relevance score. */ readonly confidence?: number; /** * Output only. Explanation for question answering relevance score. */ readonly explanation?: string; /** * Output only. Question Answering Relevance score. */ readonly score?: number; } /** * Spec for question answering relevance metric. */ export interface GoogleCloudAiplatformV1QuestionAnsweringRelevanceSpec { /** * Optional. Whether to use instance.reference to compute question answering * relevance. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * A RagChunk includes the content of a chunk of a RagFile, and associated * metadata. */ export interface GoogleCloudAiplatformV1RagChunk { /** * If populated, represents where the chunk starts and ends in the document. */ pageSpan?: GoogleCloudAiplatformV1RagChunkPageSpan; /** * The content of the chunk. */ text?: string; } /** * Represents where the chunk starts and ends in the document. */ export interface GoogleCloudAiplatformV1RagChunkPageSpan { /** * Page where chunk starts in the document. Inclusive. 1-indexed. */ firstPage?: number; /** * Page where chunk ends in the document. Inclusive. 1-indexed. */ lastPage?: number; } /** * Relevant contexts for one query. */ export interface GoogleCloudAiplatformV1RagContexts { /** * All its contexts. */ contexts?: GoogleCloudAiplatformV1RagContextsContext[]; } /** * A context of the query. */ export interface GoogleCloudAiplatformV1RagContextsContext { /** * Context of the retrieved chunk. */ chunk?: GoogleCloudAiplatformV1RagChunk; /** * According to the underlying Vector DB and the selected metric type, the * score can be either the distance or the similarity between the query and * the context and its range depends on the metric type. For example, if the * metric type is COSINE_DISTANCE, it represents the distance between the * query and the context. The larger the distance, the less relevant the * context is to the query. The range is [0, 2], while 0 means the most * relevant and 2 means the least relevant. */ score?: number; /** * The file display name. 
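 *
 * As an informal sketch of how a retrieved context list might be consumed
 * (the ordering rule for COSINE_DISTANCE is described under `score` above;
 * the threshold used here is an arbitrary placeholder):
 *
 *     function mostRelevant(contexts: GoogleCloudAiplatformV1RagContexts, maxDistance = 0.5) {
 *       // Keep only contexts whose distance to the query is below the threshold.
 *       return (contexts.contexts ?? []).filter((c) =>
 *         c.score !== undefined && c.score <= maxDistance
 *       );
 *     }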
*/ sourceDisplayName?: string; /** * If the file is imported from Cloud Storage or Google Drive, source_uri * will be original file URI in Cloud Storage or Google Drive; if file is * uploaded, source_uri will be file display name. */ sourceUri?: string; /** * The text chunk. */ text?: string; } /** * A RagCorpus is a RagFile container and a project can have multiple * RagCorpora. */ export interface GoogleCloudAiplatformV1RagCorpus { /** * Output only. RagCorpus state. */ readonly corpusStatus?: GoogleCloudAiplatformV1CorpusStatus; /** * Output only. Timestamp when this RagCorpus was created. */ readonly createTime?: Date; /** * Optional. The description of the RagCorpus. */ description?: string; /** * Required. The display name of the RagCorpus. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Output only. The resource name of the RagCorpus. */ readonly name?: string; /** * Output only. Timestamp when this RagCorpus was last updated. */ readonly updateTime?: Date; /** * Optional. Immutable. The config for the Vector DBs. */ vectorDbConfig?: GoogleCloudAiplatformV1RagVectorDbConfig; /** * Optional. Immutable. The config for the Vertex AI Search. */ vertexAiSearchConfig?: GoogleCloudAiplatformV1VertexAiSearchConfig; } /** * Config for the embedding model to use for RAG. */ export interface GoogleCloudAiplatformV1RagEmbeddingModelConfig { /** * The Vertex AI Prediction Endpoint that either refers to a publisher model * or an endpoint that is hosting a 1P fine-tuned text embedding model. * Endpoints hosting non-1P fine-tuned text embedding models are currently not * supported. This is used for dense vector search. */ vertexPredictionEndpoint?: GoogleCloudAiplatformV1RagEmbeddingModelConfigVertexPredictionEndpoint; } /** * Config representing a model hosted on Vertex Prediction Endpoint. */ export interface GoogleCloudAiplatformV1RagEmbeddingModelConfigVertexPredictionEndpoint { /** * Required. The endpoint resource name. Format: * `projects/{project}/locations/{location}/publishers/{publisher}/models/{model}` * or `projects/{project}/locations/{location}/endpoints/{endpoint}` */ endpoint?: string; /** * Output only. The resource name of the model that is deployed on the * endpoint. Present only when the endpoint is not a publisher model. Pattern: * `projects/{project}/locations/{location}/models/{model}` */ readonly model?: string; /** * Output only. Version ID of the model that is deployed on the endpoint. * Present only when the endpoint is not a publisher model. */ readonly modelVersionId?: string; } /** * A RagFile contains user data for chunking, embedding and indexing. */ export interface GoogleCloudAiplatformV1RagFile { /** * Output only. Timestamp when this RagFile was created. */ readonly createTime?: Date; /** * Optional. The description of the RagFile. */ description?: string; /** * Output only. The RagFile is encapsulated and uploaded in the UploadRagFile * request. */ readonly directUploadSource?: GoogleCloudAiplatformV1DirectUploadSource; /** * Required. The display name of the RagFile. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Output only. State of the RagFile. */ readonly fileStatus?: GoogleCloudAiplatformV1FileStatus; /** * Output only. Google Cloud Storage location of the RagFile. It does not * support wildcards in the Cloud Storage uri for now. */ readonly gcsSource?: GoogleCloudAiplatformV1GcsSource; /** * Output only. 
Google Drive location. Supports importing individual files as * well as Google Drive folders. */ readonly googleDriveSource?: GoogleCloudAiplatformV1GoogleDriveSource; /** * The RagFile is imported from a Jira query. */ jiraSource?: GoogleCloudAiplatformV1JiraSource; /** * Output only. The resource name of the RagFile. */ readonly name?: string; /** * The RagFile is imported from a SharePoint source. */ sharePointSources?: GoogleCloudAiplatformV1SharePointSources; /** * The RagFile is imported from a Slack channel. */ slackSource?: GoogleCloudAiplatformV1SlackSource; /** * Output only. Timestamp when this RagFile was last updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1RagFile(data: any): GoogleCloudAiplatformV1RagFile { return { ...data, slackSource: data["slackSource"] !== undefined ? serializeGoogleCloudAiplatformV1SlackSource(data["slackSource"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1RagFile(data: any): GoogleCloudAiplatformV1RagFile { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, slackSource: data["slackSource"] !== undefined ? deserializeGoogleCloudAiplatformV1SlackSource(data["slackSource"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Specifies the size and overlap of chunks for RagFiles. */ export interface GoogleCloudAiplatformV1RagFileChunkingConfig { /** * Specifies the fixed length chunking config. */ fixedLengthChunking?: GoogleCloudAiplatformV1RagFileChunkingConfigFixedLengthChunking; } /** * Specifies the fixed length chunking config. */ export interface GoogleCloudAiplatformV1RagFileChunkingConfigFixedLengthChunking { /** * The overlap between chunks. */ chunkOverlap?: number; /** * The size of the chunks. */ chunkSize?: number; } /** * Specifies the parsing config for RagFiles. */ export interface GoogleCloudAiplatformV1RagFileParsingConfig { /** * The Layout Parser to use for RagFiles. */ layoutParser?: GoogleCloudAiplatformV1RagFileParsingConfigLayoutParser; } /** * Document AI Layout Parser config. */ export interface GoogleCloudAiplatformV1RagFileParsingConfigLayoutParser { /** * The maximum number of requests the job is allowed to make to the Document * AI processor per minute. Consult * https://cloud.google.com/document-ai/quotas and the Quota page for your * project to set an appropriate value here. If unspecified, a default value * of 120 QPM would be used. */ maxParsingRequestsPerMin?: number; /** * The full resource name of a Document AI processor or processor version. * The processor must have type `LAYOUT_PARSER_PROCESSOR`. If specified, the * `additional_config.parse_as_scanned_pdf` field must be false. Format: * * `projects/{project_id}/locations/{location}/processors/{processor_id}` * * `projects/{project_id}/locations/{location}/processors/{processor_id}/processorVersions/{processor_version_id}` */ processorName?: string; } /** * Specifies the transformation config for RagFiles. */ export interface GoogleCloudAiplatformV1RagFileTransformationConfig { /** * Specifies the chunking config for RagFiles. */ ragFileChunkingConfig?: GoogleCloudAiplatformV1RagFileChunkingConfig; } /** * A query to retrieve relevant contexts. */ export interface GoogleCloudAiplatformV1RagQuery { /** * Optional. The retrieval config for the query. */ ragRetrievalConfig?: GoogleCloudAiplatformV1RagRetrievalConfig; /** * Optional. The query in text format to get relevant contexts. 
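 *
 * A minimal sketch of a full query using this field together with the
 * retrieval config defined below; topK and the distance threshold are
 * placeholder values, not API defaults:
 *
 *     const query: GoogleCloudAiplatformV1RagQuery = {
 *       text: "How do I rotate service account keys?",
 *       ragRetrievalConfig: {
 *         topK: 5,
 *         filter: { vectorDistanceThreshold: 0.6 },
 *       },
 *     };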
*/ text?: string; } /** * Specifies the context retrieval config. */ export interface GoogleCloudAiplatformV1RagRetrievalConfig { /** * Optional. Config for filters. */ filter?: GoogleCloudAiplatformV1RagRetrievalConfigFilter; /** * Optional. Config for ranking and reranking. */ ranking?: GoogleCloudAiplatformV1RagRetrievalConfigRanking; /** * Optional. The number of contexts to retrieve. */ topK?: number; } /** * Config for filters. */ export interface GoogleCloudAiplatformV1RagRetrievalConfigFilter { /** * Optional. String for metadata filtering. */ metadataFilter?: string; /** * Optional. Only returns contexts with vector distance smaller than the * threshold. */ vectorDistanceThreshold?: number; /** * Optional. Only returns contexts with vector similarity larger than the * threshold. */ vectorSimilarityThreshold?: number; } /** * Config for ranking and reranking. */ export interface GoogleCloudAiplatformV1RagRetrievalConfigRanking { /** * Optional. Config for LlmRanker. */ llmRanker?: GoogleCloudAiplatformV1RagRetrievalConfigRankingLlmRanker; /** * Optional. Config for Rank Service. */ rankService?: GoogleCloudAiplatformV1RagRetrievalConfigRankingRankService; } /** * Config for LlmRanker. */ export interface GoogleCloudAiplatformV1RagRetrievalConfigRankingLlmRanker { /** * Optional. The model name used for ranking. See [Supported * models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#supported-models). */ modelName?: string; } /** * Config for Rank Service. */ export interface GoogleCloudAiplatformV1RagRetrievalConfigRankingRankService { /** * Optional. The model name of the rank service. Format: * `semantic-ranker-512@latest` */ modelName?: string; } /** * Config for the Vector DB to use for RAG. */ export interface GoogleCloudAiplatformV1RagVectorDbConfig { /** * Authentication config for the chosen Vector DB. */ apiAuth?: GoogleCloudAiplatformV1ApiAuth; /** * The config for the Pinecone. */ pinecone?: GoogleCloudAiplatformV1RagVectorDbConfigPinecone; /** * Optional. Immutable. The embedding model config of the Vector DB. */ ragEmbeddingModelConfig?: GoogleCloudAiplatformV1RagEmbeddingModelConfig; /** * The config for the RAG-managed Vector DB. */ ragManagedDb?: GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDb; /** * The config for the Vertex Vector Search. */ vertexVectorSearch?: GoogleCloudAiplatformV1RagVectorDbConfigVertexVectorSearch; } /** * The config for the Pinecone. */ export interface GoogleCloudAiplatformV1RagVectorDbConfigPinecone { /** * Pinecone index name. This value cannot be changed after it's set. */ indexName?: string; } /** * The config for the default RAG-managed Vector DB. */ export interface GoogleCloudAiplatformV1RagVectorDbConfigRagManagedDb { } /** * The config for the Vertex Vector Search. */ export interface GoogleCloudAiplatformV1RagVectorDbConfigVertexVectorSearch { /** * The resource name of the Index. Format: * `projects/{project}/locations/{location}/indexes/{index}` */ index?: string; /** * The resource name of the Index Endpoint. Format: * `projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}` */ indexEndpoint?: string; } /** * Raw output. */ export interface GoogleCloudAiplatformV1RawOutput { /** * Output only. Raw output string. */ readonly rawOutput?: string[]; } /** * Request message for PredictionService.RawPredict. */ export interface GoogleCloudAiplatformV1RawPredictRequest { /** * The prediction input. Supports HTTP headers and arbitrary data payload. 
A * DeployedModel may have an upper limit on the number of instances it * supports per request. When this limit is exceeded for an AutoML model, * the RawPredict method returns an error. When this limit is exceeded for a * custom-trained model, the behavior varies depending on the model. You can * specify the schema for each instance in the * predict_schemata.instance_schema_uri field when you create a Model. This * schema applies when you deploy the `Model` as a `DeployedModel` to an * Endpoint and use the `RawPredict` method. */ httpBody?: GoogleApiHttpBody; } function serializeGoogleCloudAiplatformV1RawPredictRequest(data: any): GoogleCloudAiplatformV1RawPredictRequest { return { ...data, httpBody: data["httpBody"] !== undefined ? serializeGoogleApiHttpBody(data["httpBody"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1RawPredictRequest(data: any): GoogleCloudAiplatformV1RawPredictRequest { return { ...data, httpBody: data["httpBody"] !== undefined ? deserializeGoogleApiHttpBody(data["httpBody"]) : undefined, }; } /** * Configuration for the Ray OSS Logs. */ export interface GoogleCloudAiplatformV1RayLogsSpec { /** * Optional. Flag to disable the export of Ray OSS logs to Cloud Logging. */ disabled?: boolean; } /** * Configuration for the Ray metrics. */ export interface GoogleCloudAiplatformV1RayMetricSpec { /** * Optional. Flag to disable the Ray metrics collection. */ disabled?: boolean; } /** * Configuration information for the Ray cluster. For experimental launch, Ray * cluster creation and Persistent cluster creation are a 1:1 mapping: we will * provision all the nodes within the Persistent cluster as Ray nodes. */ export interface GoogleCloudAiplatformV1RaySpec { /** * Optional. This will be used to indicate which resource pool will serve as * the Ray head node (the first node within that pool). Will use the machine * from the first workerpool as the head node by default if this field isn't * set. */ headNodeResourcePoolId?: string; /** * Optional. Default image for the user to choose a preferred ML framework (for * example, TensorFlow or PyTorch) by choosing from [Vertex prebuilt * images](https://cloud.google.com/vertex-ai/docs/training/pre-built-containers). * Either this or the resource_pool_images is required. Use this field if you * need all the resource pools to have the same Ray image. Otherwise, use the * {@code resource_pool_images} field. */ imageUri?: string; /** * Optional. OSS Ray logging configurations. */ rayLogsSpec?: GoogleCloudAiplatformV1RayLogsSpec; /** * Optional. Ray metrics configurations. */ rayMetricSpec?: GoogleCloudAiplatformV1RayMetricSpec; /** * Optional. Required if image_uri isn't set. A map of resource_pool_id to * prebuilt Ray image if the user needs to use different images for different * head/worker pools. This map needs to cover all the resource pool ids. * Example: { "ray_head_node_pool": "head image" "ray_worker_node_pool1": * "worker image" "ray_worker_node_pool2": "another worker image" } */ resourcePoolImages?: { [key: string]: string }; } /** * Request message for FeaturestoreOnlineServingService.ReadFeatureValues. */ export interface GoogleCloudAiplatformV1ReadFeatureValuesRequest { /** * Required. ID for a specific entity. For example, for a machine learning * model predicting user clicks on a website, an entity ID could be * `user_123`. */ entityId?: string; /** * Required. Selector choosing Features of the target EntityType. 
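 *
 * A rough sketch of a full read request; this assumes the
 * GoogleCloudAiplatformV1FeatureSelector / IdMatcher shapes declared elsewhere
 * in this module, and the entity and feature IDs are placeholders:
 *
 *     const readRequest: GoogleCloudAiplatformV1ReadFeatureValuesRequest = {
 *       entityId: "user_123",
 *       // Selects which Features of the EntityType to read (assumed selector shape).
 *       featureSelector: { idMatcher: { ids: ["age", "last_purchase_amount"] } },
 *     };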
*/ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; } /** * Response message for FeaturestoreOnlineServingService.ReadFeatureValues. */ export interface GoogleCloudAiplatformV1ReadFeatureValuesResponse { /** * Entity view with Feature values. This may be the entity in the * Featurestore if values for all Features were requested, or a projection of * the entity in the Featurestore if values for only some Features were * requested. */ entityView?: GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView; /** * Response header. */ header?: GoogleCloudAiplatformV1ReadFeatureValuesResponseHeader; } function serializeGoogleCloudAiplatformV1ReadFeatureValuesResponse(data: any): GoogleCloudAiplatformV1ReadFeatureValuesResponse { return { ...data, entityView: data["entityView"] !== undefined ? serializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView(data["entityView"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponse(data: any): GoogleCloudAiplatformV1ReadFeatureValuesResponse { return { ...data, entityView: data["entityView"] !== undefined ? deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView(data["entityView"]) : undefined, }; } /** * Entity view with Feature values. */ export interface GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView { /** * Each piece of data holds the k requested values for one requested Feature. * If no values for the requested Feature exist, the corresponding cell will * be empty. This has the same size and is in the same order as the features * from the header ReadFeatureValuesResponse.header. */ data?: GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData[]; /** * ID of the requested entity. */ entityId?: string; } function serializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView(data: any): GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView { return { ...data, data: data["data"] !== undefined ? data["data"].map((item: any) => (serializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView(data: any): GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityView { return { ...data, data: data["data"] !== undefined ? data["data"].map((item: any) => (deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData(item))) : undefined, }; } /** * Container to hold value(s), successive in time, for one Feature from the * request. */ export interface GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData { /** * Feature value if a single value is requested. */ value?: GoogleCloudAiplatformV1FeatureValue; /** * Feature values list if values, successive in time, are requested. If the * requested number of values is greater than the number of existing Feature * values, nonexistent values are omitted instead of being returned as empty. */ values?: GoogleCloudAiplatformV1FeatureValueList; } function serializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData(data: any): GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData { return { ...data, value: data["value"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, values: data["values"] !== undefined ? 
serializeGoogleCloudAiplatformV1FeatureValueList(data["values"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData(data: any): GoogleCloudAiplatformV1ReadFeatureValuesResponseEntityViewData { return { ...data, value: data["value"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, values: data["values"] !== undefined ? deserializeGoogleCloudAiplatformV1FeatureValueList(data["values"]) : undefined, }; } /** * Metadata for requested Features. */ export interface GoogleCloudAiplatformV1ReadFeatureValuesResponseFeatureDescriptor { /** * Feature ID. */ id?: string; } /** * Response header with metadata for the requested * ReadFeatureValuesRequest.entity_type and Features. */ export interface GoogleCloudAiplatformV1ReadFeatureValuesResponseHeader { /** * The resource name of the EntityType from the ReadFeatureValuesRequest. * Value format: * `projects/{project}/locations/{location}/featurestores/{featurestore}/entityTypes/{entityType}`. */ entityType?: string; /** * List of Feature metadata corresponding to each piece of * ReadFeatureValuesResponse.EntityView.data. */ featureDescriptors?: GoogleCloudAiplatformV1ReadFeatureValuesResponseFeatureDescriptor[]; } /** * The request message for MatchService.ReadIndexDatapoints. */ export interface GoogleCloudAiplatformV1ReadIndexDatapointsRequest { /** * The ID of the DeployedIndex that will serve the request. */ deployedIndexId?: string; /** * IDs of the datapoints to be searched for. */ ids?: string[]; } /** * The response message for MatchService.ReadIndexDatapoints. */ export interface GoogleCloudAiplatformV1ReadIndexDatapointsResponse { /** * The result list of datapoints. */ datapoints?: GoogleCloudAiplatformV1IndexDatapoint[]; } function serializeGoogleCloudAiplatformV1ReadIndexDatapointsResponse(data: any): GoogleCloudAiplatformV1ReadIndexDatapointsResponse { return { ...data, datapoints: data["datapoints"] !== undefined ? data["datapoints"].map((item: any) => (serializeGoogleCloudAiplatformV1IndexDatapoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadIndexDatapointsResponse(data: any): GoogleCloudAiplatformV1ReadIndexDatapointsResponse { return { ...data, datapoints: data["datapoints"] !== undefined ? data["datapoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1IndexDatapoint(item))) : undefined, }; } /** * Response message for TensorboardService.ReadTensorboardBlobData. */ export interface GoogleCloudAiplatformV1ReadTensorboardBlobDataResponse { /** * Blob messages containing blob bytes. */ blobs?: GoogleCloudAiplatformV1TensorboardBlob[]; } function serializeGoogleCloudAiplatformV1ReadTensorboardBlobDataResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardBlobDataResponse { return { ...data, blobs: data["blobs"] !== undefined ? data["blobs"].map((item: any) => (serializeGoogleCloudAiplatformV1TensorboardBlob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadTensorboardBlobDataResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardBlobDataResponse { return { ...data, blobs: data["blobs"] !== undefined ? data["blobs"].map((item: any) => (deserializeGoogleCloudAiplatformV1TensorboardBlob(item))) : undefined, }; } /** * Response message for TensorboardService.ReadTensorboardSize. 
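 *
 * After deserializeGoogleCloudAiplatformV1ReadTensorboardSizeResponse below,
 * `storageSizeByte` is a bigint; a small, purely illustrative sketch of
 * converting it for display (the literal value is a placeholder):
 *
 *     // An already-deserialized response value.
 *     const sizeResp: GoogleCloudAiplatformV1ReadTensorboardSizeResponse = { storageSizeByte: 123456789n };
 *     const sizeMiB = Number(sizeResp.storageSizeByte ?? 0n) / (1024 * 1024);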
*/ export interface GoogleCloudAiplatformV1ReadTensorboardSizeResponse { /** * Payload storage size for the TensorBoard */ storageSizeByte?: bigint; } function serializeGoogleCloudAiplatformV1ReadTensorboardSizeResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardSizeResponse { return { ...data, storageSizeByte: data["storageSizeByte"] !== undefined ? String(data["storageSizeByte"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadTensorboardSizeResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardSizeResponse { return { ...data, storageSizeByte: data["storageSizeByte"] !== undefined ? BigInt(data["storageSizeByte"]) : undefined, }; } /** * Response message for TensorboardService.ReadTensorboardTimeSeriesData. */ export interface GoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse { /** * The returned time series data. */ timeSeriesData?: GoogleCloudAiplatformV1TimeSeriesData; } function serializeGoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? serializeGoogleCloudAiplatformV1TimeSeriesData(data["timeSeriesData"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardTimeSeriesDataResponse { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? deserializeGoogleCloudAiplatformV1TimeSeriesData(data["timeSeriesData"]) : undefined, }; } /** * Response message for TensorboardService.ReadTensorboardUsage. */ export interface GoogleCloudAiplatformV1ReadTensorboardUsageResponse { /** * Maps year-month (YYYYMM) string to per month usage data. */ monthlyUsageData?: { [key: string]: GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData }; } function serializeGoogleCloudAiplatformV1ReadTensorboardUsageResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardUsageResponse { return { ...data, monthlyUsageData: data["monthlyUsageData"] !== undefined ? Object.fromEntries(Object.entries(data["monthlyUsageData"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData(v)]))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponse(data: any): GoogleCloudAiplatformV1ReadTensorboardUsageResponse { return { ...data, monthlyUsageData: data["monthlyUsageData"] !== undefined ? Object.fromEntries(Object.entries(data["monthlyUsageData"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData(v)]))) : undefined, }; } /** * Per month usage data */ export interface GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData { /** * Usage data for each user in the given month. */ userUsageData?: GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData[]; } function serializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData(data: any): GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData { return { ...data, userUsageData: data["userUsageData"] !== undefined ? 
data["userUsageData"].map((item: any) => (serializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData(data: any): GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerMonthUsageData { return { ...data, userUsageData: data["userUsageData"] !== undefined ? data["userUsageData"].map((item: any) => (deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData(item))) : undefined, }; } /** * Per user usage data. */ export interface GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData { /** * User's username */ username?: string; /** * Number of times the user has read data within the Tensorboard. */ viewCount?: bigint; } function serializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData(data: any): GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData { return { ...data, viewCount: data["viewCount"] !== undefined ? String(data["viewCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData(data: any): GoogleCloudAiplatformV1ReadTensorboardUsageResponsePerUserUsageData { return { ...data, viewCount: data["viewCount"] !== undefined ? BigInt(data["viewCount"]) : undefined, }; } /** * ReasoningEngine provides a customizable runtime for models to determine * which actions to take and in which order. */ export interface GoogleCloudAiplatformV1ReasoningEngine { /** * Output only. Timestamp when this ReasoningEngine was created. */ readonly createTime?: Date; /** * Optional. The description of the ReasoningEngine. */ description?: string; /** * Required. The display name of the ReasoningEngine. */ displayName?: string; /** * Optional. Used to perform consistent read-modify-write updates. If not * set, a blind "overwrite" update happens. */ etag?: string; /** * Identifier. The resource name of the ReasoningEngine. */ name?: string; /** * Optional. Configurations of the ReasoningEngine */ spec?: GoogleCloudAiplatformV1ReasoningEngineSpec; /** * Output only. Timestamp when this ReasoningEngine was most recently * updated. */ readonly updateTime?: Date; } /** * ReasoningEngine configurations */ export interface GoogleCloudAiplatformV1ReasoningEngineSpec { /** * Optional. The OSS agent framework used to develop the agent. Currently * supported values: "google-adk", "langchain", "langgraph", "ag2", * "llama-index", "custom". */ agentFramework?: string; /** * Optional. Declarations for object class methods in OpenAPI specification * format. */ classMethods?: { [key: string]: any }[]; /** * Optional. The specification of a Reasoning Engine deployment. */ deploymentSpec?: GoogleCloudAiplatformV1ReasoningEngineSpecDeploymentSpec; /** * Optional. User provided package spec of the ReasoningEngine. Ignored when * users directly specify a deployment image through * `deployment_spec.first_party_image_override`, but keeping the * field_behavior to avoid introducing breaking changes. */ packageSpec?: GoogleCloudAiplatformV1ReasoningEngineSpecPackageSpec; } /** * The specification of a Reasoning Engine deployment. */ export interface GoogleCloudAiplatformV1ReasoningEngineSpecDeploymentSpec { /** * Optional. Environment variables to be set with the Reasoning Engine * deployment. The environment variables can be updated through the * UpdateReasoningEngine API. */ env?: GoogleCloudAiplatformV1EnvVar[]; /** * Optional. 
Environment variables where the value is a secret in Cloud * Secret Manager. To use this feature, add 'Secret Manager Secret Accessor' * role (roles/secretmanager.secretAccessor) to AI Platform Reasoning Engine * Service Agent. */ secretEnv?: GoogleCloudAiplatformV1SecretEnvVar[]; } /** * User provided package spec like pickled object and package requirements. */ export interface GoogleCloudAiplatformV1ReasoningEngineSpecPackageSpec { /** * Optional. The Cloud Storage URI of the dependency files in tar.gz format. */ dependencyFilesGcsUri?: string; /** * Optional. The Cloud Storage URI of the pickled python object. */ pickleObjectGcsUri?: string; /** * Optional. The Python version. Currently support 3.8, 3.9, 3.10, 3.11. If * not specified, default value is 3.10. */ pythonVersion?: string; /** * Optional. The Cloud Storage URI of the `requirements.txt` file */ requirementsGcsUri?: string; } /** * Request message for GenAiTuningService.RebaseTunedModel. */ export interface GoogleCloudAiplatformV1RebaseTunedModelRequest { /** * Optional. The Google Cloud Storage location to write the artifacts. */ artifactDestination?: GoogleCloudAiplatformV1GcsDestination; /** * Optional. By default, bison to gemini migration will always create new * model/endpoint, but for gemini-1.0 to gemini-1.5 migration, we default * deploy to the same endpoint. See details in this Section. */ deployToSameEndpoint?: boolean; /** * Required. TunedModel reference to retrieve the legacy model information. */ tunedModelRef?: GoogleCloudAiplatformV1TunedModelRef; /** * Optional. The TuningJob to be updated. Users can use this TuningJob field * to overwrite tuning configs. */ tuningJob?: GoogleCloudAiplatformV1TuningJob; } function serializeGoogleCloudAiplatformV1RebaseTunedModelRequest(data: any): GoogleCloudAiplatformV1RebaseTunedModelRequest { return { ...data, tuningJob: data["tuningJob"] !== undefined ? serializeGoogleCloudAiplatformV1TuningJob(data["tuningJob"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1RebaseTunedModelRequest(data: any): GoogleCloudAiplatformV1RebaseTunedModelRequest { return { ...data, tuningJob: data["tuningJob"] !== undefined ? deserializeGoogleCloudAiplatformV1TuningJob(data["tuningJob"]) : undefined, }; } /** * Details of operations that perform reboot PersistentResource. */ export interface GoogleCloudAiplatformV1RebootPersistentResourceOperationMetadata { /** * Operation metadata for PersistentResource. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Progress Message for Reboot LRO */ progressMessage?: string; } /** * Request message for PersistentResourceService.RebootPersistentResource. */ export interface GoogleCloudAiplatformV1RebootPersistentResourceRequest { } /** * Request message for MetadataService.DeleteContextChildrenRequest. */ export interface GoogleCloudAiplatformV1RemoveContextChildrenRequest { /** * The resource names of the child Contexts. */ childContexts?: string[]; } /** * Response message for MetadataService.RemoveContextChildren. */ export interface GoogleCloudAiplatformV1RemoveContextChildrenResponse { } /** * Request message for IndexService.RemoveDatapoints */ export interface GoogleCloudAiplatformV1RemoveDatapointsRequest { /** * A list of datapoint ids to be deleted. 
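 *
 * For example (illustrative only; the IDs are placeholders), a removal request
 * is just the list of datapoint IDs, and the corresponding response message
 * documented below carries no fields:
 *
 *     const removeReq: GoogleCloudAiplatformV1RemoveDatapointsRequest = {
 *       datapointIds: ["datapoint-001", "datapoint-002"],
 *     };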
*/ datapointIds?: string[]; } /** * Response message for IndexService.RemoveDatapoints */ export interface GoogleCloudAiplatformV1RemoveDatapointsResponse { } /** * A ReservationAffinity can be used to configure a Vertex AI resource (e.g., a * DeployedModel) to draw its Compute Engine resources from a Shared * Reservation, or exclusively from on-demand capacity. */ export interface GoogleCloudAiplatformV1ReservationAffinity { /** * Optional. Corresponds to the label key of a reservation resource. To * target a SPECIFIC_RESERVATION by name, use * `compute.googleapis.com/reservation-name` as the key and specify the name * of your reservation as its value. */ key?: string; /** * Required. Specifies the reservation affinity type. */ reservationAffinityType?: | "TYPE_UNSPECIFIED" | "NO_RESERVATION" | "ANY_RESERVATION" | "SPECIFIC_RESERVATION"; /** * Optional. Corresponds to the label values of a reservation resource. This * must be the full resource name of the reservation or reservation block. */ values?: string[]; } /** * Represents the spec of a group of resources of the same type, for example * machine type, disk, and accelerators, in a PersistentResource. */ export interface GoogleCloudAiplatformV1ResourcePool { /** * Optional. Optional spec to configure GKE or Ray-on-Vertex autoscaling */ autoscalingSpec?: GoogleCloudAiplatformV1ResourcePoolAutoscalingSpec; /** * Optional. Disk spec for the machine in this node pool. */ diskSpec?: GoogleCloudAiplatformV1DiskSpec; /** * Immutable. The unique ID in a PersistentResource for referring to this * resource pool. User can specify it if necessary. Otherwise, it's generated * automatically. */ id?: string; /** * Required. Immutable. The specification of a single machine. */ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Optional. The total number of machines to use for this resource pool. */ replicaCount?: bigint; /** * Output only. The number of machines currently in use by training jobs for * this resource pool. Will replace idle_replica_count. */ readonly usedReplicaCount?: bigint; } function serializeGoogleCloudAiplatformV1ResourcePool(data: any): GoogleCloudAiplatformV1ResourcePool { return { ...data, autoscalingSpec: data["autoscalingSpec"] !== undefined ? serializeGoogleCloudAiplatformV1ResourcePoolAutoscalingSpec(data["autoscalingSpec"]) : undefined, replicaCount: data["replicaCount"] !== undefined ? String(data["replicaCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ResourcePool(data: any): GoogleCloudAiplatformV1ResourcePool { return { ...data, autoscalingSpec: data["autoscalingSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1ResourcePoolAutoscalingSpec(data["autoscalingSpec"]) : undefined, replicaCount: data["replicaCount"] !== undefined ? BigInt(data["replicaCount"]) : undefined, usedReplicaCount: data["usedReplicaCount"] !== undefined ? BigInt(data["usedReplicaCount"]) : undefined, }; } /** * The min/max number of replicas allowed if enabling autoscaling */ export interface GoogleCloudAiplatformV1ResourcePoolAutoscalingSpec { /** * Optional. max replicas in the node pool, must be ≥ replica_count and > * min_replica_count or will throw error */ maxReplicaCount?: bigint; /** * Optional. min replicas in the node pool, must be ≤ replica_count and < * max_replica_count or will throw error. 
For autoscaling enabled * Ray-on-Vertex, we allow min_replica_count of a resource_pool to be 0 to * match the OSS Ray * behavior(https://docs.ray.io/en/latest/cluster/vms/user-guides/configuring-autoscaling.html#cluster-config-parameters). * As for Persistent Resource, the min_replica_count must be > 0, we added a * corresponding validation inside * CreatePersistentResourceRequestValidator.java. */ minReplicaCount?: bigint; } function serializeGoogleCloudAiplatformV1ResourcePoolAutoscalingSpec(data: any): GoogleCloudAiplatformV1ResourcePoolAutoscalingSpec { return { ...data, maxReplicaCount: data["maxReplicaCount"] !== undefined ? String(data["maxReplicaCount"]) : undefined, minReplicaCount: data["minReplicaCount"] !== undefined ? String(data["minReplicaCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1ResourcePoolAutoscalingSpec(data: any): GoogleCloudAiplatformV1ResourcePoolAutoscalingSpec { return { ...data, maxReplicaCount: data["maxReplicaCount"] !== undefined ? BigInt(data["maxReplicaCount"]) : undefined, minReplicaCount: data["minReplicaCount"] !== undefined ? BigInt(data["minReplicaCount"]) : undefined, }; } /** * Persistent Cluster runtime information as output */ export interface GoogleCloudAiplatformV1ResourceRuntime { /** * Output only. URIs for user to connect to the Cluster. Example: { * "RAY_HEAD_NODE_INTERNAL_IP": "head-node-IP:10001" "RAY_DASHBOARD_URI": * "ray-dashboard-address:8888" } */ readonly accessUris?: { [key: string]: string }; } /** * Configuration for the runtime on a PersistentResource instance, including * but not limited to: * Service accounts used to run the workloads. * Whether * to make it a dedicated Ray Cluster. */ export interface GoogleCloudAiplatformV1ResourceRuntimeSpec { /** * Optional. Ray cluster configuration. Required when creating a dedicated * RayCluster on the PersistentResource. */ raySpec?: GoogleCloudAiplatformV1RaySpec; /** * Optional. Configure the use of workload identity on the PersistentResource */ serviceAccountSpec?: GoogleCloudAiplatformV1ServiceAccountSpec; } /** * Statistics information about resource consumption. */ export interface GoogleCloudAiplatformV1ResourcesConsumed { /** * Output only. The number of replica hours used. Note that many replicas may * run in parallel, and additionally any given work may be queued for some * time. Therefore this value is not strictly related to wall time. */ readonly replicaHours?: number; } /** * Runtime operation information for DatasetService.RestoreDatasetVersion. */ export interface GoogleCloudAiplatformV1RestoreDatasetVersionOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for JobService.ResumeModelDeploymentMonitoringJob. */ export interface GoogleCloudAiplatformV1ResumeModelDeploymentMonitoringJobRequest { } /** * Request message for ScheduleService.ResumeSchedule. */ export interface GoogleCloudAiplatformV1ResumeScheduleRequest { /** * Optional. Whether to backfill missed runs when the schedule is resumed * from PAUSED state. If set to true, all missed runs will be scheduled. New * runs will be scheduled after the backfill is complete. This will also * update Schedule.catch_up field. Default to false. */ catchUp?: boolean; } /** * Defines a retrieval tool that model can call to access external knowledge. */ export interface GoogleCloudAiplatformV1Retrieval { /** * Optional. Deprecated. This option is no longer supported. 
*/ disableAttribution?: boolean; /** * Set to use data source powered by Vertex AI Search. */ vertexAiSearch?: GoogleCloudAiplatformV1VertexAISearch; /** * Set to use data source powered by Vertex RAG store. User data is uploaded * via the VertexRagDataService. */ vertexRagStore?: GoogleCloudAiplatformV1VertexRagStore; } /** * Retrieval config. */ export interface GoogleCloudAiplatformV1RetrievalConfig { /** * The language code of the user. */ languageCode?: string; /** * The location of the user. */ latLng?: GoogleTypeLatLng; } /** * Metadata related to retrieval in the grounding flow. */ export interface GoogleCloudAiplatformV1RetrievalMetadata { /** * Optional. Score indicating how likely information from Google Search could * help answer the prompt. The score is in the range `[0, 1]`, where 0 is the * least likely and 1 is the most likely. This score is only populated when * Google Search grounding and dynamic retrieval is enabled. It will be * compared to the threshold to determine whether to trigger Google Search. */ googleSearchDynamicRetrievalScore?: number; } /** * Request message for VertexRagService.RetrieveContexts. */ export interface GoogleCloudAiplatformV1RetrieveContextsRequest { /** * Required. Single RAG retrieve query. */ query?: GoogleCloudAiplatformV1RagQuery; /** * The data source for Vertex RagStore. */ vertexRagStore?: GoogleCloudAiplatformV1RetrieveContextsRequestVertexRagStore; } /** * The data source for Vertex RagStore. */ export interface GoogleCloudAiplatformV1RetrieveContextsRequestVertexRagStore { /** * Optional. The representation of the rag source. It can be used to specify * corpus only or ragfiles. Currently only support one corpus or multiple * files from one corpus. In the future we may open up multiple corpora * support. */ ragResources?: GoogleCloudAiplatformV1RetrieveContextsRequestVertexRagStoreRagResource[]; /** * Optional. Only return contexts with vector distance smaller than the * threshold. */ vectorDistanceThreshold?: number; } /** * The definition of the Rag resource. */ export interface GoogleCloudAiplatformV1RetrieveContextsRequestVertexRagStoreRagResource { /** * Optional. RagCorpora resource name. Format: * `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ ragCorpus?: string; /** * Optional. rag_file_id. The files should be in the same rag_corpus set in * rag_corpus field. */ ragFileIds?: string[]; } /** * Response message for VertexRagService.RetrieveContexts. */ export interface GoogleCloudAiplatformV1RetrieveContextsResponse { /** * The contexts of the query. */ contexts?: GoogleCloudAiplatformV1RagContexts; } /** * Input for rouge metric. */ export interface GoogleCloudAiplatformV1RougeInput { /** * Required. Repeated rouge instances. */ instances?: GoogleCloudAiplatformV1RougeInstance[]; /** * Required. Spec for rouge score metric. */ metricSpec?: GoogleCloudAiplatformV1RougeSpec; } /** * Spec for rouge instance. */ export interface GoogleCloudAiplatformV1RougeInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Rouge metric value for an instance. */ export interface GoogleCloudAiplatformV1RougeMetricValue { /** * Output only. Rouge score. */ readonly score?: number; } /** * Results for rouge metric. */ export interface GoogleCloudAiplatformV1RougeResults { /** * Output only. Rouge metric values. 
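 *
 * Each value corresponds to one of the instances submitted in a
 * GoogleCloudAiplatformV1RougeInput, e.g. (placeholder strings; supported
 * rougeType values are listed in the spec documented below):
 *
 *     const rougeInput: GoogleCloudAiplatformV1RougeInput = {
 *       metricSpec: { rougeType: "rougeL", useStemmer: true },
 *       instances: [
 *         { prediction: "the cat sat on the mat", reference: "a cat sat on the mat" },
 *       ],
 *     };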
*/ readonly rougeMetricValues?: GoogleCloudAiplatformV1RougeMetricValue[]; } /** * Spec for rouge score metric - calculates the recall of n-grams in prediction * as compared to reference - returns a score ranging between 0 and 1. */ export interface GoogleCloudAiplatformV1RougeSpec { /** * Optional. Supported rouge types are rougen[1-9], rougeL, and rougeLsum. */ rougeType?: string; /** * Optional. Whether to split summaries while using rougeLsum. */ splitSummaries?: boolean; /** * Optional. Whether to use stemmer to compute rouge score. */ useStemmer?: boolean; } /** * Instance and metric spec for RubricBasedInstructionFollowing metric. */ export interface GoogleCloudAiplatformV1RubricBasedInstructionFollowingInput { /** * Required. Instance for RubricBasedInstructionFollowing metric. */ instance?: GoogleCloudAiplatformV1RubricBasedInstructionFollowingInstance; /** * Required. Spec for RubricBasedInstructionFollowing metric. */ metricSpec?: GoogleCloudAiplatformV1RubricBasedInstructionFollowingSpec; } /** * Instance for RubricBasedInstructionFollowing metric - one instance * corresponds to one row in an evaluation dataset. */ export interface GoogleCloudAiplatformV1RubricBasedInstructionFollowingInstance { /** * Required. Instance specified as a json string. String key-value pairs are * expected in the json_instance to render RubricBasedInstructionFollowing * prompt templates. */ jsonInstance?: string; } /** * Result for RubricBasedInstructionFollowing metric. */ export interface GoogleCloudAiplatformV1RubricBasedInstructionFollowingResult { /** * Output only. List of per rubric critique results. */ readonly rubricCritiqueResults?: GoogleCloudAiplatformV1RubricCritiqueResult[]; /** * Output only. Overall score for the instruction following. */ readonly score?: number; } /** * Spec for RubricBasedInstructionFollowing metric - returns rubrics and * verdicts corresponding to rubrics along with overall score. */ export interface GoogleCloudAiplatformV1RubricBasedInstructionFollowingSpec { } /** * Rubric critique result. */ export interface GoogleCloudAiplatformV1RubricCritiqueResult { /** * Output only. Rubric to be evaluated. */ readonly rubric?: string; /** * Output only. Verdict for the rubric - true if the rubric is met, false * otherwise. */ readonly verdict?: boolean; } /** * Input for safety metric. */ export interface GoogleCloudAiplatformV1SafetyInput { /** * Required. Safety instance. */ instance?: GoogleCloudAiplatformV1SafetyInstance; /** * Required. Spec for safety metric. */ metricSpec?: GoogleCloudAiplatformV1SafetySpec; } /** * Spec for safety instance. */ export interface GoogleCloudAiplatformV1SafetyInstance { /** * Required. Output of the evaluated model. */ prediction?: string; } /** * Safety rating corresponding to the generated content. */ export interface GoogleCloudAiplatformV1SafetyRating { /** * Output only. Indicates whether the content was filtered out because of * this rating. */ readonly blocked?: boolean; /** * Output only. Harm category. */ readonly category?: | "HARM_CATEGORY_UNSPECIFIED" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_DANGEROUS_CONTENT" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_CIVIC_INTEGRITY"; /** * Output only. Harm probability levels in the content. */ readonly probability?: | "HARM_PROBABILITY_UNSPECIFIED" | "NEGLIGIBLE" | "LOW" | "MEDIUM" | "HIGH"; /** * Output only. Harm probability score. */ readonly probabilityScore?: number; /** * Output only. Harm severity levels in the content. 
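 *
 * As an informal sketch, a caller inspecting a list of ratings might surface
 * only the blocked categories; the field names come from this interface, and
 * nothing here is prescribed by the API:
 *
 *     function blockedCategories(ratings: GoogleCloudAiplatformV1SafetyRating[]): string[] {
 *       // Keep ratings that caused filtering and report their harm categories.
 *       return ratings
 *         .filter((r) => r.blocked === true)
 *         .map((r) => r.category ?? "HARM_CATEGORY_UNSPECIFIED");
 *     }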
*/ readonly severity?: | "HARM_SEVERITY_UNSPECIFIED" | "HARM_SEVERITY_NEGLIGIBLE" | "HARM_SEVERITY_LOW" | "HARM_SEVERITY_MEDIUM" | "HARM_SEVERITY_HIGH"; /** * Output only. Harm severity score. */ readonly severityScore?: number; } /** * Spec for safety result. */ export interface GoogleCloudAiplatformV1SafetyResult { /** * Output only. Confidence for safety score. */ readonly confidence?: number; /** * Output only. Explanation for safety score. */ readonly explanation?: string; /** * Output only. Safety score. */ readonly score?: number; } /** * Safety settings. */ export interface GoogleCloudAiplatformV1SafetySetting { /** * Required. Harm category. */ category?: | "HARM_CATEGORY_UNSPECIFIED" | "HARM_CATEGORY_HATE_SPEECH" | "HARM_CATEGORY_DANGEROUS_CONTENT" | "HARM_CATEGORY_HARASSMENT" | "HARM_CATEGORY_SEXUALLY_EXPLICIT" | "HARM_CATEGORY_CIVIC_INTEGRITY"; /** * Optional. Specify if the threshold is used for probability or severity * score. If not specified, the threshold is used for probability score. */ method?: | "HARM_BLOCK_METHOD_UNSPECIFIED" | "SEVERITY" | "PROBABILITY"; /** * Required. The harm block threshold. */ threshold?: | "HARM_BLOCK_THRESHOLD_UNSPECIFIED" | "BLOCK_LOW_AND_ABOVE" | "BLOCK_MEDIUM_AND_ABOVE" | "BLOCK_ONLY_HIGH" | "BLOCK_NONE" | "OFF"; } /** * Spec for safety metric. */ export interface GoogleCloudAiplatformV1SafetySpec { /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Active learning data sampling config. For every active learning labeling * iteration, it will select a batch of data based on the sampling strategy. */ export interface GoogleCloudAiplatformV1SampleConfig { /** * The percentage of data needed to be labeled in each following batch * (except the first batch). */ followingBatchSamplePercentage?: number; /** * The percentage of data needed to be labeled in the first batch. */ initialBatchSamplePercentage?: number; /** * Field to choose sampling strategy. Sampling strategy will decide which * data should be selected for human labeling in every batch. */ sampleStrategy?: | "SAMPLE_STRATEGY_UNSPECIFIED" | "UNCERTAINTY"; } /** * An attribution method that approximates Shapley values for features that * contribute to the label being predicted. A sampling strategy is used to * approximate the value rather than considering all subsets of features. */ export interface GoogleCloudAiplatformV1SampledShapleyAttribution { /** * Required. The number of feature permutations to consider when * approximating the Shapley values. Valid range of its value is [1, 50], * inclusively. */ pathCount?: number; } /** * Sampling Strategy for logging, can be for both training and prediction * dataset. */ export interface GoogleCloudAiplatformV1SamplingStrategy { /** * Random sample config. Will support more sampling strategies later. */ randomSampleConfig?: GoogleCloudAiplatformV1SamplingStrategyRandomSampleConfig; } /** * Requests are randomly selected. */ export interface GoogleCloudAiplatformV1SamplingStrategyRandomSampleConfig { /** * Sample rate (0, 1] */ sampleRate?: number; } /** * A SavedQuery is a view of the dataset. It references a subset of annotations * by problem type and filters. */ export interface GoogleCloudAiplatformV1SavedQuery { /** * Output only. Filters on the Annotations in the dataset. */ readonly annotationFilter?: string; /** * Output only. Number of AnnotationSpecs in the context of the SavedQuery. */ readonly annotationSpecCount?: number; /** * Output only. Timestamp when this SavedQuery was created. 
*/ readonly createTime?: Date; /** * Required. The user-defined name of the SavedQuery. The name can be up to * 128 characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Used to perform a consistent read-modify-write update. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * Some additional information about the SavedQuery. */ metadata?: any; /** * Output only. Resource name of the SavedQuery. */ readonly name?: string; /** * Required. Problem type of the SavedQuery. Allowed values: * * IMAGE_CLASSIFICATION_SINGLE_LABEL * IMAGE_CLASSIFICATION_MULTI_LABEL * * IMAGE_BOUNDING_POLY * IMAGE_BOUNDING_BOX * TEXT_CLASSIFICATION_SINGLE_LABEL * * TEXT_CLASSIFICATION_MULTI_LABEL * TEXT_EXTRACTION * TEXT_SENTIMENT * * VIDEO_CLASSIFICATION * VIDEO_OBJECT_TRACKING */ problemType?: string; /** * Output only. If the Annotations belonging to the SavedQuery can be used * for AutoML training. */ readonly supportAutomlTraining?: boolean; /** * Output only. Timestamp when SavedQuery was last updated. */ readonly updateTime?: Date; } /** * One point viewable on a scalar metric plot. */ export interface GoogleCloudAiplatformV1Scalar { /** * Value of the point at this step / timestamp. */ value?: number; } /** * An instance of a Schedule periodically schedules runs to make API calls * based on user specified time specification and API request type. */ export interface GoogleCloudAiplatformV1Schedule { /** * Optional. Whether new scheduled runs can be queued when * max_concurrent_runs limit is reached. If set to true, new runs will be * queued instead of skipped. Default to false. */ allowQueueing?: boolean; /** * Output only. Whether to backfill missed runs when the schedule is resumed * from PAUSED state. If set to true, all missed runs will be scheduled. New * runs will be scheduled after the backfill is complete. Default to false. */ readonly catchUp?: boolean; /** * Request for NotebookService.CreateNotebookExecutionJob. */ createNotebookExecutionJobRequest?: GoogleCloudAiplatformV1CreateNotebookExecutionJobRequest; /** * Request for PipelineService.CreatePipelineJob. * CreatePipelineJobRequest.parent field is required (format: * projects/{project}/locations/{location}). */ createPipelineJobRequest?: GoogleCloudAiplatformV1CreatePipelineJobRequest; /** * Output only. Timestamp when this Schedule was created. */ readonly createTime?: Date; /** * Cron schedule (https://en.wikipedia.org/wiki/Cron) to launch scheduled * runs. To explicitly set a timezone to the cron tab, apply a prefix in the * cron tab: "CRON_TZ=${IANA_TIME_ZONE}" or "TZ=${IANA_TIME_ZONE}". The * ${IANA_TIME_ZONE} may only be a valid string from IANA time zone database. * For example, "CRON_TZ=America/New_York 1 * * * *", or "TZ=America/New_York * 1 * * * *". */ cron?: string; /** * Required. User provided name of the Schedule. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ displayName?: string; /** * Optional. Timestamp after which no new runs can be scheduled. If * specified, The schedule will be completed when either end_time is reached * or when scheduled_run_count >= max_run_count. If not specified, new runs * will keep getting scheduled until this Schedule is paused or deleted. * Already scheduled runs will be allowed to complete. Unset if not specified. */ endTime?: Date; /** * Output only. Timestamp when this Schedule was last paused. Unset if never * paused. */ readonly lastPauseTime?: Date; /** * Output only. 
Timestamp when this Schedule was last resumed. Unset if never * resumed from pause. */ readonly lastResumeTime?: Date; /** * Output only. Response of the last scheduled run. This is the response for * starting the scheduled requests and not the execution of the * operations/jobs created by the requests (if applicable). Unset if no run * has been scheduled yet. */ readonly lastScheduledRunResponse?: GoogleCloudAiplatformV1ScheduleRunResponse; /** * Required. Maximum number of runs that can be started concurrently for this * Schedule. This is the limit for starting the scheduled requests and not the * execution of the operations/jobs created by the requests (if applicable). */ maxConcurrentRunCount?: bigint; /** * Optional. Maximum run count of the schedule. If specified, The schedule * will be completed when either started_run_count >= max_run_count or when * end_time is reached. If not specified, new runs will keep getting scheduled * until this Schedule is paused or deleted. Already scheduled runs will be * allowed to complete. Unset if not specified. */ maxRunCount?: bigint; /** * Immutable. The resource name of the Schedule. */ name?: string; /** * Output only. Timestamp when this Schedule should schedule the next run. * Having a next_run_time in the past means the runs are being started behind * schedule. */ readonly nextRunTime?: Date; /** * Output only. The number of runs started by this schedule. */ readonly startedRunCount?: bigint; /** * Optional. Timestamp after which the first run can be scheduled. Default to * Schedule create time if not specified. */ startTime?: Date; /** * Output only. The state of this Schedule. */ readonly state?: | "STATE_UNSPECIFIED" | "ACTIVE" | "PAUSED" | "COMPLETED"; /** * Output only. Timestamp when this Schedule was updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1Schedule(data: any): GoogleCloudAiplatformV1Schedule { return { ...data, createNotebookExecutionJobRequest: data["createNotebookExecutionJobRequest"] !== undefined ? serializeGoogleCloudAiplatformV1CreateNotebookExecutionJobRequest(data["createNotebookExecutionJobRequest"]) : undefined, createPipelineJobRequest: data["createPipelineJobRequest"] !== undefined ? serializeGoogleCloudAiplatformV1CreatePipelineJobRequest(data["createPipelineJobRequest"]) : undefined, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, maxConcurrentRunCount: data["maxConcurrentRunCount"] !== undefined ? String(data["maxConcurrentRunCount"]) : undefined, maxRunCount: data["maxRunCount"] !== undefined ? String(data["maxRunCount"]) : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1Schedule(data: any): GoogleCloudAiplatformV1Schedule { return { ...data, createNotebookExecutionJobRequest: data["createNotebookExecutionJobRequest"] !== undefined ? deserializeGoogleCloudAiplatformV1CreateNotebookExecutionJobRequest(data["createNotebookExecutionJobRequest"]) : undefined, createPipelineJobRequest: data["createPipelineJobRequest"] !== undefined ? deserializeGoogleCloudAiplatformV1CreatePipelineJobRequest(data["createPipelineJobRequest"]) : undefined, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, lastPauseTime: data["lastPauseTime"] !== undefined ? 
new Date(data["lastPauseTime"]) : undefined, lastResumeTime: data["lastResumeTime"] !== undefined ? new Date(data["lastResumeTime"]) : undefined, lastScheduledRunResponse: data["lastScheduledRunResponse"] !== undefined ? deserializeGoogleCloudAiplatformV1ScheduleRunResponse(data["lastScheduledRunResponse"]) : undefined, maxConcurrentRunCount: data["maxConcurrentRunCount"] !== undefined ? BigInt(data["maxConcurrentRunCount"]) : undefined, maxRunCount: data["maxRunCount"] !== undefined ? BigInt(data["maxRunCount"]) : undefined, nextRunTime: data["nextRunTime"] !== undefined ? new Date(data["nextRunTime"]) : undefined, startedRunCount: data["startedRunCount"] !== undefined ? BigInt(data["startedRunCount"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Status of a scheduled run. */ export interface GoogleCloudAiplatformV1ScheduleRunResponse { /** * The response of the scheduled run. */ runResponse?: string; /** * The scheduled run time based on the user-specified schedule. */ scheduledRunTime?: Date; } function serializeGoogleCloudAiplatformV1ScheduleRunResponse(data: any): GoogleCloudAiplatformV1ScheduleRunResponse { return { ...data, scheduledRunTime: data["scheduledRunTime"] !== undefined ? data["scheduledRunTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1ScheduleRunResponse(data: any): GoogleCloudAiplatformV1ScheduleRunResponse { return { ...data, scheduledRunTime: data["scheduledRunTime"] !== undefined ? new Date(data["scheduledRunTime"]) : undefined, }; } /** * All parameters related to queuing and scheduling of custom jobs. */ export interface GoogleCloudAiplatformV1Scheduling { /** * Optional. Indicates if the job should retry for internal errors after the * job starts running. If true, overrides * `Scheduling.restart_job_on_worker_restart` to false. */ disableRetries?: boolean; /** * Optional. This is the maximum duration that a job will wait for the * requested resources to be provisioned if the scheduling strategy is set to * [Strategy.DWS_FLEX_START]. If set to 0, the job will wait indefinitely. The * default is 24 hours. */ maxWaitDuration?: number /* Duration */; /** * Optional. Restarts the entire CustomJob if a worker gets restarted. This * feature can be used by distributed training jobs that are not resilient to * workers leaving and joining a job. */ restartJobOnWorkerRestart?: boolean; /** * Optional. This determines which type of scheduling strategy to use. */ strategy?: | "STRATEGY_UNSPECIFIED" | "ON_DEMAND" | "LOW_COST" | "STANDARD" | "SPOT" | "FLEX_START"; /** * Optional. The maximum job running time. The default is 7 days. */ timeout?: number /* Duration */; } function serializeGoogleCloudAiplatformV1Scheduling(data: any): GoogleCloudAiplatformV1Scheduling { return { ...data, maxWaitDuration: data["maxWaitDuration"] !== undefined ? data["maxWaitDuration"] : undefined, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeGoogleCloudAiplatformV1Scheduling(data: any): GoogleCloudAiplatformV1Scheduling { return { ...data, maxWaitDuration: data["maxWaitDuration"] !== undefined ? data["maxWaitDuration"] : undefined, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Schema is used to define the format of input/output data. 
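* As an editorial illustration (a sketch; the field values are made up and the
* variable name recipeSchema is hypothetical), a simple object schema can be
* written against this type as:
*
*     const recipeSchema: GoogleCloudAiplatformV1Schema = {
*       type: "OBJECT",
*       properties: {
*         name: { type: "STRING", description: "Recipe name" },
*         servings: { type: "INTEGER", format: "int32", minimum: 1 },
*         tags: { type: "ARRAY", items: { type: "STRING" } },
*       },
*       required: ["name"],
*     };
*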
Represents a * select subset of an [OpenAPI 3.0 schema * object](https://spec.openapis.org/oas/v3.0.3#schema-object). More fields may * be added in the future as needed. */ export interface GoogleCloudAiplatformV1Schema { /** * Optional. The value should be validated against any (one or more) of the * subschemas in the list. */ anyOf?: GoogleCloudAiplatformV1Schema[]; /** * Optional. Default value of the data. */ default?: any; /** * Optional. The description of the data. */ description?: string; /** * Optional. Possible values of the element of primitive type with enum * format. Examples: 1. We can define direction as : {type:STRING, * format:enum, enum:["EAST", "NORTH", "SOUTH", "WEST"]} 2. We can define * apartment number as : {type:INTEGER, format:enum, enum:["101", "201", * "301"]} */ enum?: string[]; /** * Optional. Example of the object. Will only be populated when the object is * the root. */ example?: any; /** * Optional. The format of the data. Supported formats: for NUMBER type: * "float", "double" for INTEGER type: "int32", "int64" for STRING type: * "email", "byte", etc. */ format?: string; /** * Optional. SCHEMA FIELDS FOR TYPE ARRAY Schema of the elements of * Type.ARRAY. */ items?: GoogleCloudAiplatformV1Schema; /** * Optional. Maximum value of the Type.INTEGER and Type.NUMBER */ maximum?: number; /** * Optional. Maximum number of the elements for Type.ARRAY. */ maxItems?: bigint; /** * Optional. Maximum length of the Type.STRING */ maxLength?: bigint; /** * Optional. Maximum number of the properties for Type.OBJECT. */ maxProperties?: bigint; /** * Optional. SCHEMA FIELDS FOR TYPE INTEGER and NUMBER Minimum value of the * Type.INTEGER and Type.NUMBER */ minimum?: number; /** * Optional. Minimum number of the elements for Type.ARRAY. */ minItems?: bigint; /** * Optional. SCHEMA FIELDS FOR TYPE STRING Minimum length of the Type.STRING */ minLength?: bigint; /** * Optional. Minimum number of the properties for Type.OBJECT. */ minProperties?: bigint; /** * Optional. Indicates if the value may be null. */ nullable?: boolean; /** * Optional. Pattern of the Type.STRING to restrict a string to a regular * expression. */ pattern?: string; /** * Optional. SCHEMA FIELDS FOR TYPE OBJECT Properties of Type.OBJECT. */ properties?: { [key: string]: GoogleCloudAiplatformV1Schema }; /** * Optional. The order of the properties. Not a standard field in open api * spec. Only used to support the order of the properties. */ propertyOrdering?: string[]; /** * Optional. Required properties of Type.OBJECT. */ required?: string[]; /** * Optional. The title of the Schema. */ title?: string; /** * Optional. The type of the data. */ type?: | "TYPE_UNSPECIFIED" | "STRING" | "NUMBER" | "INTEGER" | "BOOLEAN" | "ARRAY" | "OBJECT"; } function serializeGoogleCloudAiplatformV1Schema(data: any): GoogleCloudAiplatformV1Schema { return { ...data, anyOf: data["anyOf"] !== undefined ? data["anyOf"].map((item: any) => (serializeGoogleCloudAiplatformV1Schema(item))) : undefined, items: data["items"] !== undefined ? serializeGoogleCloudAiplatformV1Schema(data["items"]) : undefined, maxItems: data["maxItems"] !== undefined ? String(data["maxItems"]) : undefined, maxLength: data["maxLength"] !== undefined ? String(data["maxLength"]) : undefined, maxProperties: data["maxProperties"] !== undefined ? String(data["maxProperties"]) : undefined, minItems: data["minItems"] !== undefined ? String(data["minItems"]) : undefined, minLength: data["minLength"] !== undefined ?
String(data["minLength"]) : undefined, minProperties: data["minProperties"] !== undefined ? String(data["minProperties"]) : undefined, properties: data["properties"] !== undefined ? Object.fromEntries(Object.entries(data["properties"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1Schema(v)]))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Schema(data: any): GoogleCloudAiplatformV1Schema { return { ...data, anyOf: data["anyOf"] !== undefined ? data["anyOf"].map((item: any) => (deserializeGoogleCloudAiplatformV1Schema(item))) : undefined, items: data["items"] !== undefined ? deserializeGoogleCloudAiplatformV1Schema(data["items"]) : undefined, maxItems: data["maxItems"] !== undefined ? BigInt(data["maxItems"]) : undefined, maxLength: data["maxLength"] !== undefined ? BigInt(data["maxLength"]) : undefined, maxProperties: data["maxProperties"] !== undefined ? BigInt(data["maxProperties"]) : undefined, minItems: data["minItems"] !== undefined ? BigInt(data["minItems"]) : undefined, minLength: data["minLength"] !== undefined ? BigInt(data["minLength"]) : undefined, minProperties: data["minProperties"] !== undefined ? BigInt(data["minProperties"]) : undefined, properties: data["properties"] !== undefined ? Object.fromEntries(Object.entries(data["properties"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1Schema(v)]))) : undefined, }; } /** * An entry of mapping between color and AnnotationSpec. The mapping is used in * segmentation mask. */ export interface GoogleCloudAiplatformV1SchemaAnnotationSpecColor { /** * The color of the AnnotationSpec in a segmentation mask. */ color?: GoogleTypeColor; /** * The display name of the AnnotationSpec represented by the color in the * segmentation mask. */ displayName?: string; /** * The ID of the AnnotationSpec represented by the color in the segmentation * mask. */ id?: string; } /** * Annotation details specific to image object detection. */ export interface GoogleCloudAiplatformV1SchemaImageBoundingBoxAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * The rightmost coordinate of the bounding box. */ xMax?: number; /** * The leftmost coordinate of the bounding box. */ xMin?: number; /** * The bottommost coordinate of the bounding box. */ yMax?: number; /** * The topmost coordinate of the bounding box. */ yMin?: number; } /** * Annotation details specific to image classification. */ export interface GoogleCloudAiplatformV1SchemaImageClassificationAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; } /** * Payload of Image DataItem. */ export interface GoogleCloudAiplatformV1SchemaImageDataItem { /** * Required. Google Cloud Storage URI points to the original image in user's * bucket. The image is up to 30MB in size. */ gcsUri?: string; /** * Output only. The mime type of the content of the image. Only the images in * below listed mime types are supported. - image/jpeg - image/gif - image/png * - image/webp - image/bmp - image/tiff - image/vnd.microsoft.icon */ readonly mimeType?: string; } /** * The metadata of Datasets that contain Image DataItems. 
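* As an editorial illustration (both values below are placeholders, not real
* resources):
*
*     {
*       dataItemSchemaUri: "gs://example-bucket/schemas/image_data_item.yaml",
*       gcsBucket: "example-bucket",
*     }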
*/ export interface GoogleCloudAiplatformV1SchemaImageDatasetMetadata { /** * Points to a YAML file stored on Google Cloud Storage describing payload of * the Image DataItems that belong to this Dataset. */ dataItemSchemaUri?: string; /** * Google Cloud Storage Bucket name that contains the blob data of this * Dataset. */ gcsBucket?: string; } /** * Annotation details specific to image segmentation. */ export interface GoogleCloudAiplatformV1SchemaImageSegmentationAnnotation { /** * Mask based segmentation annotation. Only one mask annotation can exist for * one image. */ maskAnnotation?: GoogleCloudAiplatformV1SchemaImageSegmentationAnnotationMaskAnnotation; /** * Polygon annotation. */ polygonAnnotation?: GoogleCloudAiplatformV1SchemaImageSegmentationAnnotationPolygonAnnotation; /** * Polyline annotation. */ polylineAnnotation?: GoogleCloudAiplatformV1SchemaImageSegmentationAnnotationPolylineAnnotation; } /** * The mask based segmentation annotation. */ export interface GoogleCloudAiplatformV1SchemaImageSegmentationAnnotationMaskAnnotation { /** * The mapping between color and AnnotationSpec for this Annotation. */ annotationSpecColors?: GoogleCloudAiplatformV1SchemaAnnotationSpecColor[]; /** * Google Cloud Storage URI that points to the mask image. The image must be * in PNG format. It must have the same size as the DataItem's image. Each * pixel in the image mask represents the AnnotationSpec which the pixel in * the image DataItem belongs to. Each color is mapped to one AnnotationSpec * based on annotation_spec_colors. */ maskGcsUri?: string; } /** * Represents a polygon in image. */ export interface GoogleCloudAiplatformV1SchemaImageSegmentationAnnotationPolygonAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * The vertexes are connected one by one and the last vertex is connected to * the first one to represent a polygon. */ vertexes?: GoogleCloudAiplatformV1SchemaVertex[]; } /** * Represents a polyline in image. */ export interface GoogleCloudAiplatformV1SchemaImageSegmentationAnnotationPolylineAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * The vertexes are connected one by one and the last vertex is not connected * to the first one. */ vertexes?: GoogleCloudAiplatformV1SchemaVertex[]; } /** * Bounding box matching model metrics for a single intersection-over-union * threshold and multiple label match confidence thresholds. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsBoundingBoxMetrics { /** * Metrics for each label-match confidence_threshold from * 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived * from them. */ confidenceMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsBoundingBoxMetricsConfidenceMetrics[]; /** * The intersection-over-union threshold value used to compute this metrics * entry. */ iouThreshold?: number; /** * The mean average precision, most often close to `auPrc`. */ meanAveragePrecision?: number; } /** * Metrics for a single confidence threshold. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsBoundingBoxMetricsConfidenceMetrics { /** * The confidence threshold value used to compute the metrics.
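* For example (editorial note), at a confidenceThreshold of 0.5 only predicted
* bounding boxes whose label-match confidence is at least 0.5 are counted when
* computing the precision and recall below.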
*/ confidenceThreshold?: number; /** * The harmonic mean of recall and precision. */ f1Score?: number; /** * Precision under the given confidence threshold. */ precision?: number; /** * Recall under the given confidence threshold. */ recall?: number; } /** * Metrics for classification evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetrics { /** * The Area Under Precision-Recall Curve metric. Micro-averaged for the * overall evaluation. */ auPrc?: number; /** * The Area Under Receiver Operating Characteristic curve metric. * Micro-averaged for the overall evaluation. */ auRoc?: number; /** * Metrics for each `confidenceThreshold` in * 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and `positionThreshold` = * INT32_MAX_VALUE. ROC and precision-recall curves, and other aggregated * metrics are derived from them. The confidence metrics entries may also be * supplied for additional values of `positionThreshold`, but from these no * aggregated metrics are computed. */ confidenceMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics[]; /** * Confusion matrix of the evaluation. */ confusionMatrix?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix; /** * The Log Loss metric. */ logLoss?: number; } function serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetrics { return { ...data, confidenceMetrics: data["confidenceMetrics"] !== undefined ? data["confidenceMetrics"].map((item: any) => (serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetrics { return { ...data, confidenceMetrics: data["confidenceMetrics"] !== undefined ? data["confidenceMetrics"].map((item: any) => (deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics(item))) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics { /** * Metrics are computed with an assumption that the Model never returns * predictions with score lower than this value. */ confidenceThreshold?: number; /** * Confusion matrix of the evaluation for this confidence_threshold. */ confusionMatrix?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix; /** * The harmonic mean of recall and precision. For summary metrics, it * computes the micro-averaged F1 score. */ f1Score?: number; /** * The harmonic mean of recallAt1 and precisionAt1. */ f1ScoreAt1?: number; /** * Macro-averaged F1 Score. */ f1ScoreMacro?: number; /** * Micro-averaged F1 Score. */ f1ScoreMicro?: number; /** * The number of ground truth labels that are not matched by a Model created * label. */ falseNegativeCount?: bigint; /** * The number of Model created labels that do not match a ground truth label. */ falsePositiveCount?: bigint; /** * False Positive Rate for the given confidence threshold. */ falsePositiveRate?: number; /** * The False Positive Rate when only considering the label that has the * highest prediction score and not below the confidence threshold for each * DataItem. 
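* Editorial note: under the usual definition, and assuming it applies here,
* this is falsePositiveCount / (falsePositiveCount + trueNegativeCount)
* restricted to the single highest-scoring prediction per DataItem.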
*/ falsePositiveRateAt1?: number; /** * Metrics are computed with an assumption that the Model always returns at * most this many predictions (ordered by their score, descendingly), but they * all still need to meet the `confidenceThreshold`. */ maxPredictions?: number; /** * Precision for the given confidence threshold. */ precision?: number; /** * The precision when only considering the label that has the highest * prediction score and not below the confidence threshold for each DataItem. */ precisionAt1?: number; /** * Recall (True Positive Rate) for the given confidence threshold. */ recall?: number; /** * The Recall (True Positive Rate) when only considering the label that has * the highest prediction score and not below the confidence threshold for * each DataItem. */ recallAt1?: number; /** * The number of labels that were not created by the Model, but if they * had been, they would not match a ground truth label. */ trueNegativeCount?: bigint; /** * The number of Model created labels that match a ground truth label. */ truePositiveCount?: bigint; } function serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? String(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? String(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? String(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? String(data["truePositiveCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsClassificationEvaluationMetricsConfidenceMetrics { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? BigInt(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? BigInt(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? BigInt(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? BigInt(data["truePositiveCount"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix { /** * AnnotationSpecs used in the confusion matrix. For AutoML Text Extraction, * a special negative AnnotationSpec with empty `id` and `displayName` of * "NULL" will be added as the last element. */ annotationSpecs?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef[]; /** * Rows in the confusion matrix. The number of rows is equal to the size of * `annotationSpecs`. `rows[i][j]` is the number of DataItems that have ground * truth of the `annotationSpecs[i]` and are predicted as `annotationSpecs[j]` * by the Model being evaluated. For Text Extraction, when * `annotationSpecs[i]` is the last element in `annotationSpecs`, i.e. the * special negative AnnotationSpec, `rows[i][j]` is the number of predicted * entities of `annotationSpecs[j]` that are not labeled as any of the ground * truth AnnotationSpec.
When `annotationSpecs[j]` is the special negative * AnnotationSpec, `rows[i][j]` is the number of entities that have ground truth of * `annotationSpecs[i]` but are not predicted as an entity by the Model. The * value of the last cell, i.e. `rows[i][j]` where i == j and `annotationSpecs[i]` is * the special negative AnnotationSpec, is always 0. */ rows?: any[][]; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrixAnnotationSpecRef { /** * Display name of the AnnotationSpec. */ displayName?: string; /** * ID of the AnnotationSpec. */ id?: string; } /** * Metrics for forecasting evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsForecastingEvaluationMetrics { /** * Mean Absolute Error (MAE). */ meanAbsoluteError?: number; /** * Mean absolute percentage error. Infinity when there are zeros in the * ground truth. */ meanAbsolutePercentageError?: number; /** * The quantile metrics entries for each quantile. */ quantileMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsForecastingEvaluationMetricsQuantileMetricsEntry[]; /** * Root Mean Squared Error (RMSE). */ rootMeanSquaredError?: number; /** * Root mean squared log error. Undefined when there are negative ground * truth values or predictions. */ rootMeanSquaredLogError?: number; /** * Root Mean Square Percentage Error. Square root of MSPE. * Undefined/imaginary when MSPE is negative. */ rootMeanSquaredPercentageError?: number; /** * Coefficient of determination as Pearson correlation coefficient. Undefined * when ground truth or predictions are constant or near constant. */ rSquared?: number; /** * Weighted Absolute Percentage Error. Does not use weights, this is just * what the metric is called. Undefined if actual values sum to zero. Will be * very large if actual values sum to a very small number. */ weightedAbsolutePercentageError?: number; } /** * Entry for the Quantiles loss type optimization objective. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsForecastingEvaluationMetricsQuantileMetricsEntry { /** * This is a custom metric that calculates the percentage of true values that * were less than the predicted value for that quantile. Only populated when * optimization_objective is minimize-quantile-loss and each entry corresponds * to an entry in quantiles. The percent value can be used to compare with the * quantile value, which is the target value. */ observedQuantile?: number; /** * The quantile for this entry. */ quantile?: number; /** * The scaled pinball loss of this quantile. */ scaledPinballLoss?: number; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsGeneralTextGenerationEvaluationMetrics { /** * BLEU (bilingual evaluation understudy) scores based on sacrebleu * implementation. */ bleu?: number; /** * ROUGE-L (Longest Common Subsequence) scoring at summary level. */ rougeLSum?: number; } /** * Metrics for image object detection evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsImageObjectDetectionEvaluationMetrics { /** * The single metric for bounding boxes evaluation: the * `meanAveragePrecision` averaged over all `boundingBoxMetricsEntries`. */ boundingBoxMeanAveragePrecision?: number; /** * The bounding boxes match metrics for each intersection-over-union * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair.
*/ boundingBoxMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsBoundingBoxMetrics[]; /** * The total number of bounding boxes (i.e. summed over all images) the * ground truth used to create this evaluation had. */ evaluatedBoundingBoxCount?: number; } /** * Metrics for image segmentation evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsImageSegmentationEvaluationMetrics { /** * Metrics for each confidenceThreshold in * 0.00,0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 Precision-recall curve can be * derived from it. */ confidenceMetricsEntries?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsImageSegmentationEvaluationMetricsConfidenceMetricsEntry[]; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsImageSegmentationEvaluationMetricsConfidenceMetricsEntry { /** * Metrics are computed with an assumption that the model never returns * predictions with score lower than this value. */ confidenceThreshold?: number; /** * Confusion matrix for the given confidence threshold. */ confusionMatrix?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix; /** * DSC or the F1 score, The harmonic mean of recall and precision. */ diceScoreCoefficient?: number; /** * The intersection-over-union score. The measure of overlap of the * annotation's category mask with ground truth category mask on the DataItem. */ iouScore?: number; /** * Precision for the given confidence threshold. */ precision?: number; /** * Recall (True Positive Rate) for the given confidence threshold. */ recall?: number; } /** * Metrics for general pairwise text generation evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics { /** * Fraction of cases where the autorater agreed with the human raters. */ accuracy?: number; /** * Percentage of time the autorater decided the baseline model had the better * response. */ baselineModelWinRate?: number; /** * A measurement of agreement between the autorater and human raters that * takes the likelihood of random agreement into account. */ cohensKappa?: number; /** * Harmonic mean of precision and recall. */ f1Score?: number; /** * Number of examples where the autorater chose the baseline model, but * humans preferred the model. */ falseNegativeCount?: bigint; /** * Number of examples where the autorater chose the model, but humans * preferred the baseline model. */ falsePositiveCount?: bigint; /** * Percentage of time humans decided the baseline model had the better * response. */ humanPreferenceBaselineModelWinRate?: number; /** * Percentage of time humans decided the model had the better response. */ humanPreferenceModelWinRate?: number; /** * Percentage of time the autorater decided the model had the better * response. */ modelWinRate?: number; /** * Fraction of cases where the autorater and humans thought the model had a * better response out of all cases where the autorater thought the model had * a better response. True positive divided by all positive. */ precision?: number; /** * Fraction of cases where the autorater and humans thought the model had a * better response out of all cases where the humans thought the model had a * better response. */ recall?: number; /** * Number of examples where both the autorater and humans decided that the * model had the worse response. */ trueNegativeCount?: bigint; /** * Number of examples where both the autorater and humans decided that the * model had the better response. 
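* Editorial note: treating "the autorater prefers the model" as the positive
* class, these counts relate to the rates above in the usual way, e.g.
* precision = truePositiveCount / (truePositiveCount + falsePositiveCount) and
* recall = truePositiveCount / (truePositiveCount + falseNegativeCount).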
*/ truePositiveCount?: bigint; } function serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? String(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? String(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? String(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? String(data["truePositiveCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsPairwiseTextGenerationEvaluationMetrics { return { ...data, falseNegativeCount: data["falseNegativeCount"] !== undefined ? BigInt(data["falseNegativeCount"]) : undefined, falsePositiveCount: data["falsePositiveCount"] !== undefined ? BigInt(data["falsePositiveCount"]) : undefined, trueNegativeCount: data["trueNegativeCount"] !== undefined ? BigInt(data["trueNegativeCount"]) : undefined, truePositiveCount: data["truePositiveCount"] !== undefined ? BigInt(data["truePositiveCount"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsQuestionAnsweringEvaluationMetrics { /** * The rate at which the input predicted strings exactly match their * references. */ exactMatch?: number; } /** * Metrics for regression evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsRegressionEvaluationMetrics { /** * Mean Absolute Error (MAE). */ meanAbsoluteError?: number; /** * Mean absolute percentage error. Infinity when there are zeros in the * ground truth. */ meanAbsolutePercentageError?: number; /** * Root Mean Squared Error (RMSE). */ rootMeanSquaredError?: number; /** * Root mean squared log error. Undefined when there are negative ground * truth values or predictions. */ rootMeanSquaredLogError?: number; /** * Coefficient of determination as Pearson correlation coefficient. Undefined * when ground truth or predictions are constant or near constant. */ rSquared?: number; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsSummarizationEvaluationMetrics { /** * ROUGE-L (Longest Common Subsequence) scoring at summary level. */ rougeLSum?: number; } /** * Metrics for text extraction evaluation results. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsTextExtractionEvaluationMetrics { /** * Metrics that have confidence thresholds. Precision-recall curve can be * derived from them. */ confidenceMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsTextExtractionEvaluationMetricsConfidenceMetrics[]; /** * Confusion matrix of the evaluation. Only set for Models where number of * AnnotationSpecs is no more than 10. Only set for ModelEvaluations, not for * ModelEvaluationSlices. */ confusionMatrix?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix; } export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsTextExtractionEvaluationMetricsConfidenceMetrics { /** * Metrics are computed with an assumption that the Model never returns * predictions with score lower than this value. */ confidenceThreshold?: number; /** * The harmonic mean of recall and precision. 
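* Editorial note: the harmonic mean is the standard F1 formula,
* f1Score = 2 * precision * recall / (precision + recall).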
*/ f1Score?: number; /** * Precision for the given confidence threshold. */ precision?: number; /** * Recall (True Positive Rate) for the given confidence threshold. */ recall?: number; } /** * Model evaluation metrics for text sentiment problems. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsTextSentimentEvaluationMetrics { /** * Confusion matrix of the evaluation. Only set for ModelEvaluations, not for * ModelEvaluationSlices. */ confusionMatrix?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsConfusionMatrix; /** * The harmonic mean of recall and precision. */ f1Score?: number; /** * Linear weighted kappa. Only set for ModelEvaluations, not for * ModelEvaluationSlices. */ linearKappa?: number; /** * Mean absolute error. Only set for ModelEvaluations, not for * ModelEvaluationSlices. */ meanAbsoluteError?: number; /** * Mean squared error. Only set for ModelEvaluations, not for * ModelEvaluationSlices. */ meanSquaredError?: number; /** * Precision. */ precision?: number; /** * Quadratic weighted kappa. Only set for ModelEvaluations, not for * ModelEvaluationSlices. */ quadraticKappa?: number; /** * Recall. */ recall?: number; } /** * UNIMPLEMENTED. Track matching model metrics for a single track match * threshold and multiple label match confidence thresholds. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsTrackMetrics { /** * Metrics for each label-match `confidenceThreshold` from * 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. Precision-recall curve is derived * from them. */ confidenceMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsTrackMetricsConfidenceMetrics[]; /** * The intersection-over-union threshold value between bounding boxes across * frames used to compute this metric entry. */ iouThreshold?: number; /** * The mean bounding box iou over all confidence thresholds. */ meanBoundingBoxIou?: number; /** * The mean mismatch rate over all confidence thresholds. */ meanMismatchRate?: number; /** * The mean average precision over all confidence thresholds. */ meanTrackingAveragePrecision?: number; } /** * Metrics for a single confidence threshold. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsTrackMetricsConfidenceMetrics { /** * Bounding box intersection-over-union precision. Measures how well the * bounding boxes overlap between each other (e.g. complete overlap or just * barely above iou_threshold). */ boundingBoxIou?: number; /** * The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * Mismatch rate, which measures the tracking consistency, i.e. correctness * of instance ID continuity. */ mismatchRate?: number; /** * Tracking precision. */ trackingPrecision?: number; /** * Tracking recall. */ trackingRecall?: number; } /** * The Evaluation metrics given a specific precision_window_length. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics { /** * Metrics for each label-match confidence_threshold from * 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99. */ confidenceMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetricsConfidenceMetrics[]; /** * The mean average precision. */ meanAveragePrecision?: number; /** * This VideoActionMetrics is calculated based on this prediction window * length. If the predicted action's timestamp is inside the time window whose * center is the ground truth action's timestamp with this specific length, * the prediction result is treated as a true positive. 
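* For example (editorial illustration), with a precisionWindowLength of 2
* seconds and a ground-truth action at t = 10s, a predicted action timestamped
* anywhere inside the window [9s, 11s] counts as a true positive.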
*/ precisionWindowLength?: number /* Duration */; } function serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics { return { ...data, precisionWindowLength: data["precisionWindowLength"] !== undefined ? data["precisionWindowLength"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics { return { ...data, precisionWindowLength: data["precisionWindowLength"] !== undefined ? data["precisionWindowLength"] : undefined, }; } /** * Metrics for a single confidence threshold. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetricsConfidenceMetrics { /** * Output only. The confidence threshold value used to compute the metrics. */ confidenceThreshold?: number; /** * Output only. The harmonic mean of recall and precision. */ f1Score?: number; /** * Output only. Precision for the given confidence threshold. */ precision?: number; /** * Output only. Recall for the given confidence threshold. */ recall?: number; } /** * Model evaluation metrics for video action recognition. */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionRecognitionMetrics { /** * The number of ground truth actions used to create this evaluation. */ evaluatedActionCount?: number; /** * The metric entries for precision window lengths: 1s,2s,3s. */ videoActionMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics[]; } function serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionRecognitionMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionRecognitionMetrics { return { ...data, videoActionMetrics: data["videoActionMetrics"] !== undefined ? data["videoActionMetrics"].map((item: any) => (serializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionRecognitionMetrics(data: any): GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionRecognitionMetrics { return { ...data, videoActionMetrics: data["videoActionMetrics"] !== undefined ? data["videoActionMetrics"].map((item: any) => (deserializeGoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoActionMetrics(item))) : undefined, }; } /** * Model evaluation metrics for video object tracking problems. Evaluates * prediction quality of both labeled bounding boxes and labeled tracks (i.e. * series of bounding boxes sharing same label and instance ID). */ export interface GoogleCloudAiplatformV1SchemaModelevaluationMetricsVideoObjectTrackingMetrics { /** * The single metric for bounding boxes evaluation: the * `meanAveragePrecision` averaged over all `boundingBoxMetrics`. */ boundingBoxMeanAveragePrecision?: number; /** * The bounding boxes match metrics for each intersection-over-union * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. */ boundingBoxMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsBoundingBoxMetrics[]; /** * UNIMPLEMENTED. The total number of bounding boxes (i.e. summed over all * frames) the ground truth used to create this evaluation had. */ evaluatedBoundingBoxCount?: number; /** * UNIMPLEMENTED. The number of video frames used to create this evaluation. 
*/ evaluatedFrameCount?: number; /** * UNIMPLEMENTED. The total number of tracks (i.e. as seen across all frames) * the ground truth used to create this evaluation had. */ evaluatedTrackCount?: number; /** * UNIMPLEMENTED. The single metric for tracks accuracy evaluation: the * `meanAveragePrecision` averaged over all `trackMetrics`. */ trackMeanAveragePrecision?: number; /** * UNIMPLEMENTED. The single metric for tracks bounding box iou evaluation: * the `meanBoundingBoxIou` averaged over all `trackMetrics`. */ trackMeanBoundingBoxIou?: number; /** * UNIMPLEMENTED. The single metric for tracking consistency evaluation: the * `meanMismatchRate` averaged over all `trackMetrics`. */ trackMeanMismatchRate?: number; /** * UNIMPLEMENTED. The tracks match metrics for each intersection-over-union * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 and each label confidence * threshold 0.05,0.10,...,0.95,0.96,0.97,0.98,0.99 pair. */ trackMetrics?: GoogleCloudAiplatformV1SchemaModelevaluationMetricsTrackMetrics[]; } /** * Prediction input format for Image Classification. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceImageClassificationPredictionInstance { /** * The image bytes or Cloud Storage URI to make the prediction on. */ content?: string; /** * The MIME type of the content of the image. Only the images in below listed * MIME types are supported. - image/jpeg - image/gif - image/png - image/webp * - image/bmp - image/tiff - image/vnd.microsoft.icon */ mimeType?: string; } /** * Prediction input format for Image Object Detection. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceImageObjectDetectionPredictionInstance { /** * The image bytes or Cloud Storage URI to make the prediction on. */ content?: string; /** * The MIME type of the content of the image. Only the images in below listed * MIME types are supported. - image/jpeg - image/gif - image/png - image/webp * - image/bmp - image/tiff - image/vnd.microsoft.icon */ mimeType?: string; } /** * Prediction input format for Image Segmentation. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceImageSegmentationPredictionInstance { /** * The image bytes to make the predictions on. */ content?: string; /** * The MIME type of the content of the image. Only the images in below listed * MIME types are supported. - image/jpeg - image/png */ mimeType?: string; } /** * Prediction input format for Text Classification. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceTextClassificationPredictionInstance { /** * The text snippet to make the predictions on. */ content?: string; /** * The MIME type of the text snippet. The supported MIME types are listed * below. - text/plain */ mimeType?: string; } /** * Prediction input format for Text Extraction. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceTextExtractionPredictionInstance { /** * The text snippet to make the predictions on. */ content?: string; /** * This field is only used for batch prediction. If a key is provided, the * batch prediction result will by mapped to this key. If omitted, then the * batch prediction result will contain the entire input instance. Vertex AI * will not check if keys in the request are duplicates, so it is up to the * caller to ensure the keys are unique. */ key?: string; /** * The MIME type of the text snippet. The supported MIME types are listed * below. - text/plain */ mimeType?: string; } /** * Prediction input format for Text Sentiment. 
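* As an editorial illustration (the content is made up):
*
*     {
*       content: "The battery life on this phone is fantastic.",
*       mimeType: "text/plain",
*     }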
*/ export interface GoogleCloudAiplatformV1SchemaPredictInstanceTextSentimentPredictionInstance { /** * The text snippet to make the predictions on. */ content?: string; /** * The MIME type of the text snippet. The supported MIME types are listed * below. - text/plain */ mimeType?: string; } /** * Prediction input format for Video Action Recognition. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceVideoActionRecognitionPredictionInstance { /** * The Google Cloud Storage location of the video on which to perform the * prediction. */ content?: string; /** * The MIME type of the content of the video. Only the following are * supported: video/mp4 video/avi video/quicktime */ mimeType?: string; /** * The end, exclusive, of the video's time segment on which to perform the * prediction. Expressed as a number of seconds as measured from the start of * the video, with "s" appended at the end. Fractions are allowed, up to a * microsecond precision, and "inf" or "Infinity" is allowed, which means the * end of the video. */ timeSegmentEnd?: string; /** * The beginning, inclusive, of the video's time segment on which to perform * the prediction. Expressed as a number of seconds as measured from the start * of the video, with "s" appended at the end. Fractions are allowed, up to a * microsecond precision. */ timeSegmentStart?: string; } /** * Prediction input format for Video Classification. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceVideoClassificationPredictionInstance { /** * The Google Cloud Storage location of the video on which to perform the * prediction. */ content?: string; /** * The MIME type of the content of the video. Only the following are * supported: video/mp4 video/avi video/quicktime */ mimeType?: string; /** * The end, exclusive, of the video's time segment on which to perform the * prediction. Expressed as a number of seconds as measured from the start of * the video, with "s" appended at the end. Fractions are allowed, up to a * microsecond precision, and "inf" or "Infinity" is allowed, which means the * end of the video. */ timeSegmentEnd?: string; /** * The beginning, inclusive, of the video's time segment on which to perform * the prediction. Expressed as a number of seconds as measured from the start * of the video, with "s" appended at the end. Fractions are allowed, up to a * microsecond precision. */ timeSegmentStart?: string; } /** * Prediction input format for Video Object Tracking. */ export interface GoogleCloudAiplatformV1SchemaPredictInstanceVideoObjectTrackingPredictionInstance { /** * The Google Cloud Storage location of the video on which to perform the * prediction. */ content?: string; /** * The MIME type of the content of the video. Only the following are * supported: video/mp4 video/avi video/quicktime */ mimeType?: string; /** * The end, exclusive, of the video's time segment on which to perform the * prediction. Expressed as a number of seconds as measured from the start of * the video, with "s" appended at the end. Fractions are allowed, up to a * microsecond precision, and "inf" or "Infinity" is allowed, which means the * end of the video. */ timeSegmentEnd?: string; /** * The beginning, inclusive, of the video's time segment on which to perform * the prediction. Expressed as a number of seconds as measured from the start * of the video, with "s" appended at the end. Fractions are allowed, up to a * microsecond precision. */ timeSegmentStart?: string; } /** * Represents a line of JSONL in the batch prediction output file. 
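* As an editorial illustration (a sketch with assumed values), one output line
* for an image classification batch job might deserialize to:
*
*     {
*       instance: { content: "gs://example-bucket/images/0001.jpg", mimeType: "image/jpeg" },
*       prediction: { ids: ["3105889"], displayNames: ["daisy"], confidences: [0.97] },
*     }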
*/ export interface GoogleCloudAiplatformV1SchemaPredictionResult { /** * The error result. Do not set prediction if this is set. */ error?: GoogleCloudAiplatformV1SchemaPredictionResultError; /** * User's input instance. Struct is used here instead of Any so that * JsonFormat does not append an extra "@type" field when we convert the proto * to JSON. */ instance?: { [key: string]: any }; /** * Optional user-provided key from the input instance. */ key?: string; /** * The prediction result. Value is used here instead of Any so that * JsonFormat does not append an extra "@type" field when we convert the proto * to JSON and so we can represent array of objects. Do not set error if this * is set. */ prediction?: any; } export interface GoogleCloudAiplatformV1SchemaPredictionResultError { /** * Error message with additional details. */ message?: string; /** * Error status. This will be serialized into the enum name e.g. "NOT_FOUND". */ status?: | "OK" | "CANCELLED" | "UNKNOWN" | "INVALID_ARGUMENT" | "DEADLINE_EXCEEDED" | "NOT_FOUND" | "ALREADY_EXISTS" | "PERMISSION_DENIED" | "UNAUTHENTICATED" | "RESOURCE_EXHAUSTED" | "FAILED_PRECONDITION" | "ABORTED" | "OUT_OF_RANGE" | "UNIMPLEMENTED" | "INTERNAL" | "UNAVAILABLE" | "DATA_LOSS"; } /** * The configuration for grounding checking. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsGroundingConfig { /** * If set, skip finding claim attributions (i.e not generate grounding * citation). */ disableAttribution?: boolean; /** * The sources for the grounding checking. */ sources?: GoogleCloudAiplatformV1SchemaPredictParamsGroundingConfigSourceEntry[]; } /** * Single source entry for the grounding checking. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsGroundingConfigSourceEntry { /** * The uri of the Vertex AI Search data source. Deprecated. Use * vertex_ai_search_datastore instead. */ enterpriseDatastore?: string; /** * The grounding text passed inline with the Predict API. It can support up * to 1 million bytes. */ inlineContext?: string; /** * The type of the grounding checking source. */ type?: | "UNSPECIFIED" | "WEB" | "ENTERPRISE" | "VERTEX_AI_SEARCH" | "INLINE"; /** * The uri of the Vertex AI Search data source. */ vertexAiSearchDatastore?: string; } /** * Prediction model parameters for Image Classification. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsImageClassificationPredictionParams { /** * The Model only returns predictions with at least this confidence score. * Default value is 0.0 */ confidenceThreshold?: number; /** * The Model only returns up to that many top, by confidence score, * predictions per instance. If this number is very high, the Model may return * fewer predictions. Default value is 10. */ maxPredictions?: number; } /** * Prediction model parameters for Image Object Detection. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsImageObjectDetectionPredictionParams { /** * The Model only returns predictions with at least this confidence score. * Default value is 0.0 */ confidenceThreshold?: number; /** * The Model only returns up to that many top, by confidence score, * predictions per instance. Note that number of returned predictions is also * limited by metadata's predictionsLimit. Default value is 10. */ maxPredictions?: number; } /** * Prediction model parameters for Image Segmentation. 
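* For example (editorial illustration, value made up), passing
* { confidenceThreshold: 0.6 } labels a pixel with a category only when the
* model is at least 60% confident about it; all other pixels are classified as
* background.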
*/ export interface GoogleCloudAiplatformV1SchemaPredictParamsImageSegmentationPredictionParams { /** * When the model predicts category of pixels of the image, it will only * provide predictions for pixels that it is at least this much confident * about. All other pixels will be classified as background. Default value is * 0.5. */ confidenceThreshold?: number; } /** * Prediction model parameters for Video Action Recognition. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsVideoActionRecognitionPredictionParams { /** * The Model only returns predictions with at least this confidence score. * Default value is 0.0 */ confidenceThreshold?: number; /** * The model only returns up to that many top, by confidence score, * predictions per frame of the video. If this number is very high, the Model * may return fewer predictions per frame. Default value is 50. */ maxPredictions?: number; } /** * Prediction model parameters for Video Classification. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsVideoClassificationPredictionParams { /** * The Model only returns predictions with at least this confidence score. * Default value is 0.0 */ confidenceThreshold?: number; /** * The Model only returns up to that many top, by confidence score, * predictions per instance. If this number is very high, the Model may return * fewer predictions. Default value is 10,000. */ maxPredictions?: number; /** * Set to true to request classification for a video at one-second intervals. * Vertex AI returns labels and their confidence scores for each second of the * entire time segment of the video that user specified in the input WARNING: * Model evaluation is not done for this classification type, the quality of * it depends on the training data, but there are no metrics provided to * describe that quality. Default value is false */ oneSecIntervalClassification?: boolean; /** * Set to true to request segment-level classification. Vertex AI returns * labels and their confidence scores for the entire time segment of the video * that user specified in the input instance. Default value is true */ segmentClassification?: boolean; /** * Set to true to request shot-level classification. Vertex AI determines the * boundaries for each camera shot in the entire time segment of the video * that user specified in the input instance. Vertex AI then returns labels * and their confidence scores for each detected shot, along with the start * and end time of the shot. WARNING: Model evaluation is not done for this * classification type, the quality of it depends on the training data, but * there are no metrics provided to describe that quality. Default value is * false */ shotClassification?: boolean; } /** * Prediction model parameters for Video Object Tracking. */ export interface GoogleCloudAiplatformV1SchemaPredictParamsVideoObjectTrackingPredictionParams { /** * The Model only returns predictions with at least this confidence score. * Default value is 0.0 */ confidenceThreshold?: number; /** * The model only returns up to that many top, by confidence score, * predictions per frame of the video. If this number is very high, the Model * may return fewer predictions per frame. Default value is 50. */ maxPredictions?: number; /** * Only bounding boxes with shortest edge at least that long as a relative * value of video frame size are returned. Default value is 0.0. */ minBoundingBoxSize?: number; } /** * Prediction output format for Image and Text Classification. 
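* As an editorial illustration (assumed values; on the wire the IDs are JSON
* strings, which the deserializer below converts to bigint):
*
*     {
*       ids: ["3105889", "3105890"],
*       displayNames: ["daisy", "tulip"],
*       confidences: [0.92, 0.05],
*     }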
*/ export interface GoogleCloudAiplatformV1SchemaPredictPredictionClassificationPredictionResult { /** * The Model's confidences in correctness of the predicted IDs, higher value * means higher confidence. Order matches the Ids. */ confidences?: number[]; /** * The display names of the AnnotationSpecs that had been identified, order * matches the IDs. */ displayNames?: string[]; /** * The resource IDs of the AnnotationSpecs that had been identified. */ ids?: bigint[]; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionClassificationPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionClassificationPredictionResult { return { ...data, ids: data["ids"] !== undefined ? data["ids"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionClassificationPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionClassificationPredictionResult { return { ...data, ids: data["ids"] !== undefined ? data["ids"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Prediction output format for Image Object Detection. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionImageObjectDetectionPredictionResult { /** * Bounding boxes, i.e. the rectangles over the image, that pinpoint the * found AnnotationSpecs. Given in order that matches the IDs. Each bounding * box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and `yMax`, which * represent the extremal coordinates of the box. They are relative to the * image size, and the point 0,0 is in the top left of the image. */ bboxes?: any[][]; /** * The Model's confidences in correctness of the predicted IDs, higher value * means higher confidence. Order matches the Ids. */ confidences?: number[]; /** * The display names of the AnnotationSpecs that had been identified, order * matches the IDs. */ displayNames?: string[]; /** * The resource IDs of the AnnotationSpecs that had been identified, ordered * by the confidence score descendingly. */ ids?: bigint[]; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionImageObjectDetectionPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionImageObjectDetectionPredictionResult { return { ...data, ids: data["ids"] !== undefined ? data["ids"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionImageObjectDetectionPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionImageObjectDetectionPredictionResult { return { ...data, ids: data["ids"] !== undefined ? data["ids"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Prediction output format for Image Segmentation. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionImageSegmentationPredictionResult { /** * A PNG image where each pixel in the mask represents the category in which * the pixel in the original image was predicted to belong to. The size of * this image will be the same as the original image. The mapping between the * AnnotationSpec and the color can be found in model's metadata. The model * will choose the most likely category and if none of the categories reach * the confidence threshold, the pixel will be marked as background. */ categoryMask?: string; /** * A one channel image which is encoded as an 8bit lossless PNG. The size of * the image will be the same as the original image. 
For a specific pixel, * darker color means less confidence in correctness of the category in the * categoryMask for the corresponding pixel. Black means no confidence and * white means complete confidence. */ confidenceMask?: string; } /** * Prediction output format for Tabular Classification. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionTabularClassificationPredictionResult { /** * The name of the classes being classified, contains all possible values of * the target column. */ classes?: string[]; /** * The model's confidence in each class being correct, higher value means * higher confidence. The N-th score corresponds to the N-th class in classes. */ scores?: number[]; } /** * Prediction output format for Tabular Regression. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionTabularRegressionPredictionResult { /** * The lower bound of the prediction interval. */ lowerBound?: number; /** * Quantile predictions, in 1-1 correspondence with quantile_values. */ quantilePredictions?: number[]; /** * Quantile values. */ quantileValues?: number[]; /** * The upper bound of the prediction interval. */ upperBound?: number; /** * The regression value. */ value?: number; } /** * Prediction output format for Text Extraction. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionTextExtractionPredictionResult { /** * The Model's confidences in correctness of the predicted IDs, higher value * means higher confidence. Order matches the Ids. */ confidences?: number[]; /** * The display names of the AnnotationSpecs that had been identified, order * matches the IDs. */ displayNames?: string[]; /** * The resource IDs of the AnnotationSpecs that had been identified, ordered * by the confidence score descendingly. */ ids?: bigint[]; /** * The end offsets, inclusive, of the text segment in which the * AnnotationSpec has been identified. Expressed as a zero-based number of * characters as measured from the start of the text snippet. */ textSegmentEndOffsets?: bigint[]; /** * The start offsets, inclusive, of the text segment in which the * AnnotationSpec has been identified. Expressed as a zero-based number of * characters as measured from the start of the text snippet. */ textSegmentStartOffsets?: bigint[]; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionTextExtractionPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionTextExtractionPredictionResult { return { ...data, ids: data["ids"] !== undefined ? data["ids"].map((item: any) => (String(item))) : undefined, textSegmentEndOffsets: data["textSegmentEndOffsets"] !== undefined ? data["textSegmentEndOffsets"].map((item: any) => (String(item))) : undefined, textSegmentStartOffsets: data["textSegmentStartOffsets"] !== undefined ? data["textSegmentStartOffsets"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionTextExtractionPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionTextExtractionPredictionResult { return { ...data, ids: data["ids"] !== undefined ? data["ids"].map((item: any) => (BigInt(item))) : undefined, textSegmentEndOffsets: data["textSegmentEndOffsets"] !== undefined ? data["textSegmentEndOffsets"].map((item: any) => (BigInt(item))) : undefined, textSegmentStartOffsets: data["textSegmentStartOffsets"] !== undefined ? 
data["textSegmentStartOffsets"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Prediction output format for Text Sentiment */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionTextSentimentPredictionResult { /** * The integer sentiment labels between 0 (inclusive) and sentimentMax label * (inclusive), while 0 maps to the least positive sentiment and sentimentMax * maps to the most positive one. The higher the score is, the more positive * the sentiment in the text snippet is. Note: sentimentMax is an integer * value between 1 (inclusive) and 10 (inclusive). */ sentiment?: number; } export interface GoogleCloudAiplatformV1SchemaPredictPredictionTftFeatureImportance { attributeColumns?: string[]; attributeWeights?: number[]; contextColumns?: string[]; /** * TFT feature importance values. Each pair for {context/horizon/attribute} * should have the same shape since the weight corresponds to the column * names. */ contextWeights?: number[]; horizonColumns?: string[]; horizonWeights?: number[]; } /** * Prediction output format for Time Series Forecasting. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionTimeSeriesForecastingPredictionResult { /** * Quantile predictions, in 1-1 correspondence with quantile_values. */ quantilePredictions?: number[]; /** * Quantile values. */ quantileValues?: number[]; /** * Only use these if TFt is enabled. */ tftFeatureImportance?: GoogleCloudAiplatformV1SchemaPredictPredictionTftFeatureImportance; /** * The regression value. */ value?: number; } /** * Prediction output format for Video Action Recognition. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionVideoActionRecognitionPredictionResult { /** * The Model's confidence in correction of this prediction, higher value * means higher confidence. */ confidence?: number; /** * The display name of the AnnotationSpec that had been identified. */ displayName?: string; /** * The resource ID of the AnnotationSpec that had been identified. */ id?: string; /** * The end, exclusive, of the video's time segment in which the * AnnotationSpec has been identified. Expressed as a number of seconds as * measured from the start of the video, with fractions up to a microsecond * precision, and with "s" appended at the end. */ timeSegmentEnd?: number /* Duration */; /** * The beginning, inclusive, of the video's time segment in which the * AnnotationSpec has been identified. Expressed as a number of seconds as * measured from the start of the video, with fractions up to a microsecond * precision, and with "s" appended at the end. */ timeSegmentStart?: number /* Duration */; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoActionRecognitionPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoActionRecognitionPredictionResult { return { ...data, timeSegmentEnd: data["timeSegmentEnd"] !== undefined ? data["timeSegmentEnd"] : undefined, timeSegmentStart: data["timeSegmentStart"] !== undefined ? data["timeSegmentStart"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoActionRecognitionPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoActionRecognitionPredictionResult { return { ...data, timeSegmentEnd: data["timeSegmentEnd"] !== undefined ? data["timeSegmentEnd"] : undefined, timeSegmentStart: data["timeSegmentStart"] !== undefined ? data["timeSegmentStart"] : undefined, }; } /** * Prediction output format for Video Classification. 
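 *
 * A sketch of inspecting one returned prediction (field values are
 * illustrative):
 *
 * @example
 * const prediction: GoogleCloudAiplatformV1SchemaPredictPredictionVideoClassificationPredictionResult = {
 *   type: "segment-classification",
 *   displayName: "cooking",
 *   confidence: 0.87,
 * };
 * if (prediction.type === "segment-classification") {
 *   console.log(`${prediction.displayName}: ${prediction.confidence}`);
 * }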
*/ export interface GoogleCloudAiplatformV1SchemaPredictPredictionVideoClassificationPredictionResult { /** * The Model's confidence in correction of this prediction, higher value * means higher confidence. */ confidence?: number; /** * The display name of the AnnotationSpec that had been identified. */ displayName?: string; /** * The resource ID of the AnnotationSpec that had been identified. */ id?: string; /** * The end, exclusive, of the video's time segment in which the * AnnotationSpec has been identified. Expressed as a number of seconds as * measured from the start of the video, with fractions up to a microsecond * precision, and with "s" appended at the end. Note that for * 'segment-classification' prediction type, this equals the original * 'timeSegmentEnd' from the input instance, for other types it is the end of * a shot or a 1 second interval respectively. */ timeSegmentEnd?: number /* Duration */; /** * The beginning, inclusive, of the video's time segment in which the * AnnotationSpec has been identified. Expressed as a number of seconds as * measured from the start of the video, with fractions up to a microsecond * precision, and with "s" appended at the end. Note that for * 'segment-classification' prediction type, this equals the original * 'timeSegmentStart' from the input instance, for other types it is the start * of a shot or a 1 second interval respectively. */ timeSegmentStart?: number /* Duration */; /** * The type of the prediction. The requested types can be configured via * parameters. This will be one of - segment-classification - * shot-classification - one-sec-interval-classification */ type?: string; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoClassificationPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoClassificationPredictionResult { return { ...data, timeSegmentEnd: data["timeSegmentEnd"] !== undefined ? data["timeSegmentEnd"] : undefined, timeSegmentStart: data["timeSegmentStart"] !== undefined ? data["timeSegmentStart"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoClassificationPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoClassificationPredictionResult { return { ...data, timeSegmentEnd: data["timeSegmentEnd"] !== undefined ? data["timeSegmentEnd"] : undefined, timeSegmentStart: data["timeSegmentStart"] !== undefined ? data["timeSegmentStart"] : undefined, }; } /** * Prediction output format for Video Object Tracking. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResult { /** * The Model's confidence in correction of this prediction, higher value * means higher confidence. */ confidence?: number; /** * The display name of the AnnotationSpec that had been identified. */ displayName?: string; /** * All of the frames of the video in which a single object instance has been * detected. The bounding boxes in the frames identify the same object. */ frames?: GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame[]; /** * The resource ID of the AnnotationSpec that had been identified. */ id?: string; /** * The end, inclusive, of the video's time segment in which the object * instance has been detected. Expressed as a number of seconds as measured * from the start of the video, with fractions up to a microsecond precision, * and with "s" appended at the end. 
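 *
 * A sketch of reading a tracked object and its per-frame bounding boxes
 * (values are illustrative):
 *
 * @example
 * const tracked: GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResult = {
 *   displayName: "car",
 *   confidence: 0.9,
 *   frames: [{ xMin: 0.1, xMax: 0.4, yMin: 0.2, yMax: 0.6 }],
 * };
 * for (const frame of tracked.frames ?? []) {
 *   console.log(frame.xMin, frame.yMin, frame.xMax, frame.yMax);
 * }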
*/ timeSegmentEnd?: number /* Duration */; /** * The beginning, inclusive, of the video's time segment in which the object * instance has been detected. Expressed as a number of seconds as measured * from the start of the video, with fractions up to a microsecond precision, * and with "s" appended at the end. */ timeSegmentStart?: number /* Duration */; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResult { return { ...data, frames: data["frames"] !== undefined ? data["frames"].map((item: any) => (serializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame(item))) : undefined, timeSegmentEnd: data["timeSegmentEnd"] !== undefined ? data["timeSegmentEnd"] : undefined, timeSegmentStart: data["timeSegmentStart"] !== undefined ? data["timeSegmentStart"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResult(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResult { return { ...data, frames: data["frames"] !== undefined ? data["frames"].map((item: any) => (deserializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame(item))) : undefined, timeSegmentEnd: data["timeSegmentEnd"] !== undefined ? data["timeSegmentEnd"] : undefined, timeSegmentStart: data["timeSegmentStart"] !== undefined ? data["timeSegmentStart"] : undefined, }; } /** * The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box, i.e. * the rectangle over the video frame pinpointing the found AnnotationSpec. The * coordinates are relative to the frame size, and the point 0,0 is in the top * left of the frame. */ export interface GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame { /** * A time (frame) of a video in which the object has been detected. Expressed * as a number of seconds as measured from the start of the video, with * fractions up to a microsecond precision, and with "s" appended at the end. */ timeOffset?: number /* Duration */; /** * The rightmost coordinate of the bounding box. */ xMax?: number; /** * The leftmost coordinate of the bounding box. */ xMin?: number; /** * The bottommost coordinate of the bounding box. */ yMax?: number; /** * The topmost coordinate of the bounding box. */ yMin?: number; } function serializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame { return { ...data, timeOffset: data["timeOffset"] !== undefined ? data["timeOffset"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame(data: any): GoogleCloudAiplatformV1SchemaPredictPredictionVideoObjectTrackingPredictionResultFrame { return { ...data, timeOffset: data["timeOffset"] !== undefined ? data["timeOffset"] : undefined, }; } /** * The A2 schema of a prompt. */ export interface GoogleCloudAiplatformV1SchemaPromptApiSchema { /** * The Schema version that represents changes to the API behavior. */ apiSchemaVersion?: string; /** * A list of execution instances for constructing a ready-to-use prompt. */ executions?: GoogleCloudAiplatformV1SchemaPromptInstancePromptExecution[]; /** * Multimodal prompt which embeds preambles to prompt string. 
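 *
 * A hypothetical sketch, assuming the GoogleCloudAiplatformV1Content /
 * GoogleCloudAiplatformV1Part shapes (a role plus text parts) defined
 * elsewhere in this module; the version string and prompt text are
 * illustrative:
 *
 * @example
 * const schema: GoogleCloudAiplatformV1SchemaPromptApiSchema = {
 *   apiSchemaVersion: "1.0.0",
 *   multimodalPrompt: {
 *     promptMessage: {
 *       contents: [{ role: "user", parts: [{ text: "Say hello." }] }],
 *     },
 *   },
 * };
 * // Nested prompt structures are normalized by the module's serializer before
 * // being sent in a request body.
 * const payload = serializeGoogleCloudAiplatformV1SchemaPromptApiSchema(schema);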
*/ multimodalPrompt?: GoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt; /** * The prompt variation that stores preambles in separate fields. */ structuredPrompt?: GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt; /** * The prompt variation for Translation use case. */ translationPrompt?: GoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt; } function serializeGoogleCloudAiplatformV1SchemaPromptApiSchema(data: any): GoogleCloudAiplatformV1SchemaPromptApiSchema { return { ...data, executions: data["executions"] !== undefined ? data["executions"].map((item: any) => (serializeGoogleCloudAiplatformV1SchemaPromptInstancePromptExecution(item))) : undefined, multimodalPrompt: data["multimodalPrompt"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt(data["multimodalPrompt"]) : undefined, structuredPrompt: data["structuredPrompt"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt(data["structuredPrompt"]) : undefined, translationPrompt: data["translationPrompt"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt(data["translationPrompt"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptApiSchema(data: any): GoogleCloudAiplatformV1SchemaPromptApiSchema { return { ...data, executions: data["executions"] !== undefined ? data["executions"].map((item: any) => (deserializeGoogleCloudAiplatformV1SchemaPromptInstancePromptExecution(item))) : undefined, multimodalPrompt: data["multimodalPrompt"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt(data["multimodalPrompt"]) : undefined, structuredPrompt: data["structuredPrompt"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt(data["structuredPrompt"]) : undefined, translationPrompt: data["translationPrompt"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt(data["translationPrompt"]) : undefined, }; } /** * A prompt instance's parameters set that contains a set of variable values. */ export interface GoogleCloudAiplatformV1SchemaPromptInstancePromptExecution { /** * Maps variable names to their value. */ arguments?: { [key: string]: GoogleCloudAiplatformV1SchemaPromptInstanceVariableValue }; } function serializeGoogleCloudAiplatformV1SchemaPromptInstancePromptExecution(data: any): GoogleCloudAiplatformV1SchemaPromptInstancePromptExecution { return { ...data, arguments: data["arguments"] !== undefined ? Object.fromEntries(Object.entries(data["arguments"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1SchemaPromptInstanceVariableValue(v)]))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptInstancePromptExecution(data: any): GoogleCloudAiplatformV1SchemaPromptInstancePromptExecution { return { ...data, arguments: data["arguments"] !== undefined ? Object.fromEntries(Object.entries(data["arguments"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1SchemaPromptInstanceVariableValue(v)]))) : undefined, }; } /** * The value of a variable in prompt. */ export interface GoogleCloudAiplatformV1SchemaPromptInstanceVariableValue { /** * The parts of the variable value. */ partList?: GoogleCloudAiplatformV1SchemaPromptSpecPartList; } function serializeGoogleCloudAiplatformV1SchemaPromptInstanceVariableValue(data: any): GoogleCloudAiplatformV1SchemaPromptInstanceVariableValue { return { ...data, partList: data["partList"] !== undefined ? 
serializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(data["partList"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptInstanceVariableValue(data: any): GoogleCloudAiplatformV1SchemaPromptInstanceVariableValue { return { ...data, partList: data["partList"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(data["partList"]) : undefined, }; } /** * Prompt variation that embeds preambles to prompt string. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt { /** * The prompt message. */ promptMessage?: GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage; } function serializeGoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt(data: any): GoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt { return { ...data, promptMessage: data["promptMessage"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data["promptMessage"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt(data: any): GoogleCloudAiplatformV1SchemaPromptSpecMultimodalPrompt { return { ...data, promptMessage: data["promptMessage"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data["promptMessage"]) : undefined, }; } /** * A list of elements and information that make up a portion of prompt. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecPartList { /** * A list of elements that can be part of a prompt. */ parts?: GoogleCloudAiplatformV1Part[]; } function serializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(data: any): GoogleCloudAiplatformV1SchemaPromptSpecPartList { return { ...data, parts: data["parts"] !== undefined ? data["parts"].map((item: any) => (serializeGoogleCloudAiplatformV1Part(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(data: any): GoogleCloudAiplatformV1SchemaPromptSpecPartList { return { ...data, parts: data["parts"] !== undefined ? data["parts"].map((item: any) => (deserializeGoogleCloudAiplatformV1Part(item))) : undefined, }; } /** * The prompt message that aligns with the prompt message in * google.cloud.aiplatform.master.GenerateContentRequest. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage { /** * The content of the current conversation with the model. For single-turn * queries, this is a single instance. For multi-turn queries, this is a * repeated field that contains conversation history + latest request. */ contents?: GoogleCloudAiplatformV1Content[]; /** * Generation config. */ generationConfig?: GoogleCloudAiplatformV1GenerationConfig; /** * The model name. */ model?: string; /** * Per request settings for blocking unsafe content. Enforced on * GenerateContentResponse.candidates. */ safetySettings?: GoogleCloudAiplatformV1SafetySetting[]; /** * The user provided system instructions for the model. Note: only text * should be used in parts and content in each part will be in a separate * paragraph. */ systemInstruction?: GoogleCloudAiplatformV1Content; /** * Tool config. This config is shared for all tools provided in the request. */ toolConfig?: GoogleCloudAiplatformV1ToolConfig; /** * A list of `Tools` the model may use to generate the next response. A * `Tool` is a piece of code that enables the system to interact with external * systems to perform an action, or set of actions, outside of knowledge and * scope of the model. 
*/ tools?: GoogleCloudAiplatformV1Tool[]; } function serializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data: any): GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (serializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? serializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (serializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data: any): GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage { return { ...data, contents: data["contents"] !== undefined ? data["contents"].map((item: any) => (deserializeGoogleCloudAiplatformV1Content(item))) : undefined, generationConfig: data["generationConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1GenerationConfig(data["generationConfig"]) : undefined, systemInstruction: data["systemInstruction"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["systemInstruction"]) : undefined, tools: data["tools"] !== undefined ? data["tools"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tool(item))) : undefined, }; } /** * A pair of sentences used as reference in source and target languages. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecReferenceSentencePair { /** * Source sentence in the sentence pair. */ sourceSentence?: string; /** * Target sentence in the sentence pair. */ targetSentence?: string; } /** * A list of reference sentence pairs. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecReferenceSentencePairList { /** * Reference sentence pairs. */ referenceSentencePairs?: GoogleCloudAiplatformV1SchemaPromptSpecReferenceSentencePair[]; } /** * Prompt variation that stores preambles in separate fields. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt { /** * Preamble: The context of the prompt. */ context?: GoogleCloudAiplatformV1Content; /** * Preamble: A set of examples for expected model response. */ examples?: GoogleCloudAiplatformV1SchemaPromptSpecPartList[]; /** * Preamble: For infill prompt, the prefix before expected model response. */ infillPrefix?: string; /** * Preamble: For infill prompt, the suffix after expected model response. */ infillSuffix?: string; /** * Preamble: The input prefixes before each example input. */ inputPrefixes?: string[]; /** * Preamble: The output prefixes before each example output. */ outputPrefixes?: string[]; /** * Preamble: The input test data for prediction. Each PartList in this field * represents one text-only input set for a single model request. */ predictionInputs?: GoogleCloudAiplatformV1SchemaPromptSpecPartList[]; /** * The prompt message. */ promptMessage?: GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage; } function serializeGoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt(data: any): GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt { return { ...data, context: data["context"] !== undefined ? serializeGoogleCloudAiplatformV1Content(data["context"]) : undefined, examples: data["examples"] !== undefined ? 
data["examples"].map((item: any) => (serializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(item))) : undefined, predictionInputs: data["predictionInputs"] !== undefined ? data["predictionInputs"].map((item: any) => (serializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(item))) : undefined, promptMessage: data["promptMessage"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data["promptMessage"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt(data: any): GoogleCloudAiplatformV1SchemaPromptSpecStructuredPrompt { return { ...data, context: data["context"] !== undefined ? deserializeGoogleCloudAiplatformV1Content(data["context"]) : undefined, examples: data["examples"] !== undefined ? data["examples"].map((item: any) => (deserializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(item))) : undefined, predictionInputs: data["predictionInputs"] !== undefined ? data["predictionInputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1SchemaPromptSpecPartList(item))) : undefined, promptMessage: data["promptMessage"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data["promptMessage"]) : undefined, }; } /** * The translation example that contains reference sentences from various * sources. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecTranslationExample { /** * The reference sentences from inline text. */ referenceSentencePairLists?: GoogleCloudAiplatformV1SchemaPromptSpecReferenceSentencePairList[]; /** * The reference sentences from file. */ referenceSentencesFileInputs?: GoogleCloudAiplatformV1SchemaPromptSpecTranslationSentenceFileInput[]; } export interface GoogleCloudAiplatformV1SchemaPromptSpecTranslationFileInputSource { /** * The file's contents. */ content?: string; /** * The file's display name. */ displayName?: string; /** * The file's mime type. */ mimeType?: string; } export interface GoogleCloudAiplatformV1SchemaPromptSpecTranslationGcsInputSource { /** * Source data URI. For example, `gs://my_bucket/my_object`. */ inputUri?: string; } /** * Optional settings for translation prompt. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecTranslationOption { /** * How many shots to use. */ numberOfShots?: number; } /** * Prompt variation for Translation use case. */ export interface GoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt { /** * The translation example. */ example?: GoogleCloudAiplatformV1SchemaPromptSpecTranslationExample; /** * The translation option. */ option?: GoogleCloudAiplatformV1SchemaPromptSpecTranslationOption; /** * The prompt message. */ promptMessage?: GoogleCloudAiplatformV1SchemaPromptSpecPromptMessage; /** * The source language code. */ sourceLanguageCode?: string; /** * The target language code. */ targetLanguageCode?: string; } function serializeGoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt(data: any): GoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt { return { ...data, promptMessage: data["promptMessage"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data["promptMessage"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt(data: any): GoogleCloudAiplatformV1SchemaPromptSpecTranslationPrompt { return { ...data, promptMessage: data["promptMessage"] !== undefined ? 
deserializeGoogleCloudAiplatformV1SchemaPromptSpecPromptMessage(data["promptMessage"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaPromptSpecTranslationSentenceFileInput { /** * Inlined file source. */ fileInputSource?: GoogleCloudAiplatformV1SchemaPromptSpecTranslationFileInputSource; /** * Cloud Storage file source. */ gcsInputSource?: GoogleCloudAiplatformV1SchemaPromptSpecTranslationGcsInputSource; } /** * The metadata of Datasets that contain tables data. */ export interface GoogleCloudAiplatformV1SchemaTablesDatasetMetadata { inputConfig?: GoogleCloudAiplatformV1SchemaTablesDatasetMetadataInputConfig; } export interface GoogleCloudAiplatformV1SchemaTablesDatasetMetadataBigQuerySource { /** * The URI of a BigQuery table. e.g. bq://projectId.bqDatasetId.bqTableId */ uri?: string; } export interface GoogleCloudAiplatformV1SchemaTablesDatasetMetadataGcsSource { /** * Cloud Storage URI of one or more files. Only CSV files are supported. The * first line of the CSV file is used as the header. If there are multiple * files, the header is the first line of the lexicographically first file, * the other files must either contain the exact same header or omit the * header. */ uri?: string[]; } /** * The tables Dataset's data source. The Dataset doesn't store the data * directly, but only pointer(s) to its data. */ export interface GoogleCloudAiplatformV1SchemaTablesDatasetMetadataInputConfig { bigquerySource?: GoogleCloudAiplatformV1SchemaTablesDatasetMetadataBigQuerySource; gcsSource?: GoogleCloudAiplatformV1SchemaTablesDatasetMetadataGcsSource; } /** * Annotation details specific to text classification. */ export interface GoogleCloudAiplatformV1SchemaTextClassificationAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; } /** * Payload of Text DataItem. */ export interface GoogleCloudAiplatformV1SchemaTextDataItem { /** * Output only. Google Cloud Storage URI points to a copy of the original * text in the Vertex-managed bucket in the user's project. The text file is * up to 10MB in size. */ readonly gcsUri?: string; } /** * The metadata of Datasets that contain Text DataItems. */ export interface GoogleCloudAiplatformV1SchemaTextDatasetMetadata { /** * Points to a YAML file stored on Google Cloud Storage describing payload of * the Text DataItems that belong to this Dataset. */ dataItemSchemaUri?: string; /** * Google Cloud Storage Bucket name that contains the blob data of this * Dataset. */ gcsBucket?: string; } /** * Annotation details specific to text extraction. */ export interface GoogleCloudAiplatformV1SchemaTextExtractionAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * The segment of the text content. */ textSegment?: GoogleCloudAiplatformV1SchemaTextSegment; } function serializeGoogleCloudAiplatformV1SchemaTextExtractionAnnotation(data: any): GoogleCloudAiplatformV1SchemaTextExtractionAnnotation { return { ...data, textSegment: data["textSegment"] !== undefined ? 
serializeGoogleCloudAiplatformV1SchemaTextSegment(data["textSegment"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTextExtractionAnnotation(data: any): GoogleCloudAiplatformV1SchemaTextExtractionAnnotation { return { ...data, textSegment: data["textSegment"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTextSegment(data["textSegment"]) : undefined, }; } /** * The metadata of Datasets that contain Text Prompt data. */ export interface GoogleCloudAiplatformV1SchemaTextPromptDatasetMetadata { /** * Number of candidates. */ candidateCount?: bigint; /** * The Google Cloud Storage URI that stores the prompt data. */ gcsUri?: string; /** * Grounding checking configuration. */ groundingConfig?: GoogleCloudAiplatformV1SchemaPredictParamsGroundingConfig; /** * Whether the prompt dataset has prompt variable. */ hasPromptVariable?: boolean; /** * Whether or not the user has enabled logit probabilities in the model * parameters. */ logprobs?: boolean; /** * Value of the maximum number of tokens generated set when the dataset was * saved. */ maxOutputTokens?: bigint; /** * User-created prompt note. Note size limit is 2KB. */ note?: string; /** * The API schema of the prompt to support both UI and SDK usages. */ promptApiSchema?: GoogleCloudAiplatformV1SchemaPromptApiSchema; /** * Type of the prompt dataset. */ promptType?: string; /** * Seeding enables model to return a deterministic response on a best effort * basis. Determinism isn't guaranteed. This field determines whether or not * seeding is enabled. */ seedEnabled?: boolean; /** * The actual value of the seed. */ seedValue?: bigint; /** * Customized stop sequences. */ stopSequences?: string[]; /** * The content of the prompt dataset system instruction. */ systemInstruction?: string; /** * The Google Cloud Storage URI that stores the system instruction, starting * with gs://. */ systemInstructionGcsUri?: string; /** * Temperature value used for sampling set when the dataset was saved. This * value is used to tune the degree of randomness. */ temperature?: number; /** * The content of the prompt dataset. */ text?: string; /** * Top K value set when the dataset was saved. This value determines how many * candidates with highest probability from the vocab would be selected for * each decoding step. */ topK?: bigint; /** * Top P value set when the dataset was saved. Given topK tokens for * decoding, top candidates will be selected until the sum of their * probabilities is topP. */ topP?: number; } function serializeGoogleCloudAiplatformV1SchemaTextPromptDatasetMetadata(data: any): GoogleCloudAiplatformV1SchemaTextPromptDatasetMetadata { return { ...data, candidateCount: data["candidateCount"] !== undefined ? String(data["candidateCount"]) : undefined, maxOutputTokens: data["maxOutputTokens"] !== undefined ? String(data["maxOutputTokens"]) : undefined, promptApiSchema: data["promptApiSchema"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaPromptApiSchema(data["promptApiSchema"]) : undefined, seedValue: data["seedValue"] !== undefined ? String(data["seedValue"]) : undefined, topK: data["topK"] !== undefined ? String(data["topK"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTextPromptDatasetMetadata(data: any): GoogleCloudAiplatformV1SchemaTextPromptDatasetMetadata { return { ...data, candidateCount: data["candidateCount"] !== undefined ? BigInt(data["candidateCount"]) : undefined, maxOutputTokens: data["maxOutputTokens"] !== undefined ? 
BigInt(data["maxOutputTokens"]) : undefined, promptApiSchema: data["promptApiSchema"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaPromptApiSchema(data["promptApiSchema"]) : undefined, seedValue: data["seedValue"] !== undefined ? BigInt(data["seedValue"]) : undefined, topK: data["topK"] !== undefined ? BigInt(data["topK"]) : undefined, }; } /** * The text segment inside of DataItem. */ export interface GoogleCloudAiplatformV1SchemaTextSegment { /** * The text content in the segment for output only. */ content?: string; /** * Zero-based character index of the first character past the end of the text * segment (counting characters from the beginning of the text). The character * at the end_offset is NOT included in the text segment. */ endOffset?: bigint; /** * Zero-based character index of the first character of the text segment * (counting characters from the beginning of the text). */ startOffset?: bigint; } function serializeGoogleCloudAiplatformV1SchemaTextSegment(data: any): GoogleCloudAiplatformV1SchemaTextSegment { return { ...data, endOffset: data["endOffset"] !== undefined ? String(data["endOffset"]) : undefined, startOffset: data["startOffset"] !== undefined ? String(data["startOffset"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTextSegment(data: any): GoogleCloudAiplatformV1SchemaTextSegment { return { ...data, endOffset: data["endOffset"] !== undefined ? BigInt(data["endOffset"]) : undefined, startOffset: data["startOffset"] !== undefined ? BigInt(data["startOffset"]) : undefined, }; } /** * Annotation details specific to text sentiment. */ export interface GoogleCloudAiplatformV1SchemaTextSentimentAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * The sentiment score for text. */ sentiment?: number; /** * The sentiment max score for text. */ sentimentMax?: number; } /** * The metadata of a SavedQuery that contains TextSentiment Annotations. */ export interface GoogleCloudAiplatformV1SchemaTextSentimentSavedQueryMetadata { /** * The maximum sentiment of sentiment Annotation in this SavedQuery. */ sentimentMax?: number; } /** * A time period inside of a DataItem that has a time dimension (e.g. video). */ export interface GoogleCloudAiplatformV1SchemaTimeSegment { /** * End of the time segment (exclusive), represented as the duration since the * start of the DataItem. */ endTimeOffset?: number /* Duration */; /** * Start of the time segment (inclusive), represented as the duration since * the start of the DataItem. */ startTimeOffset?: number /* Duration */; } function serializeGoogleCloudAiplatformV1SchemaTimeSegment(data: any): GoogleCloudAiplatformV1SchemaTimeSegment { return { ...data, endTimeOffset: data["endTimeOffset"] !== undefined ? data["endTimeOffset"] : undefined, startTimeOffset: data["startTimeOffset"] !== undefined ? data["startTimeOffset"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTimeSegment(data: any): GoogleCloudAiplatformV1SchemaTimeSegment { return { ...data, endTimeOffset: data["endTimeOffset"] !== undefined ? data["endTimeOffset"] : undefined, startTimeOffset: data["startTimeOffset"] !== undefined ? data["startTimeOffset"] : undefined, }; } /** * The metadata of Datasets that contain time series data. 
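 *
 * A minimal sketch with a BigQuery-backed source (the table URI and column
 * names are illustrative):
 *
 * @example
 * const metadata: GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadata = {
 *   inputConfig: {
 *     bigquerySource: { uri: "bq://projectId.bqDatasetId.bqTableId" },
 *   },
 *   timeColumn: "date",
 *   timeSeriesIdentifierColumn: "store_id",
 * };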
*/ export interface GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadata { inputConfig?: GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadataInputConfig; /** * The column name of the time column that identifies time order in the time * series. */ timeColumn?: string; /** * The column name of the time series identifier column that identifies the * time series. */ timeSeriesIdentifierColumn?: string; } export interface GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadataBigQuerySource { /** * The URI of a BigQuery table. */ uri?: string; } export interface GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadataGcsSource { /** * Cloud Storage URI of one or more files. Only CSV files are supported. The * first line of the CSV file is used as the header. If there are multiple * files, the header is the first line of the lexicographically first file, * the other files must either contain the exact same header or omit the * header. */ uri?: string[]; } /** * The time series Dataset's data source. The Dataset doesn't store the data * directly, but only pointer(s) to its data. */ export interface GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadataInputConfig { bigquerySource?: GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadataBigQuerySource; gcsSource?: GoogleCloudAiplatformV1SchemaTimeSeriesDatasetMetadataGcsSource; } /** * A TrainingJob that trains and uploads an AutoML Forecasting Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecasting { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecasting(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecasting { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecasting(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecasting { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs { /** * Additional experiment flags for the time series forecasting training. */ additionalExperiments?: string[]; /** * Names of columns that are available and provided when a forecast is * requested. These columns contain information for the given entity * (identified by the time_series_identifier_column column) that is known at * forecast. For example, predicted weather for a specific day. */ availableAtForecastColumns?: string[]; /** * The amount of time into the past training and prediction data is used for * model training and prediction respectively. Expressed in number of units * defined by the `data_granularity` field. 
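 *
 * For example, with daily granularity a contextWindow of 30 and a
 * forecastHorizon of 7 mean that roughly 30 days of history inform each
 * 7-day forecast. A hypothetical sketch (column names and values are
 * illustrative; the bigint fields are converted to strings by the module's
 * serializer):
 *
 * @example
 * const forecastingInputs: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs = {
 *   targetColumn: "sales",
 *   timeColumn: "date",
 *   timeSeriesIdentifierColumn: "store_id",
 *   dataGranularity: { unit: "day", quantity: 1n },
 *   contextWindow: 30n,
 *   forecastHorizon: 7n,
 *   optimizationObjective: "minimize-rmse",
 *   trainBudgetMilliNodeHours: 1000n,
 * };
 * const payload = serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs(forecastingInputs);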
*/ contextWindow?: bigint; /** * Expected difference in time granularity between rows in the data. */ dataGranularity?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity; /** * If probabilistic inference is enabled, the model will fit a distribution * that captures the uncertainty of a prediction. At inference time, the * predictive distribution is used to make a point prediction that minimizes * the optimization objective. For example, the mean of a predictive * distribution is the point prediction that minimizes RMSE loss. If quantiles * are specified, then the quantiles of the distribution are also returned. * The optimization objective cannot be minimize-quantile-loss. */ enableProbabilisticInference?: boolean; /** * Configuration for exporting test set predictions to a BigQuery table. If * this configuration is absent, then the export is not performed. */ exportEvaluatedDataItemsConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** * The amount of time into the future for which forecasted values for the * target are returned. Expressed in number of units defined by the * `data_granularity` field. */ forecastHorizon?: bigint; /** * Configuration that defines the hierarchical relationship of time series * and parameters for hierarchical forecasting strategies. */ hierarchyConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHierarchyConfig; /** * The geographical region based on which the holiday effect is applied in * modeling by adding holiday categorical array feature that include all * holidays matching the date. This option only allowed when data_granularity * is day. By default, holiday effect modeling is disabled. To turn it on, * specify the holiday region using this option. */ holidayRegions?: string[]; /** * Objective function the model is optimizing towards. The training process * creates a model that optimizes the value of the objective function over the * validation set. The supported optimization objectives: * "minimize-rmse" * (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - * Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize * root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize * root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - * Minimize the combination of weighted absolute percentage error (WAPE) and * mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the * quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - * Minimize the mean absolute percentage error. */ optimizationObjective?: string; /** * Quantiles to use for minimize-quantile-loss `optimization_objective`, or * for probabilistic inference. Up to 5 quantiles are allowed of values * between 0 and 1, exclusive. Required if the value of optimization_objective * is minimize-quantile-loss. Represents the percent quantiles to use for that * objective. Quantiles must be unique. */ quantiles?: number[]; /** * The name of the column that the Model is to predict values for. This * column must be unavailable at forecast. */ targetColumn?: string; /** * The name of the column that identifies time order in the time series. This * column must be available at forecast. */ timeColumn?: string; /** * Column names that should be used as attribute columns. The value of these * columns does not vary as a function of time. For example, store ID or item * color. 
*/ timeSeriesAttributeColumns?: string[]; /** * The name of the column that identifies the time series. */ timeSeriesIdentifierColumn?: string; /** * Required. The train budget of creating this model, expressed in milli node * hours i.e. 1,000 value in this field means 1 node hour. The training cost * of the model will not exceed this budget. The final cost will be attempted * to be close to the budget, though may end up being (even) noticeably * smaller - at the backend's discretion. This especially may happen when * further model training ceases to provide any improvements. If the budget is * set to a value known to be insufficient to train a model for the given * dataset, the training won't be attempted and will error. The train budget * must be between 1,000 and 72,000 milli node hours, inclusive. */ trainBudgetMilliNodeHours?: bigint; /** * Each transformation will apply transform function to given input column. * And the result will be used for training. When creating transformation for * BigQuery Struct column, the column should be flattened using "." as the * delimiter. */ transformations?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformation[]; /** * Names of columns that are unavailable when a forecast is requested. This * column contains information for the given entity (identified by the * time_series_identifier_column) that is unknown before the forecast For * example, actual weather on a given day. */ unavailableAtForecastColumns?: string[]; /** * Validation options for the data validation component. The available * options are: * "fail-pipeline" - default, will validate against the * validation and fail the pipeline if it fails. * "ignore-validation" - * ignore the results of the validation and continue */ validationOptions?: string; /** * Column name that should be used as the weight column. Higher values in * this column give more importance to the row during model training. The * column must have numeric values between 0 and 10000 inclusively; 0 means * the row is ignored for training. If weight column field is not set, then * all rows are assumed to have equal weight of 1. */ weightColumn?: string; /** * Config containing strategy for generating sliding windows. */ windowConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs { return { ...data, contextWindow: data["contextWindow"] !== undefined ? String(data["contextWindow"]) : undefined, dataGranularity: data["dataGranularity"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity(data["dataGranularity"]) : undefined, forecastHorizon: data["forecastHorizon"] !== undefined ? String(data["forecastHorizon"]) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? String(data["trainBudgetMilliNodeHours"]) : undefined, windowConfig: data["windowConfig"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data["windowConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputs { return { ...data, contextWindow: data["contextWindow"] !== undefined ? 
BigInt(data["contextWindow"]) : undefined, dataGranularity: data["dataGranularity"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity(data["dataGranularity"]) : undefined, forecastHorizon: data["forecastHorizon"] !== undefined ? BigInt(data["forecastHorizon"]) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? BigInt(data["trainBudgetMilliNodeHours"]) : undefined, windowConfig: data["windowConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data["windowConfig"]) : undefined, }; } /** * A duration of time expressed in time granularity units. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity { /** * The number of granularity_units between data points in the training data. * If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all * other values of `granularity_unit`, must be 1. */ quantity?: bigint; /** * The time granularity unit of this time period. The supported units are: * * "minute" * "hour" * "day" * "week" * "month" * "year" */ unit?: string; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity { return { ...data, quantity: data["quantity"] !== undefined ? String(data["quantity"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsGranularity { return { ...data, quantity: data["quantity"] !== undefined ? BigInt(data["quantity"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformation { auto?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationAutoTransformation; categorical?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationCategoricalTransformation; numeric?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationNumericTransformation; text?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTextTransformation; timestamp?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTimestampTransformation; } /** * Training pipeline will infer the proper transformation based on the * statistic of dataset. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationAutoTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * categorical string as is--no change to case, punctuation, spelling, tense, * and so on. * Convert the category name to a dictionary lookup index and * generate an embedding for each index. * Categories that appear less than 5 * times in the training dataset are treated as the "unknown" category. The * "unknown" category gets its own special lookup index and resulting embedding. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationCategoricalTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * value converted to float32. * The z_score of the value. 
* log(value+1) when * the value is greater than or equal to 0. Otherwise, this transformation is * not applied and the value is considered a missing value. * z_score of * log(value+1) when the value is greater than or equal to 0. Otherwise, this * transformation is not applied and the value is considered a missing value. * * A boolean value that indicates whether the value is valid. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationNumericTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * text as is--no change to case, punctuation, spelling, tense, and so on. * * Convert the category name to a dictionary lookup index and generate an * embedding for each index. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTextTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * Apply * the transformation functions for Numerical columns. * Determine the year, * month, day,and weekday. Treat each value from the timestamp as a Categorical * column. * Invalid numerical values (for example, values that fall outside of * a typical timestamp range, or are extreme values) receive no special * treatment and are not removed. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingInputsTransformationTimestampTransformation { columnName?: string; /** * The format in which that time field is expressed. The time_format must * either be one of: * `unix-seconds` * `unix-milliseconds` * * `unix-microseconds` * `unix-nanoseconds` (for respectively number of * seconds, milliseconds, microseconds and nanoseconds since start of the Unix * epoch); or be written in `strftime` syntax. If time_format is not set, then * the default format is RFC 3339 `date-time` format, where `time-offset` = * `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ timeFormat?: string; } /** * Model metadata specific to AutoML Forecasting. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata { /** * BigQuery destination uri for exported evaluated examples. */ evaluatedDataItemsBigqueryUri?: string; /** * Output only. The actual training cost of the model, expressed in milli * node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to * not exceed the train budget. */ trainCostMilliNodeHours?: bigint; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? String(data["trainCostMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlForecastingMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? BigInt(data["trainCostMilliNodeHours"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Image Classification Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassification { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs; /** * The metadata information. 
*/ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassification(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassification { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassification(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassification { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs { /** * The ID of the `base` model. If it is specified, the new model will be * trained based on the `base` model. Otherwise, the new model will be trained * from scratch. The `base` model must be in the same Project and Location as * the new Model to train, and have the same modelType. */ baseModelId?: string; /** * The training budget of creating this model, expressed in milli node hours * i.e. 1,000 value in this field means 1 node hour. The actual * metadata.costMilliNodeHours will be equal or less than this value. If * further model training ceases to provide any improvements, it will stop * without using the full budget and the metadata.successfulStopReason will be * `model-converged`. Note, node_hour = actual_hour * * number_of_nodes_involved. For modelType `cloud`(default), the budget must * be between 8,000 and 800,000 milli node hours, inclusive. The default value * is 192,000 which represents one day in wall time, considering 8 nodes are * used. For model types `mobile-tf-low-latency-1`, `mobile-tf-versatile-1`, * `mobile-tf-high-accuracy-1`, the training budget must be between 1,000 and * 100,000 milli node hours, inclusive. The default value is 24,000 which * represents one day in wall time on a single node that is used. */ budgetMilliNodeHours?: bigint; /** * Use the entire training budget. This disables the early stopping feature. * When false the early stopping feature is enabled, which means that AutoML * Image Classification might stop training before the entire training budget * has been used. */ disableEarlyStopping?: boolean; modelType?: | "MODEL_TYPE_UNSPECIFIED" | "CLOUD" | "CLOUD_1" | "MOBILE_TF_LOW_LATENCY_1" | "MOBILE_TF_VERSATILE_1" | "MOBILE_TF_HIGH_ACCURACY_1" | "EFFICIENTNET" | "MAXVIT" | "VIT" | "COCA"; /** * If false, a single-label (multi-class) Model will be trained (i.e. * assuming that for each image just up to one annotation may be applicable). * If true, a multi-label Model will be trained (i.e. assuming that for each * image multiple annotations may be applicable). */ multiLabel?: boolean; /** * Trainer type for Vision TrainRequest. */ tunableParameter?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter; /** * The ID of `base` model for upTraining. 
If it is specified, the new model * will be upTrained based on the `base` model for upTraining. Otherwise, the * new model will be trained from scratch. The `base` model for upTraining * must be in the same Project and Location as the new Model to train, and * have the same modelType. */ uptrainBaseModelId?: string; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs { return { ...data, budgetMilliNodeHours: data["budgetMilliNodeHours"] !== undefined ? String(data["budgetMilliNodeHours"]) : undefined, tunableParameter: data["tunableParameter"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter(data["tunableParameter"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationInputs { return { ...data, budgetMilliNodeHours: data["budgetMilliNodeHours"] !== undefined ? BigInt(data["budgetMilliNodeHours"]) : undefined, tunableParameter: data["tunableParameter"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter(data["tunableParameter"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata { /** * The actual training cost of creating this model, expressed in milli node * hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not * exceed inputs.budgetMilliNodeHours. */ costMilliNodeHours?: bigint; /** * For successful job completions, this is the reason why the job has * finished. */ successfulStopReason?: | "SUCCESSFUL_STOP_REASON_UNSPECIFIED" | "BUDGET_REACHED" | "MODEL_CONVERGED"; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata { return { ...data, costMilliNodeHours: data["costMilliNodeHours"] !== undefined ? String(data["costMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageClassificationMetadata { return { ...data, costMilliNodeHours: data["costMilliNodeHours"] !== undefined ? BigInt(data["costMilliNodeHours"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Image Object Detection * Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetection { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs; /** * The metadata information */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetection(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetection { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? 
serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetection(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetection { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs { /** * The training budget of creating this model, expressed in milli node hours * i.e. 1,000 value in this field means 1 node hour. The actual * metadata.costMilliNodeHours will be equal or less than this value. If * further model training ceases to provide any improvements, it will stop * without using the full budget and the metadata.successfulStopReason will be * `model-converged`. Note, node_hour = actual_hour * * number_of_nodes_involved. For modelType `cloud`(default), the budget must * be between 20,000 and 900,000 milli node hours, inclusive. The default * value is 216,000 which represents one day in wall time, considering 9 nodes * are used. For model types `mobile-tf-low-latency-1`, * `mobile-tf-versatile-1`, `mobile-tf-high-accuracy-1` the training budget * must be between 1,000 and 100,000 milli node hours, inclusive. The default * value is 24,000 which represents one day in wall time on a single node that * is used. */ budgetMilliNodeHours?: bigint; /** * Use the entire training budget. This disables the early stopping feature. * When false the early stopping feature is enabled, which means that AutoML * Image Object Detection might stop training before the entire training * budget has been used. */ disableEarlyStopping?: boolean; modelType?: | "MODEL_TYPE_UNSPECIFIED" | "CLOUD_HIGH_ACCURACY_1" | "CLOUD_LOW_LATENCY_1" | "CLOUD_1" | "MOBILE_TF_LOW_LATENCY_1" | "MOBILE_TF_VERSATILE_1" | "MOBILE_TF_HIGH_ACCURACY_1" | "CLOUD_STREAMING_1" | "SPINENET" | "YOLO"; /** * Trainer type for Vision TrainRequest. */ tunableParameter?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter; /** * The ID of `base` model for upTraining. If it is specified, the new model * will be upTrained based on the `base` model for upTraining. Otherwise, the * new model will be trained from scratch. The `base` model for upTraining * must be in the same Project and Location as the new Model to train, and * have the same modelType. */ uptrainBaseModelId?: string; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs { return { ...data, budgetMilliNodeHours: data["budgetMilliNodeHours"] !== undefined ? String(data["budgetMilliNodeHours"]) : undefined, tunableParameter: data["tunableParameter"] !== undefined ? 
serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter(data["tunableParameter"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionInputs { return { ...data, budgetMilliNodeHours: data["budgetMilliNodeHours"] !== undefined ? BigInt(data["budgetMilliNodeHours"]) : undefined, tunableParameter: data["tunableParameter"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter(data["tunableParameter"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata { /** * The actual training cost of creating this model, expressed in milli node * hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not * exceed inputs.budgetMilliNodeHours. */ costMilliNodeHours?: bigint; /** * For successful job completions, this is the reason why the job has * finished. */ successfulStopReason?: | "SUCCESSFUL_STOP_REASON_UNSPECIFIED" | "BUDGET_REACHED" | "MODEL_CONVERGED"; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata { return { ...data, costMilliNodeHours: data["costMilliNodeHours"] !== undefined ? String(data["costMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageObjectDetectionMetadata { return { ...data, costMilliNodeHours: data["costMilliNodeHours"] !== undefined ? BigInt(data["costMilliNodeHours"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Image Segmentation Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentation { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentation(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentation { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentation(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentation { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs { /** * The ID of the `base` model. 
If it is specified, the new model will be * trained based on the `base` model. Otherwise, the new model will be trained * from scratch. The `base` model must be in the same Project and Location as * the new Model to train, and have the same modelType. */ baseModelId?: string; /** * The training budget of creating this model, expressed in milli node hours * i.e. 1,000 value in this field means 1 node hour. The actual * metadata.costMilliNodeHours will be equal or less than this value. If * further model training ceases to provide any improvements, it will stop * without using the full budget and the metadata.successfulStopReason will be * `model-converged`. Note, node_hour = actual_hour * * number_of_nodes_involved. Or actual_wall_clock_hours = * train_budget_milli_node_hours / (number_of_nodes_involved * 1000) For * modelType `cloud-high-accuracy-1`(default), the budget must be between * 20,000 and 2,000,000 milli node hours, inclusive. The default value is * 192,000 which represents one day in wall time (1000 milli * 24 hours * 8 * nodes). */ budgetMilliNodeHours?: bigint; modelType?: | "MODEL_TYPE_UNSPECIFIED" | "CLOUD_HIGH_ACCURACY_1" | "CLOUD_LOW_ACCURACY_1" | "MOBILE_TF_LOW_LATENCY_1"; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs { return { ...data, budgetMilliNodeHours: data["budgetMilliNodeHours"] !== undefined ? String(data["budgetMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationInputs { return { ...data, budgetMilliNodeHours: data["budgetMilliNodeHours"] !== undefined ? BigInt(data["budgetMilliNodeHours"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata { /** * The actual training cost of creating this model, expressed in milli node * hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to not * exceed inputs.budgetMilliNodeHours. */ costMilliNodeHours?: bigint; /** * For successful job completions, this is the reason why the job has * finished. */ successfulStopReason?: | "SUCCESSFUL_STOP_REASON_UNSPECIFIED" | "BUDGET_REACHED" | "MODEL_CONVERGED"; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata { return { ...data, costMilliNodeHours: data["costMilliNodeHours"] !== undefined ? String(data["costMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlImageSegmentationMetadata { return { ...data, costMilliNodeHours: data["costMilliNodeHours"] !== undefined ? BigInt(data["costMilliNodeHours"]) : undefined, }; } /** * A wrapper class which contains the tunable parameters in an AutoML Image * training job. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter { /** * Optional. An unique name of pretrained model checkpoint provided in model * garden, it will be mapped to a GCS location internally. */ checkpointName?: string; /** * Customizable dataset settings, used in the `model_garden_trainer`. 
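* The accepted keys and values are trainer-specific and are not enumerated in
* this client; a purely hypothetical entry might look like
* `{ "dataset_name": "my_dataset" }`.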
*/ datasetConfig?: { [key: string]: string }; /** * Optional. StudySpec of hyperparameter tuning job. Required for * `model_garden_trainer`. */ studySpec?: GoogleCloudAiplatformV1StudySpec; /** * Customizable trainer settings, used in the `model_garden_trainer`. */ trainerConfig?: { [key: string]: string }; trainerType?: | "TRAINER_TYPE_UNSPECIFIED" | "AUTOML_TRAINER" | "MODEL_GARDEN_TRAINER"; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter { return { ...data, studySpec: data["studySpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutomlImageTrainingTunableParameter { return { ...data, studySpec: data["studySpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Tables Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTables { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTables(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTables { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTables(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTables { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs { /** * Additional experiment flags for the Tables training pipeline. */ additionalExperiments?: string[]; /** * Use the entire training budget. This disables the early stopping feature. * By default, the early stopping feature is enabled, which means that AutoML * Tables might stop training before the entire training budget has been used. */ disableEarlyStopping?: boolean; /** * Configuration for exporting test set predictions to a BigQuery table. If * this configuration is absent, then the export is not performed. */ exportEvaluatedDataItemsConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** * Objective function the model is optimizing towards. The training process * creates a model that maximizes/minimizes the value of the objective * function over the validation set. The supported optimization objectives * depend on the prediction type. If the field is not set, a default objective * function is used.
classification (binary): "maximize-au-roc" (default) - * Maximize the area under the receiver operating characteristic (ROC) curve. * "minimize-log-loss" - Minimize log loss. "maximize-au-prc" - Maximize the * area under the precision-recall curve. "maximize-precision-at-recall" - * Maximize precision for a specified recall value. * "maximize-recall-at-precision" - Maximize recall for a specified precision * value. classification (multi-class): "minimize-log-loss" (default) - * Minimize log loss. regression: "minimize-rmse" (default) - Minimize * root-mean-squared error (RMSE). "minimize-mae" - Minimize mean-absolute * error (MAE). "minimize-rmsle" - Minimize root-mean-squared log error * (RMSLE). */ optimizationObjective?: string; /** * Required when optimization_objective is "maximize-recall-at-precision". * Must be between 0 and 1, inclusive. */ optimizationObjectivePrecisionValue?: number; /** * Required when optimization_objective is "maximize-precision-at-recall". * Must be between 0 and 1, inclusive. */ optimizationObjectiveRecallValue?: number; /** * The type of prediction the Model is to produce. "classification" - Predict * one out of multiple target values is picked for each row. "regression" - * Predict a value based on its relation to other values. This type is * available only to columns that contain semantically numeric values, i.e. * integers or floating point number, even if stored as e.g. strings. */ predictionType?: string; /** * The column name of the target column that the model is to predict. */ targetColumn?: string; /** * Required. The train budget of creating this model, expressed in milli node * hours i.e. 1,000 value in this field means 1 node hour. The training cost * of the model will not exceed this budget. The final cost will be attempted * to be close to the budget, though may end up being (even) noticeably * smaller - at the backend's discretion. This especially may happen when * further model training ceases to provide any improvements. If the budget is * set to a value known to be insufficient to train a model for the given * dataset, the training won't be attempted and will error. The train budget * must be between 1,000 and 72,000 milli node hours, inclusive. */ trainBudgetMilliNodeHours?: bigint; /** * Each transformation will apply transform function to given input column. * And the result will be used for training. When creating transformation for * BigQuery Struct column, the column should be flattened using "." as the * delimiter. */ transformations?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformation[]; /** * Column name that should be used as the weight column. Higher values in * this column give more importance to the row during model training. The * column must have numeric values between 0 and 10000 inclusively; 0 means * the row is ignored for training. If weight column field is not set, then * all rows are assumed to have equal weight of 1. */ weightColumnName?: string; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs { return { ...data, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? 
String(data["trainBudgetMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputs { return { ...data, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? BigInt(data["trainBudgetMilliNodeHours"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformation { auto?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationAutoTransformation; categorical?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalTransformation; numeric?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericTransformation; repeatedCategorical?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalArrayTransformation; repeatedNumeric?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericArrayTransformation; repeatedText?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextArrayTransformation; text?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextTransformation; timestamp?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTimestampTransformation; } /** * Training pipeline will infer the proper transformation based on the * statistic of dataset. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationAutoTransformation { columnName?: string; } /** * Treats the column as categorical array and performs following transformation * functions. * For each element in the array, convert the category name to a * dictionary lookup index and generate an embedding for each index. Combine the * embedding of all elements into a single embedding using the mean. * Empty * arrays treated as an embedding of zeroes. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalArrayTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * categorical string as is--no change to case, punctuation, spelling, tense, * and so on. * Convert the category name to a dictionary lookup index and * generate an embedding for each index. * Categories that appear less than 5 * times in the training dataset are treated as the "unknown" category. The * "unknown" category gets its own special lookup index and resulting embedding. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationCategoricalTransformation { columnName?: string; } /** * Treats the column as numerical array and performs following transformation * functions. * All transformations for Numerical types applied to the average * of the all elements. * The average of empty arrays is treated as zero. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericArrayTransformation { columnName?: string; /** * If invalid values is allowed, the training pipeline will create a boolean * feature that indicated whether the value is valid. Otherwise, the training * pipeline will discard the input row from trainining data. */ invalidValuesAllowed?: boolean; } /** * Training pipeline will perform following transformation functions. 
* The * value converted to float32. * The z_score of the value. * log(value+1) when * the value is greater than or equal to 0. Otherwise, this transformation is * not applied and the value is considered a missing value. * z_score of * log(value+1) when the value is greater than or equal to 0. Otherwise, this * transformation is not applied and the value is considered a missing value. * * A boolean value that indicates whether the value is valid. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationNumericTransformation { columnName?: string; /** * If invalid values are allowed, the training pipeline will create a boolean * feature that indicates whether the value is valid. Otherwise, the training * pipeline will discard the input row from training data. */ invalidValuesAllowed?: boolean; } /** * Treats the column as text array and performs following transformation * functions. * Concatenate all text values in the array into a single text * value using a space (" ") as a delimiter, and then treat the result as a * single text value. Apply the transformations for Text columns. * Empty arrays * treated as an empty text. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextArrayTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * text as is--no change to case, punctuation, spelling, tense, and so on. * * Tokenize text to words. Convert each word to a dictionary lookup index and * generate an embedding for each index. Combine the embedding of all elements * into a single embedding using the mean. * Tokenization is based on unicode * script boundaries. * Missing values get their own lookup index and resulting * embedding. * Stop-words receive no special treatment and are not removed. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTextTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * Apply * the transformation functions for Numerical columns. * Determine the year, * month, day, and weekday. Treat each value from the * timestamp as a * Categorical column. * Invalid numerical values (for example, values that fall * outside of a typical timestamp range, or are extreme values) receive no * special treatment and are not removed. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesInputsTransformationTimestampTransformation { columnName?: string; /** * If invalid values are allowed, the training pipeline will create a boolean * feature that indicates whether the value is valid. Otherwise, the training * pipeline will discard the input row from training data. */ invalidValuesAllowed?: boolean; /** * The format in which that time field is expressed. The time_format must * either be one of: * `unix-seconds` * `unix-milliseconds` * * `unix-microseconds` * `unix-nanoseconds` (for respectively number of * seconds, milliseconds, microseconds and nanoseconds since start of the Unix * epoch); or be written in `strftime` syntax. If time_format is not set, then * the default format is RFC 3339 `date-time` format, where `time-offset` = * `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ timeFormat?: string; } /** * Model metadata specific to AutoML Tables.
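* For example (illustrative values only), a completed pipeline might report
* `trainCostMilliNodeHours: 10500n` and, if export was configured, an
* `evaluatedDataItemsBigqueryUri` such as
* `bq://my-project:my_dataset:evaluated_examples`.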
*/ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata { /** * BigQuery destination uri for exported evaluated examples. */ evaluatedDataItemsBigqueryUri?: string; /** * Output only. The actual training cost of the model, expressed in milli * node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to * not exceed the train budget. */ trainCostMilliNodeHours?: bigint; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? String(data["trainCostMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTablesMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? BigInt(data["trainCostMilliNodeHours"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Text Classification Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextClassification { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextClassificationInputs; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextClassificationInputs { multiLabel?: boolean; } /** * A TrainingJob that trains and uploads an AutoML Text Extraction Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextExtraction { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextExtractionInputs; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextExtractionInputs { } /** * A TrainingJob that trains and uploads an AutoML Text Sentiment Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextSentiment { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextSentimentInputs; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlTextSentimentInputs { /** * A sentiment is expressed as an integer ordinal, where higher value means a * more positive sentiment. The range of sentiments that will be used is * between 0 and sentimentMax (inclusive on both ends), and all the values in * the range must be represented in the dataset before a model can be created. * Only the Annotations with this sentimentMax will be used for training. * sentimentMax value must be between 1 and 10 (inclusive). */ sentimentMax?: number; } /** * A TrainingJob that trains and uploads an AutoML Video Action Recognition * Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoActionRecognition { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoActionRecognitionInputs; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoActionRecognitionInputs { modelType?: | "MODEL_TYPE_UNSPECIFIED" | "CLOUD" | "MOBILE_VERSATILE_1" | "MOBILE_JETSON_VERSATILE_1" | "MOBILE_CORAL_VERSATILE_1"; } /** * A TrainingJob that trains and uploads an AutoML Video Classification Model. 
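* A minimal sketch of the request inputs (illustrative only):
* `{ inputs: { modelType: "CLOUD" } }`; the other supported model types are
* listed on the inputs interface below.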
*/ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoClassification { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoClassificationInputs; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoClassificationInputs { modelType?: | "MODEL_TYPE_UNSPECIFIED" | "CLOUD" | "MOBILE_VERSATILE_1" | "MOBILE_JETSON_VERSATILE_1"; } /** * A TrainingJob that trains and uploads an AutoML Video ObjectTracking Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoObjectTracking { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoObjectTrackingInputs; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionAutoMlVideoObjectTrackingInputs { modelType?: | "MODEL_TYPE_UNSPECIFIED" | "CLOUD" | "MOBILE_VERSATILE_1" | "MOBILE_CORAL_VERSATILE_1" | "MOBILE_CORAL_LOW_LATENCY_1" | "MOBILE_JETSON_VERSATILE_1" | "MOBILE_JETSON_LOW_LATENCY_1"; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomJobMetadata { /** * The resource name of the CustomJob that has been created to carry out this * custom task. */ backingCustomJob?: string; } /** * A TrainingJob that trains a custom code Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomTask { /** * The input parameters of this CustomTask. */ inputs?: GoogleCloudAiplatformV1CustomJobSpec; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomJobMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomTask(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomTask { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["inputs"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomTask(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionCustomTask { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["inputs"]) : undefined, }; } /** * Configuration for exporting test set predictions to a BigQuery table. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig { /** * URI of desired destination BigQuery table. Expected format: * `bq://{project_id}:{dataset_id}:{table}` If not specified, then results are * exported to the following auto-created BigQuery table: * `{project_id}:export_evaluated_examples_{model_name}_{yyyy_MM_dd'T'HH_mm_ss_SSS'Z'}.evaluated_examples` */ destinationBigqueryUri?: string; /** * If true and an export destination is specified, then the contents of the * destination are overwritten. Otherwise, if the export destination already * exists, then the export operation fails. */ overrideExistingTable?: boolean; } /** * Configuration that defines the hierarchical relationship of time series and * parameters for hierarchical forecasting strategies. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHierarchyConfig { /** * A list of time series attribute column names that define the time series * hierarchy. Only one level of hierarchy is supported, ex. 'region' for a * hierarchy of stores or 'department' for a hierarchy of products. If * multiple columns are specified, time series will be grouped by their * combined values, ex. 
('blue', 'large') for 'color' and 'size', up to 5 * columns are accepted. If no group columns are specified, all time series * are considered to be part of the same group. */ groupColumns?: string[]; /** * The weight of the loss for predictions aggregated over both the horizon * and time series in the same hierarchy group. */ groupTemporalTotalWeight?: number; /** * The weight of the loss for predictions aggregated over time series in the * same group. */ groupTotalWeight?: number; /** * The weight of the loss for predictions aggregated over the horizon for a * single time series. */ temporalTotalWeight?: number; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobMetadata { /** * The resource name of the HyperparameterTuningJob that has been created to * carry out this HyperparameterTuning task. */ backingHyperparameterTuningJob?: string; /** * The resource name of the CustomJob that has been created to run the best * Trial of this HyperparameterTuning task. */ bestTrialBackingCustomJob?: string; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec { /** * The number of failed Trials that need to be seen before failing the * HyperparameterTuningJob. If set to 0, Vertex AI decides how many Trials * must fail before the whole job fails. */ maxFailedTrialCount?: number; /** * The desired total number of Trials. */ maxTrialCount?: number; /** * The desired number of Trials to run in parallel. */ parallelTrialCount?: number; /** * Study configuration of the HyperparameterTuningJob. */ studySpec?: GoogleCloudAiplatformV1StudySpec; /** * The spec of a trial job. The same spec applies to the CustomJobs created * in all the trials. */ trialJobSpec?: GoogleCloudAiplatformV1CustomJobSpec; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec { return { ...data, studySpec: data["studySpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, trialJobSpec: data["trialJobSpec"] !== undefined ? serializeGoogleCloudAiplatformV1CustomJobSpec(data["trialJobSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec { return { ...data, studySpec: data["studySpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, trialJobSpec: data["trialJobSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1CustomJobSpec(data["trialJobSpec"]) : undefined, }; } /** * A TrainingJob that tunes Hyperparameters of a custom code Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningTask { /** * The input parameters of this HyperparameterTuningTask. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningTask(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningTask { return { ...data, inputs: data["inputs"] !== undefined ?
serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec(data["inputs"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningTask(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningTask { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionHyperparameterTuningJobSpec(data["inputs"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Forecasting Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecasting { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs { /** * Additional experiment flags for the time series forcasting training. */ additionalExperiments?: string[]; /** * Names of columns that are available and provided when a forecast is * requested. These columns contain information for the given entity * (identified by the time_series_identifier_column column) that is known at * forecast. For example, predicted weather for a specific day. */ availableAtForecastColumns?: string[]; /** * The amount of time into the past training and prediction data is used for * model training and prediction respectively. Expressed in number of units * defined by the `data_granularity` field. */ contextWindow?: bigint; /** * Expected difference in time granularity between rows in the data. */ dataGranularity?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity; /** * Configuration for exporting test set predictions to a BigQuery table. If * this configuration is absent, then the export is not performed. */ exportEvaluatedDataItemsConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** * The amount of time into the future for which forecasted values for the * target are returned. Expressed in number of units defined by the * `data_granularity` field. */ forecastHorizon?: bigint; /** * Configuration that defines the hierarchical relationship of time series * and parameters for hierarchical forecasting strategies. 
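* For example (illustrative values, assuming a 'region' attribute column),
* `{ groupColumns: ["region"], groupTotalWeight: 1 }` groups time series by
* region and weights the loss aggregated over each group.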
*/ hierarchyConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHierarchyConfig; /** * The geographical region based on which the holiday effect is applied in * modeling by adding holiday categorical array feature that include all * holidays matching the date. This option only allowed when data_granularity * is day. By default, holiday effect modeling is disabled. To turn it on, * specify the holiday region using this option. */ holidayRegions?: string[]; /** * Objective function the model is optimizing towards. The training process * creates a model that optimizes the value of the objective function over the * validation set. The supported optimization objectives: * "minimize-rmse" * (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - * Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize * root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize * root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - * Minimize the combination of weighted absolute percentage error (WAPE) and * mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the * quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - * Minimize the mean absolute percentage error. */ optimizationObjective?: string; /** * Quantiles to use for minimize-quantile-loss `optimization_objective`. Up * to 5 quantiles are allowed of values between 0 and 1, exclusive. Required * if the value of optimization_objective is minimize-quantile-loss. * Represents the percent quantiles to use for that objective. Quantiles must * be unique. */ quantiles?: number[]; /** * The name of the column that the Model is to predict values for. This * column must be unavailable at forecast. */ targetColumn?: string; /** * The name of the column that identifies time order in the time series. This * column must be available at forecast. */ timeColumn?: string; /** * Column names that should be used as attribute columns. The value of these * columns does not vary as a function of time. For example, store ID or item * color. */ timeSeriesAttributeColumns?: string[]; /** * The name of the column that identifies the time series. */ timeSeriesIdentifierColumn?: string; /** * Required. The train budget of creating this model, expressed in milli node * hours i.e. 1,000 value in this field means 1 node hour. The training cost * of the model will not exceed this budget. The final cost will be attempted * to be close to the budget, though may end up being (even) noticeably * smaller - at the backend's discretion. This especially may happen when * further model training ceases to provide any improvements. If the budget is * set to a value known to be insufficient to train a model for the given * dataset, the training won't be attempted and will error. The train budget * must be between 1,000 and 72,000 milli node hours, inclusive. */ trainBudgetMilliNodeHours?: bigint; /** * Each transformation will apply transform function to given input column. * And the result will be used for training. When creating transformation for * BigQuery Struct column, the column should be flattened using "." as the * delimiter. */ transformations?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformation[]; /** * Names of columns that are unavailable when a forecast is requested. This * column contains information for the given entity (identified by the * time_series_identifier_column) that is unknown before the forecast For * example, actual weather on a given day. 
*/ unavailableAtForecastColumns?: string[]; /** * Validation options for the data validation component. The available * options are: * "fail-pipeline" - default, will validate against the * validation and fail the pipeline if it fails. * "ignore-validation" - * ignore the results of the validation and continue */ validationOptions?: string; /** * Column name that should be used as the weight column. Higher values in * this column give more importance to the row during model training. The * column must have numeric values between 0 and 10000 inclusively; 0 means * the row is ignored for training. If weight column field is not set, then * all rows are assumed to have equal weight of 1. This column must be * available at forecast. */ weightColumn?: string; /** * Config containing strategy for generating sliding windows. */ windowConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs { return { ...data, contextWindow: data["contextWindow"] !== undefined ? String(data["contextWindow"]) : undefined, dataGranularity: data["dataGranularity"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity(data["dataGranularity"]) : undefined, forecastHorizon: data["forecastHorizon"] !== undefined ? String(data["forecastHorizon"]) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? String(data["trainBudgetMilliNodeHours"]) : undefined, windowConfig: data["windowConfig"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data["windowConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputs { return { ...data, contextWindow: data["contextWindow"] !== undefined ? BigInt(data["contextWindow"]) : undefined, dataGranularity: data["dataGranularity"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity(data["dataGranularity"]) : undefined, forecastHorizon: data["forecastHorizon"] !== undefined ? BigInt(data["forecastHorizon"]) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? BigInt(data["trainBudgetMilliNodeHours"]) : undefined, windowConfig: data["windowConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data["windowConfig"]) : undefined, }; } /** * A duration of time expressed in time granularity units. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity { /** * The number of granularity_units between data points in the training data. * If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all * other values of `granularity_unit`, must be 1. */ quantity?: bigint; /** * The time granularity unit of this time period. The supported units are: * * "minute" * "hour" * "day" * "week" * "month" * "year" */ unit?: string; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity { return { ...data, quantity: data["quantity"] !== undefined ? 
String(data["quantity"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsGranularity { return { ...data, quantity: data["quantity"] !== undefined ? BigInt(data["quantity"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformation { auto?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationAutoTransformation; categorical?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationCategoricalTransformation; numeric?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationNumericTransformation; text?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTextTransformation; timestamp?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTimestampTransformation; } /** * Training pipeline will infer the proper transformation based on the * statistic of dataset. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationAutoTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * categorical string as is--no change to case, punctuation, spelling, tense, * and so on. * Convert the category name to a dictionary lookup index and * generate an embedding for each index. * Categories that appear less than 5 * times in the training dataset are treated as the "unknown" category. The * "unknown" category gets its own special lookup index and resulting embedding. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationCategoricalTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * value converted to float32. * The z_score of the value. * log(value+1) when * the value is greater than or equal to 0. Otherwise, this transformation is * not applied and the value is considered a missing value. * z_score of * log(value+1) when the value is greater than or equal to 0. Otherwise, this * transformation is not applied and the value is considered a missing value. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationNumericTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * text as is--no change to case, punctuation, spelling, tense, and so on. * * Convert the category name to a dictionary lookup index and generate an * embedding for each index. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTextTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * Apply * the transformation functions for Numerical columns. * Determine the year, * month, day,and weekday. Treat each value from the timestamp as a Categorical * column. * Invalid numerical values (for example, values that fall outside of * a typical timestamp range, or are extreme values) receive no special * treatment and are not removed. 
*/ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingInputsTransformationTimestampTransformation { columnName?: string; /** * The format in which that time field is expressed. The time_format must * either be one of: * `unix-seconds` * `unix-milliseconds` * * `unix-microseconds` * `unix-nanoseconds` (for respectively number of * seconds, milliseconds, microseconds and nanoseconds since start of the Unix * epoch); or be written in `strftime` syntax. If time_format is not set, then * the default format is RFC 3339 `date-time` format, where `time-offset` = * `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ timeFormat?: string; } /** * Model metadata specific to Seq2Seq Plus Forecasting. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata { /** * BigQuery destination uri for exported evaluated examples. */ evaluatedDataItemsBigqueryUri?: string; /** * Output only. The actual training cost of the model, expressed in milli * node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to * not exceed the train budget. */ trainCostMilliNodeHours?: bigint; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? String(data["trainCostMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionSeq2SeqPlusForecastingMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? BigInt(data["trainCostMilliNodeHours"]) : undefined, }; } /** * A TrainingJob that trains and uploads an AutoML Forecasting Model. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecasting { /** * The input parameters of this TrainingJob. */ inputs?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs; /** * The metadata information. */ metadata?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecasting(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecasting { return { ...data, inputs: data["inputs"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata(data["metadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecasting(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecasting { return { ...data, inputs: data["inputs"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs(data["inputs"]) : undefined, metadata: data["metadata"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata(data["metadata"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs { /** * Additional experiment flags for the time series forcasting training. */ additionalExperiments?: string[]; /** * Names of columns that are available and provided when a forecast is * requested. 
These columns contain information for the given entity * (identified by the time_series_identifier_column column) that is known at * forecast. For example, predicted weather for a specific day. */ availableAtForecastColumns?: string[]; /** * The amount of time into the past training and prediction data is used for * model training and prediction respectively. Expressed in number of units * defined by the `data_granularity` field. */ contextWindow?: bigint; /** * Expected difference in time granularity between rows in the data. */ dataGranularity?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity; /** * Configuration for exporting test set predictions to a BigQuery table. If * this configuration is absent, then the export is not performed. */ exportEvaluatedDataItemsConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionExportEvaluatedDataItemsConfig; /** * The amount of time into the future for which forecasted values for the * target are returned. Expressed in number of units defined by the * `data_granularity` field. */ forecastHorizon?: bigint; /** * Configuration that defines the hierarchical relationship of time series * and parameters for hierarchical forecasting strategies. */ hierarchyConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionHierarchyConfig; /** * The geographical region based on which the holiday effect is applied in * modeling by adding holiday categorical array feature that include all * holidays matching the date. This option only allowed when data_granularity * is day. By default, holiday effect modeling is disabled. To turn it on, * specify the holiday region using this option. */ holidayRegions?: string[]; /** * Objective function the model is optimizing towards. The training process * creates a model that optimizes the value of the objective function over the * validation set. The supported optimization objectives: * "minimize-rmse" * (default) - Minimize root-mean-squared error (RMSE). * "minimize-mae" - * Minimize mean-absolute error (MAE). * "minimize-rmsle" - Minimize * root-mean-squared log error (RMSLE). * "minimize-rmspe" - Minimize * root-mean-squared percentage error (RMSPE). * "minimize-wape-mae" - * Minimize the combination of weighted absolute percentage error (WAPE) and * mean-absolute-error (MAE). * "minimize-quantile-loss" - Minimize the * quantile loss at the quantiles defined in `quantiles`. * "minimize-mape" - * Minimize the mean absolute percentage error. */ optimizationObjective?: string; /** * Quantiles to use for minimize-quantile-loss `optimization_objective`. Up * to 5 quantiles are allowed of values between 0 and 1, exclusive. Required * if the value of optimization_objective is minimize-quantile-loss. * Represents the percent quantiles to use for that objective. Quantiles must * be unique. */ quantiles?: number[]; /** * The name of the column that the Model is to predict values for. This * column must be unavailable at forecast. */ targetColumn?: string; /** * The name of the column that identifies time order in the time series. This * column must be available at forecast. */ timeColumn?: string; /** * Column names that should be used as attribute columns. The value of these * columns does not vary as a function of time. For example, store ID or item * color. */ timeSeriesAttributeColumns?: string[]; /** * The name of the column that identifies the time series. */ timeSeriesIdentifierColumn?: string; /** * Required. The train budget of creating this model, expressed in milli node * hours i.e. 
1,000 value in this field means 1 node hour. The training cost * of the model will not exceed this budget. The final cost will be attempted * to be close to the budget, though may end up being (even) noticeably * smaller - at the backend's discretion. This especially may happen when * further model training ceases to provide any improvements. If the budget is * set to a value known to be insufficient to train a model for the given * dataset, the training won't be attempted and will error. The train budget * must be between 1,000 and 72,000 milli node hours, inclusive. */ trainBudgetMilliNodeHours?: bigint; /** * Each transformation will apply transform function to given input column. * And the result will be used for training. When creating transformation for * BigQuery Struct column, the column should be flattened using "." as the * delimiter. */ transformations?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformation[]; /** * Names of columns that are unavailable when a forecast is requested. This * column contains information for the given entity (identified by the * time_series_identifier_column) that is unknown before the forecast For * example, actual weather on a given day. */ unavailableAtForecastColumns?: string[]; /** * Validation options for the data validation component. The available * options are: * "fail-pipeline" - default, will validate against the * validation and fail the pipeline if it fails. * "ignore-validation" - * ignore the results of the validation and continue */ validationOptions?: string; /** * Column name that should be used as the weight column. Higher values in * this column give more importance to the row during model training. The * column must have numeric values between 0 and 10000 inclusively; 0 means * the row is ignored for training. If weight column field is not set, then * all rows are assumed to have equal weight of 1. This column must be * available at forecast. */ weightColumn?: string; /** * Config containing strategy for generating sliding windows. */ windowConfig?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs { return { ...data, contextWindow: data["contextWindow"] !== undefined ? String(data["contextWindow"]) : undefined, dataGranularity: data["dataGranularity"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity(data["dataGranularity"]) : undefined, forecastHorizon: data["forecastHorizon"] !== undefined ? String(data["forecastHorizon"]) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? String(data["trainBudgetMilliNodeHours"]) : undefined, windowConfig: data["windowConfig"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data["windowConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputs { return { ...data, contextWindow: data["contextWindow"] !== undefined ? BigInt(data["contextWindow"]) : undefined, dataGranularity: data["dataGranularity"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity(data["dataGranularity"]) : undefined, forecastHorizon: data["forecastHorizon"] !== undefined ? 
BigInt(data["forecastHorizon"]) : undefined, trainBudgetMilliNodeHours: data["trainBudgetMilliNodeHours"] !== undefined ? BigInt(data["trainBudgetMilliNodeHours"]) : undefined, windowConfig: data["windowConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data["windowConfig"]) : undefined, }; } /** * A duration of time expressed in time granularity units. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity { /** * The number of granularity_units between data points in the training data. * If `granularity_unit` is `minute`, can be 1, 5, 10, 15, or 30. For all * other values of `granularity_unit`, must be 1. */ quantity?: bigint; /** * The time granularity unit of this time period. The supported units are: * * "minute" * "hour" * "day" * "week" * "month" * "year" */ unit?: string; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity { return { ...data, quantity: data["quantity"] !== undefined ? String(data["quantity"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsGranularity { return { ...data, quantity: data["quantity"] !== undefined ? BigInt(data["quantity"]) : undefined, }; } export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformation { auto?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationAutoTransformation; categorical?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationCategoricalTransformation; numeric?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationNumericTransformation; text?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTextTransformation; timestamp?: GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTimestampTransformation; } /** * Training pipeline will infer the proper transformation based on the * statistic of dataset. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationAutoTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * categorical string as is--no change to case, punctuation, spelling, tense, * and so on. * Convert the category name to a dictionary lookup index and * generate an embedding for each index. * Categories that appear less than 5 * times in the training dataset are treated as the "unknown" category. The * "unknown" category gets its own special lookup index and resulting embedding. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationCategoricalTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * value converted to float32. * The z_score of the value. * log(value+1) when * the value is greater than or equal to 0. Otherwise, this transformation is * not applied and the value is considered a missing value. * z_score of * log(value+1) when the value is greater than or equal to 0. Otherwise, this * transformation is not applied and the value is considered a missing value. 
*/ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationNumericTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * The * text as is--no change to case, punctuation, spelling, tense, and so on. * * Convert the category name to a dictionary lookup index and generate an * embedding for each index. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTextTransformation { columnName?: string; } /** * Training pipeline will perform following transformation functions. * Apply * the transformation functions for Numerical columns. * Determine the year, * month, day,and weekday. Treat each value from the timestamp as a Categorical * column. * Invalid numerical values (for example, values that fall outside of * a typical timestamp range, or are extreme values) receive no special * treatment and are not removed. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingInputsTransformationTimestampTransformation { columnName?: string; /** * The format in which that time field is expressed. The time_format must * either be one of: * `unix-seconds` * `unix-milliseconds` * * `unix-microseconds` * `unix-nanoseconds` (for respectively number of * seconds, milliseconds, microseconds and nanoseconds since start of the Unix * epoch); or be written in `strftime` syntax. If time_format is not set, then * the default format is RFC 3339 `date-time` format, where `time-offset` = * `"Z"` (e.g. 1985-04-12T23:20:50.52Z) */ timeFormat?: string; } /** * Model metadata specific to TFT Forecasting. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata { /** * BigQuery destination uri for exported evaluated examples. */ evaluatedDataItemsBigqueryUri?: string; /** * Output only. The actual training cost of the model, expressed in milli * node hours, i.e. 1,000 value in this field means 1 node hour. Guaranteed to * not exceed the train budget. */ trainCostMilliNodeHours?: bigint; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? String(data["trainCostMilliNodeHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionTftForecastingMetadata { return { ...data, trainCostMilliNodeHours: data["trainCostMilliNodeHours"] !== undefined ? BigInt(data["trainCostMilliNodeHours"]) : undefined, }; } /** * Config that contains the strategy used to generate sliding windows in time * series training. A window is a series of rows that comprise the context up to * the time of prediction, and the horizon following. The corresponding row for * each window marks the start of the forecast horizon. Each window is used as * an input example for training/evaluation. */ export interface GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig { /** * Name of the column that should be used to generate sliding windows. The * column should contain either booleans or string booleans; if the value of * the row is True, generate a sliding window with the horizon starting at * that row. The column will not be used as a feature in training. 
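* Hypothetical example (editor's addition; the column name is a placeholder): a column-based strategy would be `{ column: "generate_window" }`, whereas a stride-based strategy on this same config could instead set `{ strideLength: 5n }` (the field is typed as bigint in this client).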
*/ column?: string; /** * Maximum number of windows that should be generated across all time series. */ maxCount?: bigint; /** * Stride length used to generate input examples. Within one time series, * every {$STRIDE_LENGTH} rows will be used to generate a sliding window. */ strideLength?: bigint; } function serializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig { return { ...data, maxCount: data["maxCount"] !== undefined ? String(data["maxCount"]) : undefined, strideLength: data["strideLength"] !== undefined ? String(data["strideLength"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig(data: any): GoogleCloudAiplatformV1SchemaTrainingjobDefinitionWindowConfig { return { ...data, maxCount: data["maxCount"] !== undefined ? BigInt(data["maxCount"]) : undefined, strideLength: data["strideLength"] !== undefined ? BigInt(data["strideLength"]) : undefined, }; } /** * A vertex represents a 2D point in the image. NOTE: the normalized vertex * coordinates are relative to the original image and range from 0 to 1. */ export interface GoogleCloudAiplatformV1SchemaVertex { /** * X coordinate. */ x?: number; /** * Y coordinate. */ y?: number; } /** * Annotation details specific to video action recognition. */ export interface GoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * This Annotation applies to the time period represented by the TimeSegment. * If it's not set, the Annotation applies to the whole video. */ timeSegment?: GoogleCloudAiplatformV1SchemaTimeSegment; } function serializeGoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation(data: any): GoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation { return { ...data, timeSegment: data["timeSegment"] !== undefined ? serializeGoogleCloudAiplatformV1SchemaTimeSegment(data["timeSegment"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation(data: any): GoogleCloudAiplatformV1SchemaVideoActionRecognitionAnnotation { return { ...data, timeSegment: data["timeSegment"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTimeSegment(data["timeSegment"]) : undefined, }; } /** * Annotation details specific to video classification. */ export interface GoogleCloudAiplatformV1SchemaVideoClassificationAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * This Annotation applies to the time period represented by the TimeSegment. * If it's not set, the Annotation applies to the whole video. */ timeSegment?: GoogleCloudAiplatformV1SchemaTimeSegment; } function serializeGoogleCloudAiplatformV1SchemaVideoClassificationAnnotation(data: any): GoogleCloudAiplatformV1SchemaVideoClassificationAnnotation { return { ...data, timeSegment: data["timeSegment"] !== undefined ? 
serializeGoogleCloudAiplatformV1SchemaTimeSegment(data["timeSegment"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaVideoClassificationAnnotation(data: any): GoogleCloudAiplatformV1SchemaVideoClassificationAnnotation { return { ...data, timeSegment: data["timeSegment"] !== undefined ? deserializeGoogleCloudAiplatformV1SchemaTimeSegment(data["timeSegment"]) : undefined, }; } /** * Payload of Video DataItem. */ export interface GoogleCloudAiplatformV1SchemaVideoDataItem { /** * Required. Google Cloud Storage URI points to the original video in user's * bucket. The video is up to 50 GB in size and up to 3 hour in duration. */ gcsUri?: string; /** * Output only. The mime type of the content of the video. Only the videos in * below listed mime types are supported. Supported mime_type: - video/mp4 - * video/avi - video/quicktime */ readonly mimeType?: string; } /** * The metadata of Datasets that contain Video DataItems. */ export interface GoogleCloudAiplatformV1SchemaVideoDatasetMetadata { /** * Points to a YAML file stored on Google Cloud Storage describing payload of * the Video DataItems that belong to this Dataset. */ dataItemSchemaUri?: string; /** * Google Cloud Storage Bucket name that contains the blob data of this * Dataset. */ gcsBucket?: string; } /** * Annotation details specific to video object tracking. */ export interface GoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation { /** * The resource Id of the AnnotationSpec that this Annotation pertains to. */ annotationSpecId?: string; /** * The display name of the AnnotationSpec that this Annotation pertains to. */ displayName?: string; /** * The instance of the object, expressed as a positive integer. Used to track * the same object across different frames. */ instanceId?: bigint; /** * A time (frame) of a video to which this annotation pertains. Represented * as the duration since the video's start. */ timeOffset?: number /* Duration */; /** * The rightmost coordinate of the bounding box. */ xMax?: number; /** * The leftmost coordinate of the bounding box. */ xMin?: number; /** * The bottommost coordinate of the bounding box. */ yMax?: number; /** * The topmost coordinate of the bounding box. */ yMin?: number; } function serializeGoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation(data: any): GoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation { return { ...data, instanceId: data["instanceId"] !== undefined ? String(data["instanceId"]) : undefined, timeOffset: data["timeOffset"] !== undefined ? data["timeOffset"] : undefined, }; } function deserializeGoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation(data: any): GoogleCloudAiplatformV1SchemaVideoObjectTrackingAnnotation { return { ...data, instanceId: data["instanceId"] !== undefined ? BigInt(data["instanceId"]) : undefined, timeOffset: data["timeOffset"] !== undefined ? data["timeOffset"] : undefined, }; } export interface GoogleCloudAiplatformV1SchemaVisualInspectionClassificationLabelSavedQueryMetadata { /** * Whether or not the classification label is multi_label. */ multiLabel?: boolean; } export interface GoogleCloudAiplatformV1SchemaVisualInspectionMaskSavedQueryMetadata { } /** * Response message for DatasetService.SearchDataItems. */ export interface GoogleCloudAiplatformV1SearchDataItemsResponse { /** * The DataItemViews read. */ dataItemViews?: GoogleCloudAiplatformV1DataItemView[]; /** * A token to retrieve next page of results. Pass to * SearchDataItemsRequest.page_token to obtain that page. 
*/ nextPageToken?: string; } /** * Google search entry point. */ export interface GoogleCloudAiplatformV1SearchEntryPoint { /** * Optional. Web content snippet that can be embedded in a web page or an app * webview. */ renderedContent?: string; /** * Optional. Base64 encoded JSON representing an array of tuples. */ sdkBlob?: Uint8Array; } function serializeGoogleCloudAiplatformV1SearchEntryPoint(data: any): GoogleCloudAiplatformV1SearchEntryPoint { return { ...data, sdkBlob: data["sdkBlob"] !== undefined ? encodeBase64(data["sdkBlob"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SearchEntryPoint(data: any): GoogleCloudAiplatformV1SearchEntryPoint { return { ...data, sdkBlob: data["sdkBlob"] !== undefined ? decodeBase64(data["sdkBlob"] as string) : undefined, }; } /** * Response message for FeaturestoreService.SearchFeatures. */ export interface GoogleCloudAiplatformV1SearchFeaturesResponse { /** * The Features matching the request. Fields returned: * `name` * * `description` * `labels` * `create_time` * `update_time` */ features?: GoogleCloudAiplatformV1Feature[]; /** * A token, which can be sent as SearchFeaturesRequest.page_token to retrieve * the next page. If this field is omitted, there are no subsequent pages. */ nextPageToken?: string; } /** * Request message for MigrationService.SearchMigratableResources. */ export interface GoogleCloudAiplatformV1SearchMigratableResourcesRequest { /** * A filter for your search. You can use the following types of filters: * * Resource type filters. The following strings filter for a specific type of * MigratableResource: * `ml_engine_model_version:*` * `automl_model:*` * * `automl_dataset:*` * `data_labeling_dataset:*` * "Migrated or not" filters. * The following strings filter for resources that either have or have not * already been migrated: * `last_migrate_time:*` filters for migrated * resources. * `NOT last_migrate_time:*` filters for not yet migrated * resources. */ filter?: string; /** * The standard page size. The default and maximum value is 100. */ pageSize?: number; /** * The standard page token. */ pageToken?: string; } /** * Response message for MigrationService.SearchMigratableResources. */ export interface GoogleCloudAiplatformV1SearchMigratableResourcesResponse { /** * All migratable resources that can be migrated to the location specified in * the request. */ migratableResources?: GoogleCloudAiplatformV1MigratableResource[]; /** * The standard next-page token. The migratable_resources may not fill * page_size in SearchMigratableResourcesRequest even when there are * subsequent pages. */ nextPageToken?: string; } /** * Request message for * JobService.SearchModelDeploymentMonitoringStatsAnomalies. */ export interface GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest { /** * Required. The DeployedModel ID of the * [ModelDeploymentMonitoringObjectiveConfig.deployed_model_id]. */ deployedModelId?: string; /** * The latest timestamp of stats being generated. If not set, indicates * fetching stats till the latest possible one. */ endTime?: Date; /** * The feature display name. If specified, only return the stats belonging to * this feature. Format: * ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.feature_display_name, * example: "user_destination". */ featureDisplayName?: string; /** * Required. Objectives of the stats to retrieve. */ objectives?: GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective[]; /** * The standard list page size. 
*/ pageSize?: number; /** * A page token received from a previous * JobService.SearchModelDeploymentMonitoringStatsAnomalies call. */ pageToken?: string; /** * The earliest timestamp of stats being generated. If not set, indicates * fetching stats till the earliest possible one. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest(data: any): GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest(data: any): GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequest { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Stats requested for specific objective. */ export interface GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesRequestStatsAnomaliesObjective { /** * If set, all attribution scores between * SearchModelDeploymentMonitoringStatsAnomaliesRequest.start_time and * SearchModelDeploymentMonitoringStatsAnomaliesRequest.end_time are fetched, * and page token doesn't take effect in this case. Only used to retrieve * attribution score for the top Features which has the highest attribution * score in the latest monitoring run. */ topFeatureCount?: number; type?: | "MODEL_DEPLOYMENT_MONITORING_OBJECTIVE_TYPE_UNSPECIFIED" | "RAW_FEATURE_SKEW" | "RAW_FEATURE_DRIFT" | "FEATURE_ATTRIBUTION_SKEW" | "FEATURE_ATTRIBUTION_DRIFT"; } /** * Response message for * JobService.SearchModelDeploymentMonitoringStatsAnomalies. */ export interface GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse { /** * Stats retrieved for requested objectives. There are at most 1000 * ModelMonitoringStatsAnomalies.FeatureHistoricStatsAnomalies.prediction_stats * in the response. */ monitoringStats?: GoogleCloudAiplatformV1ModelMonitoringStatsAnomalies[]; /** * The page token that can be used by the next * JobService.SearchModelDeploymentMonitoringStatsAnomalies call. */ nextPageToken?: string; } function serializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse(data: any): GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse { return { ...data, monitoringStats: data["monitoringStats"] !== undefined ? data["monitoringStats"].map((item: any) => (serializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomalies(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse(data: any): GoogleCloudAiplatformV1SearchModelDeploymentMonitoringStatsAnomaliesResponse { return { ...data, monitoringStats: data["monitoringStats"] !== undefined ? data["monitoringStats"].map((item: any) => (deserializeGoogleCloudAiplatformV1ModelMonitoringStatsAnomalies(item))) : undefined, }; } /** * The request message for FeatureOnlineStoreService.SearchNearestEntities. */ export interface GoogleCloudAiplatformV1SearchNearestEntitiesRequest { /** * Required. The query. */ query?: GoogleCloudAiplatformV1NearestNeighborQuery; /** * Optional. 
If set to true, the full entities (including all vector values * and metadata) of the nearest neighbors are returned; otherwise only entity * id of the nearest neighbors will be returned. Note that returning full * entities will significantly increase the latency and cost of the query. */ returnFullEntity?: boolean; } function serializeGoogleCloudAiplatformV1SearchNearestEntitiesRequest(data: any): GoogleCloudAiplatformV1SearchNearestEntitiesRequest { return { ...data, query: data["query"] !== undefined ? serializeGoogleCloudAiplatformV1NearestNeighborQuery(data["query"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SearchNearestEntitiesRequest(data: any): GoogleCloudAiplatformV1SearchNearestEntitiesRequest { return { ...data, query: data["query"] !== undefined ? deserializeGoogleCloudAiplatformV1NearestNeighborQuery(data["query"]) : undefined, }; } /** * Response message for FeatureOnlineStoreService.SearchNearestEntities */ export interface GoogleCloudAiplatformV1SearchNearestEntitiesResponse { /** * The nearest neighbors of the query entity. */ nearestNeighbors?: GoogleCloudAiplatformV1NearestNeighbors; } function serializeGoogleCloudAiplatformV1SearchNearestEntitiesResponse(data: any): GoogleCloudAiplatformV1SearchNearestEntitiesResponse { return { ...data, nearestNeighbors: data["nearestNeighbors"] !== undefined ? serializeGoogleCloudAiplatformV1NearestNeighbors(data["nearestNeighbors"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SearchNearestEntitiesResponse(data: any): GoogleCloudAiplatformV1SearchNearestEntitiesResponse { return { ...data, nearestNeighbors: data["nearestNeighbors"] !== undefined ? deserializeGoogleCloudAiplatformV1NearestNeighbors(data["nearestNeighbors"]) : undefined, }; } /** * Represents an environment variable where the value is a secret in Cloud * Secret Manager. */ export interface GoogleCloudAiplatformV1SecretEnvVar { /** * Required. Name of the secret environment variable. */ name?: string; /** * Required. Reference to a secret stored in the Cloud Secret Manager that * will provide the value for this environment variable. */ secretRef?: GoogleCloudAiplatformV1SecretRef; } /** * Reference to a secret stored in the Cloud Secret Manager that will provide * the value for this environment variable. */ export interface GoogleCloudAiplatformV1SecretRef { /** * Required. The name of the secret in Cloud Secret Manager. Format: * {secret_name}. */ secret?: string; /** * The Cloud Secret Manager secret version. Can be 'latest' for the latest * version, an integer for a specific version, or a version alias. */ version?: string; } /** * Segment of the content. */ export interface GoogleCloudAiplatformV1Segment { /** * Output only. End index in the given Part, measured in bytes. Offset from * the start of the Part, exclusive, starting at zero. */ readonly endIndex?: number; /** * Output only. The index of a Part object within its parent Content object. */ readonly partIndex?: number; /** * Output only. Start index in the given Part, measured in bytes. Offset from * the start of the Part, inclusive, starting at zero. */ readonly startIndex?: number; /** * Output only. The text corresponding to the segment from the response. */ readonly text?: string; } /** * Configuration for the use of custom service account to run the workloads. */ export interface GoogleCloudAiplatformV1ServiceAccountSpec { /** * Required. If true, custom user-managed service account is enforced to run * any workloads (for example, Vertex Jobs) on the resource. 
Otherwise, uses * the [Vertex AI Custom Code Service * Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents). */ enableCustomServiceAccount?: boolean; /** * Optional. Required when all below conditions are met * * `enable_custom_service_account` is true; * any runtime is specified via * `ResourceRuntimeSpec` on creation time, for example, Ray The users must * have `iam.serviceAccounts.actAs` permission on this service account and * then the specified runtime containers will run as it. Do not set this field * if you want to submit jobs using custom service account to this * PersistentResource after creation, but only specify the `service_account` * inside the job. */ serviceAccount?: string; } /** * The SharePointSources to pass to ImportRagFiles. */ export interface GoogleCloudAiplatformV1SharePointSources { /** * The SharePoint sources. */ sharePointSources?: GoogleCloudAiplatformV1SharePointSourcesSharePointSource[]; } /** * An individual SharePointSource. */ export interface GoogleCloudAiplatformV1SharePointSourcesSharePointSource { /** * The Application ID for the app registered in Microsoft Azure Portal. The * application must also be configured with MS Graph permissions * "Files.ReadAll", "Sites.ReadAll" and BrowserSiteLists.Read.All. */ clientId?: string; /** * The application secret for the app registered in Azure. */ clientSecret?: GoogleCloudAiplatformV1ApiAuthApiKeyConfig; /** * The ID of the drive to download from. */ driveId?: string; /** * The name of the drive to download from. */ driveName?: string; /** * Output only. The SharePoint file id. Output only. */ readonly fileId?: string; /** * The ID of the SharePoint folder to download from. */ sharepointFolderId?: string; /** * The path of the SharePoint folder to download from. */ sharepointFolderPath?: string; /** * The name of the SharePoint site to download from. This can be the site * name or the site id. */ sharepointSiteName?: string; /** * Unique identifier of the Azure Active Directory Instance. */ tenantId?: string; } /** * A set of Shielded Instance options. See [Images using supported Shielded VM * features](https://cloud.google.com/compute/docs/instances/modifying-shielded-vm). */ export interface GoogleCloudAiplatformV1ShieldedVmConfig { /** * Defines whether the instance has [Secure * Boot](https://cloud.google.com/compute/shielded-vm/docs/shielded-vm#secure-boot) * enabled. Secure Boot helps ensure that the system only runs authentic * software by verifying the digital signature of all boot components, and * halting the boot process if signature verification fails. */ enableSecureBoot?: boolean; } /** * The Slack source for the ImportRagFilesRequest. */ export interface GoogleCloudAiplatformV1SlackSource { /** * Required. The Slack channels. */ channels?: GoogleCloudAiplatformV1SlackSourceSlackChannels[]; } function serializeGoogleCloudAiplatformV1SlackSource(data: any): GoogleCloudAiplatformV1SlackSource { return { ...data, channels: data["channels"] !== undefined ? data["channels"].map((item: any) => (serializeGoogleCloudAiplatformV1SlackSourceSlackChannels(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SlackSource(data: any): GoogleCloudAiplatformV1SlackSource { return { ...data, channels: data["channels"] !== undefined ? data["channels"].map((item: any) => (deserializeGoogleCloudAiplatformV1SlackSourceSlackChannels(item))) : undefined, }; } /** * SlackChannels contains the Slack channels and corresponding access token. 
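* Hypothetical sketch (editor's addition; the channel ID and time range are placeholders): one `channels` entry could be `{ channelId: "C0123456789", startTime: new Date("2024-01-01T00:00:00Z"), endTime: new Date("2024-02-01T00:00:00Z") }` (timestamps are `Date` objects in this client), with `apiKeyConfig` pointing at the Secret Manager secret version that stores the channel access token.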
*/ export interface GoogleCloudAiplatformV1SlackSourceSlackChannels { /** * Required. The SecretManager secret version resource name (e.g. * projects/{project}/secrets/{secret}/versions/{version}) storing the Slack * channel access token that has access to the slack channel IDs. See: * https://api.slack.com/tutorials/tracks/getting-a-token. */ apiKeyConfig?: GoogleCloudAiplatformV1ApiAuthApiKeyConfig; /** * Required. The Slack channel IDs. */ channels?: GoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel[]; } function serializeGoogleCloudAiplatformV1SlackSourceSlackChannels(data: any): GoogleCloudAiplatformV1SlackSourceSlackChannels { return { ...data, channels: data["channels"] !== undefined ? data["channels"].map((item: any) => (serializeGoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1SlackSourceSlackChannels(data: any): GoogleCloudAiplatformV1SlackSourceSlackChannels { return { ...data, channels: data["channels"] !== undefined ? data["channels"].map((item: any) => (deserializeGoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel(item))) : undefined, }; } /** * SlackChannel contains the Slack channel ID and the time range to import. */ export interface GoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel { /** * Required. The Slack channel ID. */ channelId?: string; /** * Optional. The ending timestamp for messages to import. */ endTime?: Date; /** * Optional. The starting timestamp for messages to import. */ startTime?: Date; } function serializeGoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel(data: any): GoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel(data: any): GoogleCloudAiplatformV1SlackSourceSlackChannelsSlackChannel { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Config for SmoothGrad approximation of gradients. When enabled, the * gradients are approximated by averaging the gradients from noisy samples in * the vicinity of the inputs. Adding noise can help improve the computed * gradients. Refer to this paper for more details: * https://arxiv.org/pdf/1706.03825.pdf */ export interface GoogleCloudAiplatformV1SmoothGradConfig { /** * This is similar to noise_sigma, but provides additional flexibility. A * separate noise sigma can be provided for each feature, which is useful if * their distributions are different. No noise is added to features that are * not set. If this field is unset, noise_sigma will be used for all features. */ featureNoiseSigma?: GoogleCloudAiplatformV1FeatureNoiseSigma; /** * This is a single float value and will be used to add noise to all the * features. Use this field when all features are normalized to have the same * distribution: scale to range [0, 1], [-1, 1] or z-scoring, where features * are normalized to have 0-mean and 1-variance. Learn more about * [normalization](https://developers.google.com/machine-learning/data-prep/transform/normalization). * For best results the recommended value is about 10% - 20% of the standard * deviation of the input feature. 
Refer to section 3.2 of the SmoothGrad * paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1. If the * distribution is different per feature, set feature_noise_sigma instead for * each feature. */ noiseSigma?: number; /** * The number of gradient samples to use for approximation. The higher this * number, the more accurate the gradient is, but the runtime complexity * increases by this factor as well. Valid range of its value is [1, 50]. * Defaults to 3. */ noisySampleCount?: number; } /** * SpecialistPool represents customers' own workforce to work on their data * labeling jobs. It includes a group of specialist managers and workers. * Managers are responsible for managing the workers in this pool as well as * customers' data labeling jobs associated with this pool. Customers create * specialist pool as well as start data labeling jobs on Cloud, managers and * workers handle the jobs using CrowdCompute console. */ export interface GoogleCloudAiplatformV1SpecialistPool { /** * Required. The user-defined name of the SpecialistPool. The name can be up * to 128 characters long and can consist of any UTF-8 characters. This field * should be unique on project-level. */ displayName?: string; /** * Required. The resource name of the SpecialistPool. */ name?: string; /** * Output only. The resource name of the pending data labeling jobs. */ readonly pendingDataLabelingJobs?: string[]; /** * The email addresses of the managers in the SpecialistPool. */ specialistManagerEmails?: string[]; /** * Output only. The number of managers in this SpecialistPool. */ readonly specialistManagersCount?: number; /** * The email addresses of workers in the SpecialistPool. */ specialistWorkerEmails?: string[]; } /** * Configuration for Speculative Decoding. */ export interface GoogleCloudAiplatformV1SpeculativeDecodingSpec { /** * draft model speculation. */ draftModelSpeculation?: GoogleCloudAiplatformV1SpeculativeDecodingSpecDraftModelSpeculation; /** * N-Gram speculation. */ ngramSpeculation?: GoogleCloudAiplatformV1SpeculativeDecodingSpecNgramSpeculation; /** * The number of speculative tokens to generate at each step. */ speculativeTokenCount?: number; } /** * Draft model speculation works by using the smaller model to generate * candidate tokens for speculative decoding. */ export interface GoogleCloudAiplatformV1SpeculativeDecodingSpecDraftModelSpeculation { /** * Required. The resource name of the draft model. */ draftModel?: string; } /** * N-Gram speculation works by trying to find matching tokens in the previous * prompt sequence and use those as speculation for generating new tokens. */ export interface GoogleCloudAiplatformV1SpeculativeDecodingSpecNgramSpeculation { /** * The number of last N input tokens used as ngram to search/match against * the previous prompt sequence. This is equal to the N in N-Gram. The default * value is 3 if not specified. */ ngramSize?: number; } /** * The speech generation config. */ export interface GoogleCloudAiplatformV1SpeechConfig { /** * Optional. Language code (ISO 639. e.g. en-US) for the speech * synthesization. */ languageCode?: string; /** * The configuration for the speaker to use. */ voiceConfig?: GoogleCloudAiplatformV1VoiceConfig; } /** * Metadata information for NotebookService.StartNotebookRuntime. */ export interface GoogleCloudAiplatformV1StartNotebookRuntimeOperationMetadata { /** * The operation generic information. 
*/ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * A human-readable message that shows the intermediate progress details of * NotebookRuntime. */ progressMessage?: string; } /** * Request message for NotebookService.StartNotebookRuntime. */ export interface GoogleCloudAiplatformV1StartNotebookRuntimeRequest { } /** * Request message for NotebookService.StopNotebookRuntime. */ export interface GoogleCloudAiplatformV1StopNotebookRuntimeRequest { } /** * Request message for VizierService.StopTrial. */ export interface GoogleCloudAiplatformV1StopTrialRequest { } /** * Assigns input data to the training, validation, and test sets so that the * distribution of values found in the categorical column (as specified by the * `key` field) is mirrored within each split. The fraction values determine the * relative sizes of the splits. For example, if the specified column has three * values, with 50% of the rows having value "A", 25% value "B", and 25% value * "C", and the split fractions are specified as 80/10/10, then the training set * will constitute 80% of the training data, with about 50% of the training set * rows having the value "A" for the specified column, about 25% having the * value "B", and about 25% having the value "C". Only the top 500 occurring * values are used; any values not in the top 500 values are randomly assigned * to a split. If less than three rows contain a specific value, those rows are * randomly assigned. Supported only for tabular Datasets. */ export interface GoogleCloudAiplatformV1StratifiedSplit { /** * Required. The key is a name of one of the Dataset's data columns. The key * provided must be for a categorical column. */ key?: string; /** * The fraction of the input data that is to be used to evaluate the Model. */ testFraction?: number; /** * The fraction of the input data that is to be used to train the Model. */ trainingFraction?: number; /** * The fraction of the input data that is to be used to validate the Model. */ validationFraction?: number; } /** * Request message for PredictionService.StreamingPredict. The first message * must contain endpoint field and optionally input. The subsequent messages * must contain input. */ export interface GoogleCloudAiplatformV1StreamingPredictRequest { /** * The prediction input. */ inputs?: GoogleCloudAiplatformV1Tensor[]; /** * The parameters that govern the prediction. */ parameters?: GoogleCloudAiplatformV1Tensor; } function serializeGoogleCloudAiplatformV1StreamingPredictRequest(data: any): GoogleCloudAiplatformV1StreamingPredictRequest { return { ...data, inputs: data["inputs"] !== undefined ? data["inputs"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StreamingPredictRequest(data: any): GoogleCloudAiplatformV1StreamingPredictRequest { return { ...data, inputs: data["inputs"] !== undefined ? data["inputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } /** * Response message for PredictionService.StreamingPredict. */ export interface GoogleCloudAiplatformV1StreamingPredictResponse { /** * The prediction output. */ outputs?: GoogleCloudAiplatformV1Tensor[]; /** * The parameters that govern the prediction. 
*/ parameters?: GoogleCloudAiplatformV1Tensor; } function serializeGoogleCloudAiplatformV1StreamingPredictResponse(data: any): GoogleCloudAiplatformV1StreamingPredictResponse { return { ...data, outputs: data["outputs"] !== undefined ? data["outputs"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? serializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StreamingPredictResponse(data: any): GoogleCloudAiplatformV1StreamingPredictResponse { return { ...data, outputs: data["outputs"] !== undefined ? data["outputs"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, parameters: data["parameters"] !== undefined ? deserializeGoogleCloudAiplatformV1Tensor(data["parameters"]) : undefined, }; } /** * Request message for * FeaturestoreOnlineServingService.StreamingReadFeatureValues. */ export interface GoogleCloudAiplatformV1StreamingReadFeatureValuesRequest { /** * Required. IDs of entities to read Feature values of. The maximum number of * IDs is 100. For example, for a machine learning model predicting user * clicks on a website, an entity ID could be `user_123`. */ entityIds?: string[]; /** * Required. Selector choosing Features of the target EntityType. Feature IDs * will be deduplicated. */ featureSelector?: GoogleCloudAiplatformV1FeatureSelector; } /** * Request message for ReasoningEngineExecutionService.StreamQuery. */ export interface GoogleCloudAiplatformV1StreamQueryReasoningEngineRequest { /** * Optional. Class method to be used for the stream query. It is optional and * defaults to "stream_query" if unspecified. */ classMethod?: string; /** * Optional. Input content provided by users in JSON object format. Examples * include text query, function calling parameters, media bytes, etc. */ input?: { [key: string]: any }; } /** * Request message for PredictionService.StreamRawPredict. */ export interface GoogleCloudAiplatformV1StreamRawPredictRequest { /** * The prediction input. Supports HTTP headers and arbitrary data payload. */ httpBody?: GoogleApiHttpBody; } function serializeGoogleCloudAiplatformV1StreamRawPredictRequest(data: any): GoogleCloudAiplatformV1StreamRawPredictRequest { return { ...data, httpBody: data["httpBody"] !== undefined ? serializeGoogleApiHttpBody(data["httpBody"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StreamRawPredictRequest(data: any): GoogleCloudAiplatformV1StreamRawPredictRequest { return { ...data, httpBody: data["httpBody"] !== undefined ? deserializeGoogleApiHttpBody(data["httpBody"]) : undefined, }; } /** * A list of string values. */ export interface GoogleCloudAiplatformV1StringArray { /** * A list of string values. */ values?: string[]; } /** * One field of a Struct (or object) type feature value. */ export interface GoogleCloudAiplatformV1StructFieldValue { /** * Name of the field in the struct feature. */ name?: string; /** * The value for this field. */ value?: GoogleCloudAiplatformV1FeatureValue; } function serializeGoogleCloudAiplatformV1StructFieldValue(data: any): GoogleCloudAiplatformV1StructFieldValue { return { ...data, value: data["value"] !== undefined ? serializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StructFieldValue(data: any): GoogleCloudAiplatformV1StructFieldValue { return { ...data, value: data["value"] !== undefined ? 
deserializeGoogleCloudAiplatformV1FeatureValue(data["value"]) : undefined, }; } /** * Struct (or object) type feature value. */ export interface GoogleCloudAiplatformV1StructValue { /** * A list of field values. */ values?: GoogleCloudAiplatformV1StructFieldValue[]; } function serializeGoogleCloudAiplatformV1StructValue(data: any): GoogleCloudAiplatformV1StructValue { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (serializeGoogleCloudAiplatformV1StructFieldValue(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1StructValue(data: any): GoogleCloudAiplatformV1StructValue { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (deserializeGoogleCloudAiplatformV1StructFieldValue(item))) : undefined, }; } /** * A message representing a Study. */ export interface GoogleCloudAiplatformV1Study { /** * Output only. Time at which the study was created. */ readonly createTime?: Date; /** * Required. Describes the Study, default value is empty string. */ displayName?: string; /** * Output only. A human readable reason why the Study is inactive. This * should be empty if a study is ACTIVE or COMPLETED. */ readonly inactiveReason?: string; /** * Output only. The name of a study. The study's globally unique identifier. * Format: `projects/{project}/locations/{location}/studies/{study}` */ readonly name?: string; /** * Output only. The detailed state of a Study. */ readonly state?: | "STATE_UNSPECIFIED" | "ACTIVE" | "INACTIVE" | "COMPLETED"; /** * Required. Configuration of the Study. */ studySpec?: GoogleCloudAiplatformV1StudySpec; } function serializeGoogleCloudAiplatformV1Study(data: any): GoogleCloudAiplatformV1Study { return { ...data, studySpec: data["studySpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Study(data: any): GoogleCloudAiplatformV1Study { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, studySpec: data["studySpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpec(data["studySpec"]) : undefined, }; } /** * Represents specification of a Study. */ export interface GoogleCloudAiplatformV1StudySpec { /** * The search algorithm specified for the Study. */ algorithm?: | "ALGORITHM_UNSPECIFIED" | "GRID_SEARCH" | "RANDOM_SEARCH"; /** * The automated early stopping spec using convex stopping rule. */ convexAutomatedStoppingSpec?: GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec; /** * The automated early stopping spec using decay curve rule. */ decayCurveStoppingSpec?: GoogleCloudAiplatformV1StudySpecDecayCurveAutomatedStoppingSpec; /** * Describe which measurement selection type will be used */ measurementSelectionType?: | "MEASUREMENT_SELECTION_TYPE_UNSPECIFIED" | "LAST_MEASUREMENT" | "BEST_MEASUREMENT"; /** * The automated early stopping spec using median rule. */ medianAutomatedStoppingSpec?: GoogleCloudAiplatformV1StudySpecMedianAutomatedStoppingSpec; /** * Required. Metric specs for the Study. */ metrics?: GoogleCloudAiplatformV1StudySpecMetricSpec[]; /** * The observation noise level of the study. Currently only supported by the * Vertex AI Vizier service. Not supported by HyperparameterTuningJob or * TrainingPipeline. */ observationNoise?: | "OBSERVATION_NOISE_UNSPECIFIED" | "LOW" | "HIGH"; /** * Required. The set of parameters to tune. 
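* Hypothetical example (editor's addition; the parameter ID and category names are placeholders): a single categorical parameter can be expressed as `[{ parameterId: "optimizer", categoricalValueSpec: { values: ["adam", "sgd"] } }]`, using only fields defined on GoogleCloudAiplatformV1StudySpecParameterSpec below.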
*/ parameters?: GoogleCloudAiplatformV1StudySpecParameterSpec[]; /** * Conditions for automated stopping of a Study. Enable automated stopping by * configuring at least one condition. */ studyStoppingConfig?: GoogleCloudAiplatformV1StudySpecStudyStoppingConfig; } function serializeGoogleCloudAiplatformV1StudySpec(data: any): GoogleCloudAiplatformV1StudySpec { return { ...data, convexAutomatedStoppingSpec: data["convexAutomatedStoppingSpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec(data["convexAutomatedStoppingSpec"]) : undefined, parameters: data["parameters"] !== undefined ? data["parameters"].map((item: any) => (serializeGoogleCloudAiplatformV1StudySpecParameterSpec(item))) : undefined, studyStoppingConfig: data["studyStoppingConfig"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpecStudyStoppingConfig(data["studyStoppingConfig"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpec(data: any): GoogleCloudAiplatformV1StudySpec { return { ...data, convexAutomatedStoppingSpec: data["convexAutomatedStoppingSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec(data["convexAutomatedStoppingSpec"]) : undefined, parameters: data["parameters"] !== undefined ? data["parameters"].map((item: any) => (deserializeGoogleCloudAiplatformV1StudySpecParameterSpec(item))) : undefined, studyStoppingConfig: data["studyStoppingConfig"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpecStudyStoppingConfig(data["studyStoppingConfig"]) : undefined, }; } /** * Configuration for ConvexAutomatedStoppingSpec. When there are enough * completed trials (configured by min_measurement_count), for pending trials * with enough measurements and steps, the policy first computes an overestimate * of the objective value at max_num_steps according to the slope of the * incomplete objective value curve. No prediction can be made if the curve is * completely flat. If the overestimation is worse than the best objective value * of the completed trials, this pending trial will be early-stopped, but a last * measurement will be added to the pending trial with max_num_steps and * predicted objective value from the autoregression model. */ export interface GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec { /** * The hyper-parameter name used in the tuning job that stands for learning * rate. Leave it blank if learning rate is not in a parameter in tuning. The * learning_rate is used to estimate the objective value of the ongoing trial. */ learningRateParameterName?: string; /** * Steps used in predicting the final objective for early stopped trials. In * general, it's set to be the same as the defined steps in training / tuning. * If not defined, it will learn it from the completed trials. When use_steps * is false, this field is set to the maximum elapsed seconds. */ maxStepCount?: bigint; /** * The minimal number of measurements in a Trial. Early-stopping checks will * not trigger if less than min_measurement_count+1 completed trials or * pending trials with less than min_measurement_count measurements. If not * defined, the default value is 5. */ minMeasurementCount?: bigint; /** * Minimum number of steps for a trial to complete. Trials which do not have * a measurement with step_count > min_step_count won't be considered for * early stopping. It's ok to set it to 0, and a trial can be early stopped at * any stage. By default, min_step_count is set to be one-tenth of the * max_step_count. 
When use_elapsed_duration is true, this field is set to the * minimum elapsed seconds. */ minStepCount?: bigint; /** * ConvexAutomatedStoppingSpec by default only updates the trials that needs * to be early stopped using a newly trained auto-regressive model. When this * flag is set to True, all stopped trials from the beginning are potentially * updated in terms of their `final_measurement`. Also, note that the training * logic of autoregressive models is different in this case. Enabling this * option has shown better results and this may be the default option in the * future. */ updateAllStoppedTrials?: boolean; /** * This bool determines whether or not the rule is applied based on * elapsed_secs or steps. If use_elapsed_duration==false, the early stopping * decision is made according to the predicted objective values according to * the target steps. If use_elapsed_duration==true, elapsed_secs is used * instead of steps. Also, in this case, the parameters max_num_steps and * min_num_steps are overloaded to contain max_elapsed_seconds and * min_elapsed_seconds. */ useElapsedDuration?: boolean; } function serializeGoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec(data: any): GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec { return { ...data, maxStepCount: data["maxStepCount"] !== undefined ? String(data["maxStepCount"]) : undefined, minMeasurementCount: data["minMeasurementCount"] !== undefined ? String(data["minMeasurementCount"]) : undefined, minStepCount: data["minStepCount"] !== undefined ? String(data["minStepCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec(data: any): GoogleCloudAiplatformV1StudySpecConvexAutomatedStoppingSpec { return { ...data, maxStepCount: data["maxStepCount"] !== undefined ? BigInt(data["maxStepCount"]) : undefined, minMeasurementCount: data["minMeasurementCount"] !== undefined ? BigInt(data["minMeasurementCount"]) : undefined, minStepCount: data["minStepCount"] !== undefined ? BigInt(data["minStepCount"]) : undefined, }; } /** * The decay curve automated stopping rule builds a Gaussian Process Regressor * to predict the final objective value of a Trial based on the already * completed Trials and the intermediate measurements of the current Trial. * Early stopping is requested for the current Trial if there is very low * probability to exceed the optimal value found so far. */ export interface GoogleCloudAiplatformV1StudySpecDecayCurveAutomatedStoppingSpec { /** * True if Measurement.elapsed_duration is used as the x-axis of each Trials * Decay Curve. Otherwise, Measurement.step_count will be used as the x-axis. */ useElapsedDuration?: boolean; } /** * The median automated stopping rule stops a pending Trial if the Trial's best * objective_value is strictly below the median 'performance' of all completed * Trials reported up to the Trial's last measurement. Currently, 'performance' * refers to the running average of the objective values reported by the Trial * in each measurement. */ export interface GoogleCloudAiplatformV1StudySpecMedianAutomatedStoppingSpec { /** * True if median automated stopping rule applies on * Measurement.elapsed_duration. It means that elapsed_duration field of * latest measurement of current Trial is used to compute median objective * value for each completed Trials. */ useElapsedDuration?: boolean; } /** * Represents a metric to optimize. */ export interface GoogleCloudAiplatformV1StudySpecMetricSpec { /** * Required. The optimization goal of the metric. 
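* Hypothetical example (editor's addition; the metric ID is a placeholder): a metric to maximize would be `{ metricId: "accuracy", goal: "MAXIMIZE" }`.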
*/ goal?: | "GOAL_TYPE_UNSPECIFIED" | "MAXIMIZE" | "MINIMIZE"; /** * Required. The ID of the metric. Must not contain whitespaces and must be * unique amongst all MetricSpecs. */ metricId?: string; /** * Used for safe search. In the case, the metric will be a safety metric. You * must provide a separate metric for objective metric. */ safetyConfig?: GoogleCloudAiplatformV1StudySpecMetricSpecSafetyMetricConfig; } /** * Used in safe optimization to specify threshold levels and risk tolerance. */ export interface GoogleCloudAiplatformV1StudySpecMetricSpecSafetyMetricConfig { /** * Desired minimum fraction of safe trials (over total number of trials) that * should be targeted by the algorithm at any time during the study (best * effort). This should be between 0.0 and 1.0 and a value of 0.0 means that * there is no minimum and an algorithm proceeds without targeting any * specific fraction. A value of 1.0 means that the algorithm attempts to only * Suggest safe Trials. */ desiredMinSafeTrialsFraction?: number; /** * Safety threshold (boundary value between safe and unsafe). NOTE that if * you leave SafetyMetricConfig unset, a default value of 0 will be used. */ safetyThreshold?: number; } /** * Represents a single parameter to optimize. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpec { /** * The value spec for a 'CATEGORICAL' parameter. */ categoricalValueSpec?: GoogleCloudAiplatformV1StudySpecParameterSpecCategoricalValueSpec; /** * A conditional parameter node is active if the parameter's value matches * the conditional node's parent_value_condition. If two items in * conditional_parameter_specs have the same name, they must have disjoint * parent_value_condition. */ conditionalParameterSpecs?: GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec[]; /** * The value spec for a 'DISCRETE' parameter. */ discreteValueSpec?: GoogleCloudAiplatformV1StudySpecParameterSpecDiscreteValueSpec; /** * The value spec for a 'DOUBLE' parameter. */ doubleValueSpec?: GoogleCloudAiplatformV1StudySpecParameterSpecDoubleValueSpec; /** * The value spec for an 'INTEGER' parameter. */ integerValueSpec?: GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec; /** * Required. The ID of the parameter. Must not contain whitespaces and must * be unique amongst all ParameterSpecs. */ parameterId?: string; /** * How the parameter should be scaled. Leave unset for `CATEGORICAL` * parameters. */ scaleType?: | "SCALE_TYPE_UNSPECIFIED" | "UNIT_LINEAR_SCALE" | "UNIT_LOG_SCALE" | "UNIT_REVERSE_LOG_SCALE"; } function serializeGoogleCloudAiplatformV1StudySpecParameterSpec(data: any): GoogleCloudAiplatformV1StudySpecParameterSpec { return { ...data, conditionalParameterSpecs: data["conditionalParameterSpecs"] !== undefined ? data["conditionalParameterSpecs"].map((item: any) => (serializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec(item))) : undefined, integerValueSpec: data["integerValueSpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec(data["integerValueSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpecParameterSpec(data: any): GoogleCloudAiplatformV1StudySpecParameterSpec { return { ...data, conditionalParameterSpecs: data["conditionalParameterSpecs"] !== undefined ? data["conditionalParameterSpecs"].map((item: any) => (deserializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec(item))) : undefined, integerValueSpec: data["integerValueSpec"] !== undefined ? 
deserializeGoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec(data["integerValueSpec"]) : undefined, }; } /** * Value specification for a parameter in `CATEGORICAL` type. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecCategoricalValueSpec { /** * A default value for a `CATEGORICAL` parameter that is assumed to be a * relatively good starting point. Unset value signals that there is no * offered starting point. Currently only supported by the Vertex AI Vizier * service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ defaultValue?: string; /** * Required. The list of possible categories. */ values?: string[]; } /** * Represents a parameter spec with condition from its parent parameter. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec { /** * Required. The spec for a conditional parameter. */ parameterSpec?: GoogleCloudAiplatformV1StudySpecParameterSpec; /** * The spec for matching values from a parent parameter of `CATEGORICAL` * type. */ parentCategoricalValues?: GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition; /** * The spec for matching values from a parent parameter of `DISCRETE` type. */ parentDiscreteValues?: GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition; /** * The spec for matching values from a parent parameter of `INTEGER` type. */ parentIntValues?: GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition; } function serializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec(data: any): GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec { return { ...data, parameterSpec: data["parameterSpec"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpecParameterSpec(data["parameterSpec"]) : undefined, parentIntValues: data["parentIntValues"] !== undefined ? serializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition(data["parentIntValues"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec(data: any): GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpec { return { ...data, parameterSpec: data["parameterSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpecParameterSpec(data["parameterSpec"]) : undefined, parentIntValues: data["parentIntValues"] !== undefined ? deserializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition(data["parentIntValues"]) : undefined, }; } /** * Represents the spec to match categorical values from parent parameter. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecCategoricalValueCondition { /** * Required. Matches values of the parent parameter of 'CATEGORICAL' type. * All values must exist in `categorical_value_spec` of parent parameter. */ values?: string[]; } /** * Represents the spec to match discrete values from parent parameter. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecDiscreteValueCondition { /** * Required. Matches values of the parent parameter of 'DISCRETE' type. All * values must exist in `discrete_value_spec` of parent parameter. The Epsilon * of the value matching is 1e-10. */ values?: number[]; } /** * Represents the spec to match integer values from parent parameter. 
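 * A minimal sketch (the parent values 32 and 64 are illustrative): the
 * condition below makes the child parameter active only when the parent
 * `INTEGER` parameter takes one of the listed values.
 *
 *     const condition:
 *       GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition =
 *       { values: [32n, 64n] }; // illustrative parent values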
*/ export interface GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { /** * Required. Matches values of the parent parameter of 'INTEGER' type. All * values must lie in `integer_value_spec` of parent parameter. */ values?: bigint[]; } function serializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition(data: any): GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition(data: any): GoogleCloudAiplatformV1StudySpecParameterSpecConditionalParameterSpecIntValueCondition { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Value specification for a parameter in `DISCRETE` type. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecDiscreteValueSpec { /** * A default value for a `DISCRETE` parameter that is assumed to be a * relatively good starting point. Unset value signals that there is no * offered starting point. It automatically rounds to the nearest feasible * discrete point. Currently only supported by the Vertex AI Vizier service. * Not supported by HyperparameterTuningJob or TrainingPipeline. */ defaultValue?: number; /** * Required. A list of possible values. The list should be in increasing * order and at least 1e-10 apart. For instance, this parameter might have * possible settings of 1.5, 2.5, and 4.0. This list should not contain more * than 1,000 values. */ values?: number[]; } /** * Value specification for a parameter in `DOUBLE` type. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecDoubleValueSpec { /** * A default value for a `DOUBLE` parameter that is assumed to be a * relatively good starting point. Unset value signals that there is no * offered starting point. Currently only supported by the Vertex AI Vizier * service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ defaultValue?: number; /** * Required. Inclusive maximum value of the parameter. */ maxValue?: number; /** * Required. Inclusive minimum value of the parameter. */ minValue?: number; } /** * Value specification for a parameter in `INTEGER` type. */ export interface GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec { /** * A default value for an `INTEGER` parameter that is assumed to be a * relatively good starting point. Unset value signals that there is no * offered starting point. Currently only supported by the Vertex AI Vizier * service. Not supported by HyperparameterTuningJob or TrainingPipeline. */ defaultValue?: bigint; /** * Required. Inclusive maximum value of the parameter. */ maxValue?: bigint; /** * Required. Inclusive minimum value of the parameter. */ minValue?: bigint; } function serializeGoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec(data: any): GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec { return { ...data, defaultValue: data["defaultValue"] !== undefined ? String(data["defaultValue"]) : undefined, maxValue: data["maxValue"] !== undefined ? String(data["maxValue"]) : undefined, minValue: data["minValue"] !== undefined ? 
String(data["minValue"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec(data: any): GoogleCloudAiplatformV1StudySpecParameterSpecIntegerValueSpec { return { ...data, defaultValue: data["defaultValue"] !== undefined ? BigInt(data["defaultValue"]) : undefined, maxValue: data["maxValue"] !== undefined ? BigInt(data["maxValue"]) : undefined, minValue: data["minValue"] !== undefined ? BigInt(data["minValue"]) : undefined, }; } /** * The configuration (stopping conditions) for automated stopping of a Study. * Conditions include trial budgets, time budgets, and convergence detection. */ export interface GoogleCloudAiplatformV1StudySpecStudyStoppingConfig { /** * If the objective value has not improved for this much time, stop the * study. WARNING: Effective only for single-objective studies. */ maxDurationNoProgress?: number /* Duration */; /** * If the specified time or duration has passed, stop the study. */ maximumRuntimeConstraint?: GoogleCloudAiplatformV1StudyTimeConstraint; /** * If there are more than this many trials, stop the study. */ maxNumTrials?: number; /** * If the objective value has not improved for this many consecutive trials, * stop the study. WARNING: Effective only for single-objective studies. */ maxNumTrialsNoProgress?: number; /** * Each "stopping rule" in this proto specifies an "if" condition. Before * Vizier would generate a new suggestion, it first checks each specified * stopping rule, from top to bottom in this list. Note that the first few * rules (e.g. minimum_runtime_constraint, min_num_trials) will prevent other * stopping rules from being evaluated until they are met. For example, * setting `min_num_trials=5` and `always_stop_after= 1 hour` means that the * Study will ONLY stop after it has 5 COMPLETED trials, even if more than an * hour has passed since its creation. It follows the first applicable rule * (whose "if" condition is satisfied) to make a stopping decision. If none of * the specified rules are applicable, then Vizier decides that the study * should not stop. If Vizier decides that the study should stop, the study * enters STOPPING state (or STOPPING_ASAP if should_stop_asap = true). * IMPORTANT: The automatic study state transition happens precisely as * described above; that is, deleting trials or updating StudyConfig NEVER * automatically moves the study state back to ACTIVE. If you want to _resume_ * a Study that was stopped, 1) change the stopping conditions if necessary, * 2) activate the study, and then 3) ask for suggestions. If the specified * time or duration has not passed, do not stop the study. */ minimumRuntimeConstraint?: GoogleCloudAiplatformV1StudyTimeConstraint; /** * If there are fewer than this many COMPLETED trials, do not stop the study. */ minNumTrials?: number; /** * If true, a Study enters STOPPING_ASAP whenever it would normally enters * STOPPING state. The bottom line is: set to true if you want to interrupt * on-going evaluations of Trials as soon as the study stopping condition is * met. (Please see Study.State documentation for the source of truth). */ shouldStopAsap?: boolean; } function serializeGoogleCloudAiplatformV1StudySpecStudyStoppingConfig(data: any): GoogleCloudAiplatformV1StudySpecStudyStoppingConfig { return { ...data, maxDurationNoProgress: data["maxDurationNoProgress"] !== undefined ? data["maxDurationNoProgress"] : undefined, maximumRuntimeConstraint: data["maximumRuntimeConstraint"] !== undefined ? 
serializeGoogleCloudAiplatformV1StudyTimeConstraint(data["maximumRuntimeConstraint"]) : undefined, minimumRuntimeConstraint: data["minimumRuntimeConstraint"] !== undefined ? serializeGoogleCloudAiplatformV1StudyTimeConstraint(data["minimumRuntimeConstraint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1StudySpecStudyStoppingConfig(data: any): GoogleCloudAiplatformV1StudySpecStudyStoppingConfig { return { ...data, maxDurationNoProgress: data["maxDurationNoProgress"] !== undefined ? data["maxDurationNoProgress"] : undefined, maximumRuntimeConstraint: data["maximumRuntimeConstraint"] !== undefined ? deserializeGoogleCloudAiplatformV1StudyTimeConstraint(data["maximumRuntimeConstraint"]) : undefined, minimumRuntimeConstraint: data["minimumRuntimeConstraint"] !== undefined ? deserializeGoogleCloudAiplatformV1StudyTimeConstraint(data["minimumRuntimeConstraint"]) : undefined, }; } /** * Time-based Constraint for Study */ export interface GoogleCloudAiplatformV1StudyTimeConstraint { /** * Compares the wallclock time to this time. Must use UTC timezone. */ endTime?: Date; /** * Counts the wallclock time passed since the creation of this Study. */ maxDuration?: number /* Duration */; } function serializeGoogleCloudAiplatformV1StudyTimeConstraint(data: any): GoogleCloudAiplatformV1StudyTimeConstraint { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, maxDuration: data["maxDuration"] !== undefined ? data["maxDuration"] : undefined, }; } function deserializeGoogleCloudAiplatformV1StudyTimeConstraint(data: any): GoogleCloudAiplatformV1StudyTimeConstraint { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, maxDuration: data["maxDuration"] !== undefined ? data["maxDuration"] : undefined, }; } /** * Details of operations that perform Trials suggestion. */ export interface GoogleCloudAiplatformV1SuggestTrialsMetadata { /** * The identifier of the client that is requesting the suggestion. If * multiple SuggestTrialsRequests have the same `client_id`, the service will * return the identical suggested Trial if the Trial is pending, and provide a * new Trial if the last suggested Trial was completed. */ clientId?: string; /** * Operation metadata for suggesting Trials. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for VizierService.SuggestTrials. */ export interface GoogleCloudAiplatformV1SuggestTrialsRequest { /** * Required. The identifier of the client that is requesting the suggestion. * If multiple SuggestTrialsRequests have the same `client_id`, the service * will return the identical suggested Trial if the Trial is pending, and * provide a new Trial if the last suggested Trial was completed. */ clientId?: string; /** * Optional. This allows you to specify the "context" for a Trial; a context * is a slice (a subspace) of the search space. Typical uses for contexts: 1) * You are using Vizier to tune a server for best performance, but there's a * strong weekly cycle. The context specifies the day-of-week. This allows * Tuesday to generalize from Wednesday without assuming that everything is * identical. 2) Imagine you're optimizing some medical treatment for people. * As they walk in the door, you know certain facts about them (e.g. sex, * weight, height, blood-pressure). Put that information in the context, and * Vizier will adapt its suggestions to the patient. 3) You want to do a fair * A/B test efficiently. 
Specify the "A" and "B" conditions as contexts, and * Vizier will generalize between "A" and "B" conditions. If they are similar, * this will allow Vizier to converge to the optimum faster than if "A" and * "B" were separate Studies. NOTE: You can also enter contexts as REQUESTED * Trials, e.g. via the CreateTrial() RPC; that's the asynchronous option * where you don't need a close association between contexts and suggestions. * NOTE: All the Parameters you set in a context MUST be defined in the Study. * NOTE: You must supply 0 or $suggestion_count contexts. If you don't supply * any contexts, Vizier will make suggestions from the full search space * specified in the StudySpec; if you supply a full set of contexts, each * suggestion will match the corresponding context. NOTE: A Context with no * features set matches anything, and allows suggestions from the full search * space. NOTE: Contexts MUST lie within the search space specified in the * StudySpec. It's an error if they don't. NOTE: Contexts preferentially match * ACTIVE then REQUESTED trials before new suggestions are generated. NOTE: * Generation of suggestions involves a match between a Context and * (optionally) a REQUESTED trial; if that match is not fully specified, a * suggestion will be generated in the merged subspace. */ contexts?: GoogleCloudAiplatformV1TrialContext[]; /** * Required. The number of suggestions requested. It must be positive. */ suggestionCount?: number; } /** * Response message for VizierService.SuggestTrials. */ export interface GoogleCloudAiplatformV1SuggestTrialsResponse { /** * The time at which operation processing completed. */ endTime?: Date; /** * The time at which the operation was started. */ startTime?: Date; /** * The state of the Study. */ studyState?: | "STATE_UNSPECIFIED" | "ACTIVE" | "INACTIVE" | "COMPLETED"; /** * A list of Trials. */ trials?: GoogleCloudAiplatformV1Trial[]; } function serializeGoogleCloudAiplatformV1SuggestTrialsResponse(data: any): GoogleCloudAiplatformV1SuggestTrialsResponse { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? data["startTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1SuggestTrialsResponse(data: any): GoogleCloudAiplatformV1SuggestTrialsResponse { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * Input for summarization helpfulness metric. */ export interface GoogleCloudAiplatformV1SummarizationHelpfulnessInput { /** * Required. Summarization helpfulness instance. */ instance?: GoogleCloudAiplatformV1SummarizationHelpfulnessInstance; /** * Required. Spec for summarization helpfulness score metric. */ metricSpec?: GoogleCloudAiplatformV1SummarizationHelpfulnessSpec; } /** * Spec for summarization helpfulness instance. */ export interface GoogleCloudAiplatformV1SummarizationHelpfulnessInstance { /** * Required. Text to be summarized. */ context?: string; /** * Optional. Summarization prompt for LLM. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for summarization helpfulness result. */ export interface GoogleCloudAiplatformV1SummarizationHelpfulnessResult { /** * Output only. Confidence for summarization helpfulness score. 
*/ readonly confidence?: number; /** * Output only. Explanation for summarization helpfulness score. */ readonly explanation?: string; /** * Output only. Summarization Helpfulness score. */ readonly score?: number; } /** * Spec for summarization helpfulness score metric. */ export interface GoogleCloudAiplatformV1SummarizationHelpfulnessSpec { /** * Optional. Whether to use instance.reference to compute summarization * helpfulness. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for summarization quality metric. */ export interface GoogleCloudAiplatformV1SummarizationQualityInput { /** * Required. Summarization quality instance. */ instance?: GoogleCloudAiplatformV1SummarizationQualityInstance; /** * Required. Spec for summarization quality score metric. */ metricSpec?: GoogleCloudAiplatformV1SummarizationQualitySpec; } /** * Spec for summarization quality instance. */ export interface GoogleCloudAiplatformV1SummarizationQualityInstance { /** * Required. Text to be summarized. */ context?: string; /** * Required. Summarization prompt for LLM. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for summarization quality result. */ export interface GoogleCloudAiplatformV1SummarizationQualityResult { /** * Output only. Confidence for summarization quality score. */ readonly confidence?: number; /** * Output only. Explanation for summarization quality score. */ readonly explanation?: string; /** * Output only. Summarization Quality score. */ readonly score?: number; } /** * Spec for summarization quality score metric. */ export interface GoogleCloudAiplatformV1SummarizationQualitySpec { /** * Optional. Whether to use instance.reference to compute summarization * quality. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Input for summarization verbosity metric. */ export interface GoogleCloudAiplatformV1SummarizationVerbosityInput { /** * Required. Summarization verbosity instance. */ instance?: GoogleCloudAiplatformV1SummarizationVerbosityInstance; /** * Required. Spec for summarization verbosity score metric. */ metricSpec?: GoogleCloudAiplatformV1SummarizationVerbositySpec; } /** * Spec for summarization verbosity instance. */ export interface GoogleCloudAiplatformV1SummarizationVerbosityInstance { /** * Required. Text to be summarized. */ context?: string; /** * Optional. Summarization prompt for LLM. */ instruction?: string; /** * Required. Output of the evaluated model. */ prediction?: string; /** * Optional. Ground truth used to compare against the prediction. */ reference?: string; } /** * Spec for summarization verbosity result. */ export interface GoogleCloudAiplatformV1SummarizationVerbosityResult { /** * Output only. Confidence for summarization verbosity score. */ readonly confidence?: number; /** * Output only. Explanation for summarization verbosity score. */ readonly explanation?: string; /** * Output only. Summarization Verbosity score. */ readonly score?: number; } /** * Spec for summarization verbosity score metric. */ export interface GoogleCloudAiplatformV1SummarizationVerbositySpec { /** * Optional. Whether to use instance.reference to compute summarization * verbosity. */ useReference?: boolean; /** * Optional. Which version to use for evaluation. */ version?: number; } /** * Hyperparameters for SFT. 
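 * A minimal sketch (the specific values are illustrative assumptions, not
 * recommended defaults):
 *
 *     const hyperParameters: GoogleCloudAiplatformV1SupervisedHyperParameters = {
 *       adapterSize: "ADAPTER_SIZE_FOUR", // illustrative choice
 *       epochCount: 3n,
 *       learningRateMultiplier: 1,
 *     };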
*/ export interface GoogleCloudAiplatformV1SupervisedHyperParameters { /** * Optional. Adapter size for tuning. */ adapterSize?: | "ADAPTER_SIZE_UNSPECIFIED" | "ADAPTER_SIZE_ONE" | "ADAPTER_SIZE_TWO" | "ADAPTER_SIZE_FOUR" | "ADAPTER_SIZE_EIGHT" | "ADAPTER_SIZE_SIXTEEN" | "ADAPTER_SIZE_THIRTY_TWO"; /** * Optional. Number of complete passes the model makes over the entire * training dataset during training. */ epochCount?: bigint; /** * Optional. Multiplier for adjusting the default learning rate. */ learningRateMultiplier?: number; } function serializeGoogleCloudAiplatformV1SupervisedHyperParameters(data: any): GoogleCloudAiplatformV1SupervisedHyperParameters { return { ...data, epochCount: data["epochCount"] !== undefined ? String(data["epochCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SupervisedHyperParameters(data: any): GoogleCloudAiplatformV1SupervisedHyperParameters { return { ...data, epochCount: data["epochCount"] !== undefined ? BigInt(data["epochCount"]) : undefined, }; } /** * Dataset distribution for Supervised Tuning. */ export interface GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution { /** * Output only. Sum of a given population of values that are billable. */ readonly billableSum?: bigint; /** * Output only. Defines the histogram bucket. */ readonly buckets?: GoogleCloudAiplatformV1SupervisedTuningDatasetDistributionDatasetBucket[]; /** * Output only. The maximum of the population values. */ readonly max?: number; /** * Output only. The arithmetic mean of the values in the population. */ readonly mean?: number; /** * Output only. The median of the values in the population. */ readonly median?: number; /** * Output only. The minimum of the population values. */ readonly min?: number; /** * Output only. The 5th percentile of the values in the population. */ readonly p5?: number; /** * Output only. The 95th percentile of the values in the population. */ readonly p95?: number; /** * Output only. Sum of a given population of values. */ readonly sum?: bigint; } /** * Dataset bucket used to create a histogram for the distribution given a * population of values. */ export interface GoogleCloudAiplatformV1SupervisedTuningDatasetDistributionDatasetBucket { /** * Output only. Number of values in the bucket. */ readonly count?: number; /** * Output only. Left bound of the bucket. */ readonly left?: number; /** * Output only. Right bound of the bucket. */ readonly right?: number; } /** * Tuning data statistics for Supervised Tuning. */ export interface GoogleCloudAiplatformV1SupervisedTuningDataStats { /** * Output only. For each index in `truncated_example_indices`, the * user-facing reason why the example was dropped. Must not include example * itself. */ readonly droppedExampleReasons?: string[]; /** * Output only. Number of billable characters in the tuning dataset. */ readonly totalBillableCharacterCount?: bigint; /** * Output only. Number of billable tokens in the tuning dataset. */ readonly totalBillableTokenCount?: bigint; /** * Output only. The number of examples in the dataset that have been dropped. * An example can be dropped for reasons including: too many tokens, contains * an invalid image, contains too many images, etc. */ readonly totalTruncatedExampleCount?: bigint; /** * Output only. Number of tuning characters in the tuning dataset. */ readonly totalTuningCharacterCount?: bigint; /** * Output only. A partial sample of the indices (starting from 1) of the * dropped examples. 
*/ readonly truncatedExampleIndices?: bigint[]; /** * Output only. Number of examples in the tuning dataset. */ readonly tuningDatasetExampleCount?: bigint; /** * Output only. Number of tuning steps for this Tuning Job. */ readonly tuningStepCount?: bigint; /** * Output only. Sample user messages in the training dataset uri. */ readonly userDatasetExamples?: GoogleCloudAiplatformV1Content[]; /** * Output only. Dataset distributions for the user input tokens. */ readonly userInputTokenDistribution?: GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; /** * Output only. Dataset distributions for the messages per example. */ readonly userMessagePerExampleDistribution?: GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; /** * Output only. Dataset distributions for the user output tokens. */ readonly userOutputTokenDistribution?: GoogleCloudAiplatformV1SupervisedTuningDatasetDistribution; } /** * Tuning Spec for Supervised Tuning for first party models. */ export interface GoogleCloudAiplatformV1SupervisedTuningSpec { /** * Optional. Hyperparameters for SFT. */ hyperParameters?: GoogleCloudAiplatformV1SupervisedHyperParameters; /** * Required. Cloud Storage path to file containing training dataset for * tuning. The dataset must be formatted as a JSONL file. */ trainingDatasetUri?: string; /** * Optional. Cloud Storage path to file containing validation dataset for * tuning. The dataset must be formatted as a JSONL file. */ validationDatasetUri?: string; } function serializeGoogleCloudAiplatformV1SupervisedTuningSpec(data: any): GoogleCloudAiplatformV1SupervisedTuningSpec { return { ...data, hyperParameters: data["hyperParameters"] !== undefined ? serializeGoogleCloudAiplatformV1SupervisedHyperParameters(data["hyperParameters"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1SupervisedTuningSpec(data: any): GoogleCloudAiplatformV1SupervisedTuningSpec { return { ...data, hyperParameters: data["hyperParameters"] !== undefined ? deserializeGoogleCloudAiplatformV1SupervisedHyperParameters(data["hyperParameters"]) : undefined, }; } /** * Request message for FeatureOnlineStoreAdminService.SyncFeatureView. */ export interface GoogleCloudAiplatformV1SyncFeatureViewRequest { } /** * Response message for FeatureOnlineStoreAdminService.SyncFeatureView. */ export interface GoogleCloudAiplatformV1SyncFeatureViewResponse { /** * Format: * `projects/{project}/locations/{location}/featureOnlineStores/{feature_online_store}/featureViews/{feature_view}/featureViewSyncs/{feature_view_sync}` */ featureViewSync?: string; } /** * A tensor value type. */ export interface GoogleCloudAiplatformV1Tensor { /** * Type specific representations that make it easy to create tensor protos in * all languages. Only the representation corresponding to "dtype" can be set. * The values hold the flattened representation of the tensor in row major * order. BOOL */ boolVal?: boolean[]; /** * STRING */ bytesVal?: Uint8Array[]; /** * DOUBLE */ doubleVal?: number[]; /** * The data type of tensor. */ dtype?: | "DATA_TYPE_UNSPECIFIED" | "BOOL" | "STRING" | "FLOAT" | "DOUBLE" | "INT8" | "INT16" | "INT32" | "INT64" | "UINT8" | "UINT16" | "UINT32" | "UINT64"; /** * FLOAT */ floatVal?: number[]; /** * INT64 */ int64Val?: bigint[]; /** * INT_8 INT_16 INT_32 */ intVal?: number[]; /** * A list of tensor values. */ listVal?: GoogleCloudAiplatformV1Tensor[]; /** * Shape of the tensor. */ shape?: bigint[]; /** * STRING */ stringVal?: string[]; /** * A map of string to tensor. 
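 * For example (the key "scores" and its values are illustrative), a nested
 * tensor map might look like:
 *
 *     const tensor: GoogleCloudAiplatformV1Tensor = {
 *       structVal: {
 *         scores: { dtype: "FLOAT", shape: [2n], floatVal: [0.1, 0.9] },
 *       },
 *     };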
*/ structVal?: { [key: string]: GoogleCloudAiplatformV1Tensor }; /** * Serialized raw tensor content. */ tensorVal?: Uint8Array; /** * UINT64 */ uint64Val?: bigint[]; /** * UINT8 UINT16 UINT32 */ uintVal?: number[]; } function serializeGoogleCloudAiplatformV1Tensor(data: any): GoogleCloudAiplatformV1Tensor { return { ...data, bytesVal: data["bytesVal"] !== undefined ? data["bytesVal"].map((item: any) => (encodeBase64(item))) : undefined, int64Val: data["int64Val"] !== undefined ? data["int64Val"].map((item: any) => (String(item))) : undefined, listVal: data["listVal"] !== undefined ? data["listVal"].map((item: any) => (serializeGoogleCloudAiplatformV1Tensor(item))) : undefined, shape: data["shape"] !== undefined ? data["shape"].map((item: any) => (String(item))) : undefined, structVal: data["structVal"] !== undefined ? Object.fromEntries(Object.entries(data["structVal"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1Tensor(v)]))) : undefined, tensorVal: data["tensorVal"] !== undefined ? encodeBase64(data["tensorVal"]) : undefined, uint64Val: data["uint64Val"] !== undefined ? data["uint64Val"].map((item: any) => (String(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Tensor(data: any): GoogleCloudAiplatformV1Tensor { return { ...data, bytesVal: data["bytesVal"] !== undefined ? data["bytesVal"].map((item: any) => (decodeBase64(item as string))) : undefined, int64Val: data["int64Val"] !== undefined ? data["int64Val"].map((item: any) => (BigInt(item))) : undefined, listVal: data["listVal"] !== undefined ? data["listVal"].map((item: any) => (deserializeGoogleCloudAiplatformV1Tensor(item))) : undefined, shape: data["shape"] !== undefined ? data["shape"].map((item: any) => (BigInt(item))) : undefined, structVal: data["structVal"] !== undefined ? Object.fromEntries(Object.entries(data["structVal"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1Tensor(v)]))) : undefined, tensorVal: data["tensorVal"] !== undefined ? decodeBase64(data["tensorVal"] as string) : undefined, uint64Val: data["uint64Val"] !== undefined ? data["uint64Val"].map((item: any) => (BigInt(item))) : undefined, }; } /** * Tensorboard is a physical database that stores users' training metrics. A * default Tensorboard is provided in each region of a Google Cloud project. If * needed users can also create extra Tensorboards in their projects. */ export interface GoogleCloudAiplatformV1Tensorboard { /** * Output only. Consumer project Cloud Storage path prefix used to store blob * data, which can either be a bucket or directory. Does not end with a '/'. */ readonly blobStoragePathPrefix?: string; /** * Output only. Timestamp when this Tensorboard was created. */ readonly createTime?: Date; /** * Description of this Tensorboard. */ description?: string; /** * Required. User provided name of this Tensorboard. */ displayName?: string; /** * Customer-managed encryption key spec for a Tensorboard. If set, this * Tensorboard and all sub-resources of this Tensorboard will be secured by * this key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Used to perform a consistent read-modify-write updates. If not set, a * blind "overwrite" update happens. */ etag?: string; /** * Used to indicate if the TensorBoard instance is the default one. Each * project & region can have at most one default TensorBoard instance. 
* Creation of a default TensorBoard instance and updating an existing * TensorBoard instance to be default will mark all other TensorBoard * instances (if any) as non-default. */ isDefault?: boolean; /** * The labels with user-defined metadata to organize your Tensorboards. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. No more than 64 user labels * can be associated with one Tensorboard (System labels are excluded). See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. */ labels?: { [key: string]: string }; /** * Output only. Name of the Tensorboard. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}` */ readonly name?: string; /** * Output only. The number of Runs stored in this Tensorboard. */ readonly runCount?: number; /** * Output only. Reserved for future use. */ readonly satisfiesPzi?: boolean; /** * Output only. Reserved for future use. */ readonly satisfiesPzs?: boolean; /** * Output only. Timestamp when this Tensorboard was last updated. */ readonly updateTime?: Date; } /** * One blob (e.g., image, graph) viewable on a blob metric plot. */ export interface GoogleCloudAiplatformV1TensorboardBlob { /** * Optional. The bytes of the blob are not present unless they are returned by the * ReadTensorboardBlobData endpoint. */ data?: Uint8Array; /** * Output only. A URI safe key uniquely identifying a blob. Can be used to * locate the blob stored in the Cloud Storage bucket of the consumer project. */ readonly id?: string; } function serializeGoogleCloudAiplatformV1TensorboardBlob(data: any): GoogleCloudAiplatformV1TensorboardBlob { return { ...data, data: data["data"] !== undefined ? encodeBase64(data["data"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1TensorboardBlob(data: any): GoogleCloudAiplatformV1TensorboardBlob { return { ...data, data: data["data"] !== undefined ? decodeBase64(data["data"] as string) : undefined, }; } /** * One point viewable on a blob metric plot, but mostly just a wrapper message * to work around the fact that repeated fields can't be used directly within `oneof` fields. */ export interface GoogleCloudAiplatformV1TensorboardBlobSequence { /** * List of blobs contained within the sequence. */ values?: GoogleCloudAiplatformV1TensorboardBlob[]; } function serializeGoogleCloudAiplatformV1TensorboardBlobSequence(data: any): GoogleCloudAiplatformV1TensorboardBlobSequence { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (serializeGoogleCloudAiplatformV1TensorboardBlob(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1TensorboardBlobSequence(data: any): GoogleCloudAiplatformV1TensorboardBlobSequence { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (deserializeGoogleCloudAiplatformV1TensorboardBlob(item))) : undefined, }; } /** * A TensorboardExperiment is a group of TensorboardRuns that are typically * the results of a training job run, in a Tensorboard. */ export interface GoogleCloudAiplatformV1TensorboardExperiment { /** * Output only. Timestamp when this TensorboardExperiment was created. */ readonly createTime?: Date; /** * Description of this TensorboardExperiment. */ description?: string; /** * User provided name of this TensorboardExperiment. 
*/ displayName?: string; /** * Used to perform consistent read-modify-write updates. If not set, a blind * "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your * TensorboardExperiment. Label keys and values cannot be longer than 64 * characters (Unicode codepoints), can only contain lowercase letters, * numeric characters, underscores and dashes. International characters are * allowed. No more than 64 user labels can be associated with one Dataset * (System labels are excluded). See https://goo.gl/xmQnxf for more * information and examples of labels. System reserved label keys are prefixed * with `aiplatform.googleapis.com/` and are immutable. The following system * labels exist for each Dataset: * * `aiplatform.googleapis.com/dataset_metadata_schema`: output only. Its value * is the metadata_schema's title. */ labels?: { [key: string]: string }; /** * Output only. Name of the TensorboardExperiment. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}` */ readonly name?: string; /** * Immutable. Source of the TensorboardExperiment. Example: a custom training * job. */ source?: string; /** * Output only. Timestamp when this TensorboardExperiment was last updated. */ readonly updateTime?: Date; } /** * TensorboardRun maps to a specific execution of a training job with a given * set of hyperparameter values, model definition, dataset, etc */ export interface GoogleCloudAiplatformV1TensorboardRun { /** * Output only. Timestamp when this TensorboardRun was created. */ readonly createTime?: Date; /** * Description of this TensorboardRun. */ description?: string; /** * Required. User provided name of this TensorboardRun. This value must be * unique among all TensorboardRuns belonging to the same parent * TensorboardExperiment. */ displayName?: string; /** * Used to perform a consistent read-modify-write updates. If not set, a * blind "overwrite" update happens. */ etag?: string; /** * The labels with user-defined metadata to organize your TensorboardRuns. * This field will be used to filter and visualize Runs in the Tensorboard UI. * For example, a Vertex AI training job can set a label * aiplatform.googleapis.com/training_job_id=xxxxx to all the runs created * within that job. An end user can set a label experiment_id=xxxxx for all * the runs produced in a Jupyter notebook. These runs can be grouped by a * label value and visualized together in the Tensorboard UI. Label keys and * values can be no longer than 64 characters (Unicode codepoints), can only * contain lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. No more than 64 user labels can be * associated with one TensorboardRun (System labels are excluded). See * https://goo.gl/xmQnxf for more information and examples of labels. System * reserved label keys are prefixed with "aiplatform.googleapis.com/" and are * immutable. */ labels?: { [key: string]: string }; /** * Output only. Name of the TensorboardRun. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ readonly name?: string; /** * Output only. Timestamp when this TensorboardRun was last updated. */ readonly updateTime?: Date; } /** * One point viewable on a tensor metric plot. */ export interface GoogleCloudAiplatformV1TensorboardTensor { /** * Required. 
Serialized form of * https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/framework/tensor.proto */ value?: Uint8Array; /** * Optional. Version number of TensorProto used to serialize value. */ versionNumber?: number; } function serializeGoogleCloudAiplatformV1TensorboardTensor(data: any): GoogleCloudAiplatformV1TensorboardTensor { return { ...data, value: data["value"] !== undefined ? encodeBase64(data["value"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1TensorboardTensor(data: any): GoogleCloudAiplatformV1TensorboardTensor { return { ...data, value: data["value"] !== undefined ? decodeBase64(data["value"] as string) : undefined, }; } /** * TensorboardTimeSeries maps to time series produced in training runs. */ export interface GoogleCloudAiplatformV1TensorboardTimeSeries { /** * Output only. Timestamp when this TensorboardTimeSeries was created. */ readonly createTime?: Date; /** * Description of this TensorboardTimeSeries. */ description?: string; /** * Required. User provided name of this TensorboardTimeSeries. This value * should be unique among all TensorboardTimeSeries resources belonging to the * same TensorboardRun resource (parent resource). */ displayName?: string; /** * Used to perform consistent read-modify-write updates. If not set, a * blind "overwrite" update happens. */ etag?: string; /** * Output only. Scalar, Tensor, or Blob metadata for this * TensorboardTimeSeries. */ readonly metadata?: GoogleCloudAiplatformV1TensorboardTimeSeriesMetadata; /** * Output only. Name of the TensorboardTimeSeries. */ readonly name?: string; /** * Data of the current plugin, with the size limited to 65KB. */ pluginData?: Uint8Array; /** * Immutable. Name of the plugin this time series pertains to, such as Scalar, * Tensor, or Blob. */ pluginName?: string; /** * Output only. Timestamp when this TensorboardTimeSeries was last updated. */ readonly updateTime?: Date; /** * Required. Immutable. Type of TensorboardTimeSeries value. */ valueType?: | "VALUE_TYPE_UNSPECIFIED" | "SCALAR" | "TENSOR" | "BLOB_SEQUENCE"; } function serializeGoogleCloudAiplatformV1TensorboardTimeSeries(data: any): GoogleCloudAiplatformV1TensorboardTimeSeries { return { ...data, pluginData: data["pluginData"] !== undefined ? encodeBase64(data["pluginData"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1TensorboardTimeSeries(data: any): GoogleCloudAiplatformV1TensorboardTimeSeries { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, pluginData: data["pluginData"] !== undefined ? decodeBase64(data["pluginData"] as string) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Describes metadata for a TensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1TensorboardTimeSeriesMetadata { /** * Output only. The largest blob sequence length (number of blobs) of all * data points in this time series, if its ValueType is BLOB_SEQUENCE. */ readonly maxBlobSequenceLength?: bigint; /** * Output only. Max step index of all data points within a * TensorboardTimeSeries. */ readonly maxStep?: bigint; /** * Output only. Max wall clock timestamp of all data points within a * TensorboardTimeSeries. */ readonly maxWallTime?: Date; } /** * The storage details for TFRecord output content. */ export interface GoogleCloudAiplatformV1TFRecordDestination { /** * Required. Google Cloud Storage location. 
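 * A minimal sketch (the bucket path is illustrative, and `outputUriPrefix` is
 * assumed here to be the relevant GcsDestination field):
 *
 *     const destination: GoogleCloudAiplatformV1TFRecordDestination = {
 *       gcsDestination: { outputUriPrefix: "gs://my-bucket/tfrecord-output/" }, // illustrative path
 *     };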
*/ gcsDestination?: GoogleCloudAiplatformV1GcsDestination; } /** * The config for feature monitoring threshold. */ export interface GoogleCloudAiplatformV1ThresholdConfig { /** * Specify a threshold value that can trigger the alert. If this threshold * config is for feature distribution distance: 1. For a categorical feature, * the distribution distance is calculated by the L-infinity norm. 2. For * a numerical feature, the distribution distance is calculated by the * Jensen–Shannon divergence. Each feature must have a non-zero threshold if * it needs to be monitored. Otherwise, no alert will be triggered for that * feature. */ value?: number; } /** * All the data stored in a TensorboardTimeSeries. */ export interface GoogleCloudAiplatformV1TimeSeriesData { /** * Required. The ID of the TensorboardTimeSeries, which will become the final * component of the TensorboardTimeSeries' resource name. */ tensorboardTimeSeriesId?: string; /** * Required. Data points in this time series. */ values?: GoogleCloudAiplatformV1TimeSeriesDataPoint[]; /** * Required. Immutable. The value type of this time series. All the values in * this time series data must match this value type. */ valueType?: | "VALUE_TYPE_UNSPECIFIED" | "SCALAR" | "TENSOR" | "BLOB_SEQUENCE"; } function serializeGoogleCloudAiplatformV1TimeSeriesData(data: any): GoogleCloudAiplatformV1TimeSeriesData { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (serializeGoogleCloudAiplatformV1TimeSeriesDataPoint(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1TimeSeriesData(data: any): GoogleCloudAiplatformV1TimeSeriesData { return { ...data, values: data["values"] !== undefined ? data["values"].map((item: any) => (deserializeGoogleCloudAiplatformV1TimeSeriesDataPoint(item))) : undefined, }; } /** * A TensorboardTimeSeries data point. */ export interface GoogleCloudAiplatformV1TimeSeriesDataPoint { /** * A blob sequence value. */ blobs?: GoogleCloudAiplatformV1TensorboardBlobSequence; /** * A scalar value. */ scalar?: GoogleCloudAiplatformV1Scalar; /** * Step index of this data point within the run. */ step?: bigint; /** * A tensor value. */ tensor?: GoogleCloudAiplatformV1TensorboardTensor; /** * Wall clock timestamp when this data point was generated by the end user. */ wallTime?: Date; } function serializeGoogleCloudAiplatformV1TimeSeriesDataPoint(data: any): GoogleCloudAiplatformV1TimeSeriesDataPoint { return { ...data, blobs: data["blobs"] !== undefined ? serializeGoogleCloudAiplatformV1TensorboardBlobSequence(data["blobs"]) : undefined, step: data["step"] !== undefined ? String(data["step"]) : undefined, tensor: data["tensor"] !== undefined ? serializeGoogleCloudAiplatformV1TensorboardTensor(data["tensor"]) : undefined, wallTime: data["wallTime"] !== undefined ? data["wallTime"].toISOString() : undefined, }; } function deserializeGoogleCloudAiplatformV1TimeSeriesDataPoint(data: any): GoogleCloudAiplatformV1TimeSeriesDataPoint { return { ...data, blobs: data["blobs"] !== undefined ? deserializeGoogleCloudAiplatformV1TensorboardBlobSequence(data["blobs"]) : undefined, step: data["step"] !== undefined ? BigInt(data["step"]) : undefined, tensor: data["tensor"] !== undefined ? deserializeGoogleCloudAiplatformV1TensorboardTensor(data["tensor"]) : undefined, wallTime: data["wallTime"] !== undefined ? new Date(data["wallTime"]) : undefined, }; } /** * Assigns input data to training, validation, and test sets based on * provided timestamps. 
The youngest data pieces are assigned to training set, * next to validation set, and the oldest to the test set. Supported only for * tabular Datasets. */ export interface GoogleCloudAiplatformV1TimestampSplit { /** * Required. The key is a name of one of the Dataset's data columns. The * values of the key (the values in the column) must be in RFC 3339 * `date-time` format, where `time-offset` = `"Z"` (e.g. * 1985-04-12T23:20:50.52Z). If for a piece of data the key is not present or * has an invalid value, that piece is ignored by the pipeline. */ key?: string; /** * The fraction of the input data that is to be used to evaluate the Model. */ testFraction?: number; /** * The fraction of the input data that is to be used to train the Model. */ trainingFraction?: number; /** * The fraction of the input data that is to be used to validate the Model. */ validationFraction?: number; } /** * Tokens info with a list of tokens and the corresponding list of token ids. */ export interface GoogleCloudAiplatformV1TokensInfo { /** * Optional. Optional fields for the role from the corresponding Content. */ role?: string; /** * A list of token ids from the input. */ tokenIds?: bigint[]; /** * A list of tokens from the input. */ tokens?: Uint8Array[]; } function serializeGoogleCloudAiplatformV1TokensInfo(data: any): GoogleCloudAiplatformV1TokensInfo { return { ...data, tokenIds: data["tokenIds"] !== undefined ? data["tokenIds"].map((item: any) => (String(item))) : undefined, tokens: data["tokens"] !== undefined ? data["tokens"].map((item: any) => (encodeBase64(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1TokensInfo(data: any): GoogleCloudAiplatformV1TokensInfo { return { ...data, tokenIds: data["tokenIds"] !== undefined ? data["tokenIds"].map((item: any) => (BigInt(item))) : undefined, tokens: data["tokens"] !== undefined ? data["tokens"].map((item: any) => (decodeBase64(item as string))) : undefined, }; } /** * Tool details that the model may use to generate response. A `Tool` is a * piece of code that enables the system to interact with external systems to * perform an action, or set of actions, outside of knowledge and scope of the * model. A Tool object should contain exactly one type of Tool (e.g * FunctionDeclaration, Retrieval or GoogleSearchRetrieval). */ export interface GoogleCloudAiplatformV1Tool { /** * Optional. CodeExecution tool type. Enables the model to execute code as * part of generation. */ codeExecution?: GoogleCloudAiplatformV1ToolCodeExecution; /** * Optional. Tool to support searching public web data, powered by Vertex AI * Search and Sec4 compliance. */ enterpriseWebSearch?: GoogleCloudAiplatformV1EnterpriseWebSearch; /** * Optional. Function tool type. One or more function declarations to be * passed to the model along with the current user query. Model may decide to * call a subset of these functions by populating FunctionCall in the * response. User should provide a FunctionResponse for each function call in * the next turn. Based on the function responses, Model will generate the * final response back to the user. Maximum 128 function declarations can be * provided. */ functionDeclarations?: GoogleCloudAiplatformV1FunctionDeclaration[]; /** * Optional. GoogleSearch tool type. Tool to support Google Search in Model. * Powered by Google. */ googleSearch?: GoogleCloudAiplatformV1ToolGoogleSearch; /** * Optional. GoogleSearchRetrieval tool type. Specialized retrieval tool that * is powered by Google search. 
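 * A minimal sketch of a Tool that only enables Google-Search-backed retrieval
 * (a Tool should contain exactly one type of tool; an empty
 * GoogleSearchRetrieval object is assumed to be acceptable here):
 *
 *     const tool: GoogleCloudAiplatformV1Tool = {
 *       googleSearchRetrieval: {},
 *     };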
*/ googleSearchRetrieval?: GoogleCloudAiplatformV1GoogleSearchRetrieval; /** * Optional. Retrieval tool type. System will always execute the provided * retrieval tool(s) to get external knowledge to answer the prompt. Retrieval * results are presented to the model for generation. */ retrieval?: GoogleCloudAiplatformV1Retrieval; } function serializeGoogleCloudAiplatformV1Tool(data: any): GoogleCloudAiplatformV1Tool { return { ...data, functionDeclarations: data["functionDeclarations"] !== undefined ? data["functionDeclarations"].map((item: any) => (serializeGoogleCloudAiplatformV1FunctionDeclaration(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1Tool(data: any): GoogleCloudAiplatformV1Tool { return { ...data, functionDeclarations: data["functionDeclarations"] !== undefined ? data["functionDeclarations"].map((item: any) => (deserializeGoogleCloudAiplatformV1FunctionDeclaration(item))) : undefined, }; } /** * Spec for tool call. */ export interface GoogleCloudAiplatformV1ToolCall { /** * Optional. Spec for tool input */ toolInput?: string; /** * Required. Spec for tool name */ toolName?: string; } /** * Input for tool call valid metric. */ export interface GoogleCloudAiplatformV1ToolCallValidInput { /** * Required. Repeated tool call valid instances. */ instances?: GoogleCloudAiplatformV1ToolCallValidInstance[]; /** * Required. Spec for tool call valid metric. */ metricSpec?: GoogleCloudAiplatformV1ToolCallValidSpec; } /** * Spec for tool call valid instance. */ export interface GoogleCloudAiplatformV1ToolCallValidInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Tool call valid metric value for an instance. */ export interface GoogleCloudAiplatformV1ToolCallValidMetricValue { /** * Output only. Tool call valid score. */ readonly score?: number; } /** * Results for tool call valid metric. */ export interface GoogleCloudAiplatformV1ToolCallValidResults { /** * Output only. Tool call valid metric values. */ readonly toolCallValidMetricValues?: GoogleCloudAiplatformV1ToolCallValidMetricValue[]; } /** * Spec for tool call valid metric. */ export interface GoogleCloudAiplatformV1ToolCallValidSpec { } /** * Tool that executes code generated by the model, and automatically returns * the result to the model. See also [ExecutableCode]and [CodeExecutionResult] * which are input and output to this tool. */ export interface GoogleCloudAiplatformV1ToolCodeExecution { } /** * Tool config. This config is shared for all tools provided in the request. */ export interface GoogleCloudAiplatformV1ToolConfig { /** * Optional. Function calling config. */ functionCallingConfig?: GoogleCloudAiplatformV1FunctionCallingConfig; /** * Optional. Retrieval config. */ retrievalConfig?: GoogleCloudAiplatformV1RetrievalConfig; } /** * GoogleSearch tool type. Tool to support Google Search in Model. Powered by * Google. */ export interface GoogleCloudAiplatformV1ToolGoogleSearch { } /** * Input for tool name match metric. */ export interface GoogleCloudAiplatformV1ToolNameMatchInput { /** * Required. Repeated tool name match instances. */ instances?: GoogleCloudAiplatformV1ToolNameMatchInstance[]; /** * Required. Spec for tool name match metric. */ metricSpec?: GoogleCloudAiplatformV1ToolNameMatchSpec; } /** * Spec for tool name match instance. */ export interface GoogleCloudAiplatformV1ToolNameMatchInstance { /** * Required. Output of the evaluated model. 
*/ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Tool name match metric value for an instance. */ export interface GoogleCloudAiplatformV1ToolNameMatchMetricValue { /** * Output only. Tool name match score. */ readonly score?: number; } /** * Results for tool name match metric. */ export interface GoogleCloudAiplatformV1ToolNameMatchResults { /** * Output only. Tool name match metric values. */ readonly toolNameMatchMetricValues?: GoogleCloudAiplatformV1ToolNameMatchMetricValue[]; } /** * Spec for tool name match metric. */ export interface GoogleCloudAiplatformV1ToolNameMatchSpec { } /** * Input for tool parameter key match metric. */ export interface GoogleCloudAiplatformV1ToolParameterKeyMatchInput { /** * Required. Repeated tool parameter key match instances. */ instances?: GoogleCloudAiplatformV1ToolParameterKeyMatchInstance[]; /** * Required. Spec for tool parameter key match metric. */ metricSpec?: GoogleCloudAiplatformV1ToolParameterKeyMatchSpec; } /** * Spec for tool parameter key match instance. */ export interface GoogleCloudAiplatformV1ToolParameterKeyMatchInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Tool parameter key match metric value for an instance. */ export interface GoogleCloudAiplatformV1ToolParameterKeyMatchMetricValue { /** * Output only. Tool parameter key match score. */ readonly score?: number; } /** * Results for tool parameter key match metric. */ export interface GoogleCloudAiplatformV1ToolParameterKeyMatchResults { /** * Output only. Tool parameter key match metric values. */ readonly toolParameterKeyMatchMetricValues?: GoogleCloudAiplatformV1ToolParameterKeyMatchMetricValue[]; } /** * Spec for tool parameter key match metric. */ export interface GoogleCloudAiplatformV1ToolParameterKeyMatchSpec { } /** * Input for tool parameter key value match metric. */ export interface GoogleCloudAiplatformV1ToolParameterKVMatchInput { /** * Required. Repeated tool parameter key value match instances. */ instances?: GoogleCloudAiplatformV1ToolParameterKVMatchInstance[]; /** * Required. Spec for tool parameter key value match metric. */ metricSpec?: GoogleCloudAiplatformV1ToolParameterKVMatchSpec; } /** * Spec for tool parameter key value match instance. */ export interface GoogleCloudAiplatformV1ToolParameterKVMatchInstance { /** * Required. Output of the evaluated model. */ prediction?: string; /** * Required. Ground truth used to compare against the prediction. */ reference?: string; } /** * Tool parameter key value match metric value for an instance. */ export interface GoogleCloudAiplatformV1ToolParameterKVMatchMetricValue { /** * Output only. Tool parameter key value match score. */ readonly score?: number; } /** * Results for tool parameter key value match metric. */ export interface GoogleCloudAiplatformV1ToolParameterKVMatchResults { /** * Output only. Tool parameter key value match metric values. */ readonly toolParameterKvMatchMetricValues?: GoogleCloudAiplatformV1ToolParameterKVMatchMetricValue[]; } /** * Spec for tool parameter key value match metric. */ export interface GoogleCloudAiplatformV1ToolParameterKVMatchSpec { /** * Optional. Whether to use STRICT string match on parameter values. */ useStrictStringMatch?: boolean; } /** * CMLE training config. For every active learning labeling iteration, system * will train a machine learning model on CMLE. 
The trained model will be used * by data sampling algorithm to select DataItems. */ export interface GoogleCloudAiplatformV1TrainingConfig { /** * The timeout hours for the CMLE training job, expressed in milli hours i.e. * 1,000 value in this field means 1 hour. */ timeoutTrainingMilliHours?: bigint; } function serializeGoogleCloudAiplatformV1TrainingConfig(data: any): GoogleCloudAiplatformV1TrainingConfig { return { ...data, timeoutTrainingMilliHours: data["timeoutTrainingMilliHours"] !== undefined ? String(data["timeoutTrainingMilliHours"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1TrainingConfig(data: any): GoogleCloudAiplatformV1TrainingConfig { return { ...data, timeoutTrainingMilliHours: data["timeoutTrainingMilliHours"] !== undefined ? BigInt(data["timeoutTrainingMilliHours"]) : undefined, }; } /** * The TrainingPipeline orchestrates tasks associated with training a Model. It * always executes the training task, and optionally may also export data from * Vertex AI's Dataset which becomes the training input, upload the Model to * Vertex AI, and evaluate the Model. */ export interface GoogleCloudAiplatformV1TrainingPipeline { /** * Output only. Time when the TrainingPipeline was created. */ readonly createTime?: Date; /** * Required. The user-defined name of this TrainingPipeline. */ displayName?: string; /** * Customer-managed encryption key spec for a TrainingPipeline. If set, this * TrainingPipeline will be secured by this key. Note: Model trained by this * TrainingPipeline is also secured by this key if model_to_upload is not set * separately. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the TrainingPipeline entered any of the following * states: `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, * `PIPELINE_STATE_CANCELLED`. */ readonly endTime?: Date; /** * Output only. Only populated when the pipeline's state is * `PIPELINE_STATE_FAILED` or `PIPELINE_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * Specifies Vertex AI owned input data that may be used for training the * Model. The TrainingPipeline's training_task_definition should make clear * whether this config is used and if there are any special requirements on * how it should be filled. If nothing about this config is mentioned in the * training_task_definition, then it should be assumed that the * TrainingPipeline does not depend on this configuration. */ inputDataConfig?: GoogleCloudAiplatformV1InputDataConfig; /** * The labels with user-defined metadata to organize TrainingPipelines. Label * keys and values can be no longer than 64 characters (Unicode codepoints), * can only contain lowercase letters, numeric characters, underscores and * dashes. International characters are allowed. See https://goo.gl/xmQnxf for * more information and examples of labels. */ labels?: { [key: string]: string }; /** * Optional. The ID to use for the uploaded Model, which will become the * final component of the model resource name. This value may be up to 63 * characters, and valid characters are `[a-z0-9_-]`. The first character * cannot be a number or hyphen. */ modelId?: string; /** * Describes the Model that may be uploaded (via ModelService.UploadModel) by * this TrainingPipeline. The TrainingPipeline's training_task_definition * should make clear whether this Model description should be populated, and * if there are any special requirements regarding how it should be filled. 
If * nothing is mentioned in the training_task_definition, then it should be * assumed that this field should not be filled and the training task either * uploads the Model without a need of this information, or that training task * does not support uploading a Model as part of the pipeline. When the * Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and the trained Model * had been uploaded into Vertex AI, then the model_to_upload's resource name * is populated. The Model is always uploaded into the Project and Location in * which this pipeline is. */ modelToUpload?: GoogleCloudAiplatformV1Model; /** * Output only. Resource name of the TrainingPipeline. */ readonly name?: string; /** * Optional. When specify this field, the `model_to_upload` will not be * uploaded as a new model, instead, it will become a new version of this * `parent_model`. */ parentModel?: string; /** * Output only. Time when the TrainingPipeline for the first time entered the * `PIPELINE_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the pipeline. */ readonly state?: | "PIPELINE_STATE_UNSPECIFIED" | "PIPELINE_STATE_QUEUED" | "PIPELINE_STATE_PENDING" | "PIPELINE_STATE_RUNNING" | "PIPELINE_STATE_SUCCEEDED" | "PIPELINE_STATE_FAILED" | "PIPELINE_STATE_CANCELLING" | "PIPELINE_STATE_CANCELLED" | "PIPELINE_STATE_PAUSED"; /** * Required. A Google Cloud Storage path to the YAML file that defines the * training task which is responsible for producing the model artifact, and * may also include additional auxiliary work. The definition files that can * be used here are found in * gs://google-cloud-aiplatform/schema/trainingjob/definition/. Note: The URI * given on output will be immutable and probably different, including the URI * scheme, than the one given on input. The output URI will point to a * location where the user only has a read access. */ trainingTaskDefinition?: string; /** * Required. The training task's parameter(s), as specified in the * training_task_definition's `inputs`. */ trainingTaskInputs?: any; /** * Output only. The metadata information as specified in the * training_task_definition's `metadata`. This metadata is an auxiliary * runtime and final information about the training task. While the pipeline * is running this information is populated only at a best effort basis. Only * present if the pipeline's training_task_definition contains `metadata` * object. */ readonly trainingTaskMetadata?: any; /** * Output only. Time when the TrainingPipeline was most recently updated. */ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1TrainingPipeline(data: any): GoogleCloudAiplatformV1TrainingPipeline { return { ...data, modelToUpload: data["modelToUpload"] !== undefined ? serializeGoogleCloudAiplatformV1Model(data["modelToUpload"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1TrainingPipeline(data: any): GoogleCloudAiplatformV1TrainingPipeline { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, modelToUpload: data["modelToUpload"] !== undefined ? deserializeGoogleCloudAiplatformV1Model(data["modelToUpload"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Spec for trajectory. */ export interface GoogleCloudAiplatformV1Trajectory { /** * Required. 
Tool calls in the trajectory. */ toolCalls?: GoogleCloudAiplatformV1ToolCall[]; } /** * Instances and metric spec for TrajectoryAnyOrderMatch metric. */ export interface GoogleCloudAiplatformV1TrajectoryAnyOrderMatchInput { /** * Required. Repeated TrajectoryAnyOrderMatch instance. */ instances?: GoogleCloudAiplatformV1TrajectoryAnyOrderMatchInstance[]; /** * Required. Spec for TrajectoryAnyOrderMatch metric. */ metricSpec?: GoogleCloudAiplatformV1TrajectoryAnyOrderMatchSpec; } /** * Spec for TrajectoryAnyOrderMatch instance. */ export interface GoogleCloudAiplatformV1TrajectoryAnyOrderMatchInstance { /** * Required. Spec for predicted tool call trajectory. */ predictedTrajectory?: GoogleCloudAiplatformV1Trajectory; /** * Required. Spec for reference tool call trajectory. */ referenceTrajectory?: GoogleCloudAiplatformV1Trajectory; } /** * TrajectoryAnyOrderMatch metric value for an instance. */ export interface GoogleCloudAiplatformV1TrajectoryAnyOrderMatchMetricValue { /** * Output only. TrajectoryAnyOrderMatch score. */ readonly score?: number; } /** * Results for TrajectoryAnyOrderMatch metric. */ export interface GoogleCloudAiplatformV1TrajectoryAnyOrderMatchResults { /** * Output only. TrajectoryAnyOrderMatch metric values. */ readonly trajectoryAnyOrderMatchMetricValues?: GoogleCloudAiplatformV1TrajectoryAnyOrderMatchMetricValue[]; } /** * Spec for TrajectoryAnyOrderMatch metric - returns 1 if all tool calls in the * reference trajectory appear in the predicted trajectory in any order, else 0. */ export interface GoogleCloudAiplatformV1TrajectoryAnyOrderMatchSpec { } /** * Instances and metric spec for TrajectoryExactMatch metric. */ export interface GoogleCloudAiplatformV1TrajectoryExactMatchInput { /** * Required. Repeated TrajectoryExactMatch instance. */ instances?: GoogleCloudAiplatformV1TrajectoryExactMatchInstance[]; /** * Required. Spec for TrajectoryExactMatch metric. */ metricSpec?: GoogleCloudAiplatformV1TrajectoryExactMatchSpec; } /** * Spec for TrajectoryExactMatch instance. */ export interface GoogleCloudAiplatformV1TrajectoryExactMatchInstance { /** * Required. Spec for predicted tool call trajectory. */ predictedTrajectory?: GoogleCloudAiplatformV1Trajectory; /** * Required. Spec for reference tool call trajectory. */ referenceTrajectory?: GoogleCloudAiplatformV1Trajectory; } /** * TrajectoryExactMatch metric value for an instance. */ export interface GoogleCloudAiplatformV1TrajectoryExactMatchMetricValue { /** * Output only. TrajectoryExactMatch score. */ readonly score?: number; } /** * Results for TrajectoryExactMatch metric. */ export interface GoogleCloudAiplatformV1TrajectoryExactMatchResults { /** * Output only. TrajectoryExactMatch metric values. */ readonly trajectoryExactMatchMetricValues?: GoogleCloudAiplatformV1TrajectoryExactMatchMetricValue[]; } /** * Spec for TrajectoryExactMatch metric - returns 1 if tool calls in the * reference trajectory exactly match the predicted trajectory, else 0. */ export interface GoogleCloudAiplatformV1TrajectoryExactMatchSpec { } /** * Instances and metric spec for TrajectoryInOrderMatch metric. */ export interface GoogleCloudAiplatformV1TrajectoryInOrderMatchInput { /** * Required. Repeated TrajectoryInOrderMatch instance. */ instances?: GoogleCloudAiplatformV1TrajectoryInOrderMatchInstance[]; /** * Required. Spec for TrajectoryInOrderMatch metric. */ metricSpec?: GoogleCloudAiplatformV1TrajectoryInOrderMatchSpec; } /** * Spec for TrajectoryInOrderMatch instance. 
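 *
 * For illustration only (tool names and inputs below are hypothetical): an
 * instance pairs a predicted tool-call trajectory with a reference
 * trajectory, each built from `GoogleCloudAiplatformV1ToolCall` values.
 *
 * ```ts
 * const instance: GoogleCloudAiplatformV1TrajectoryInOrderMatchInstance = {
 *   predictedTrajectory: {
 *     toolCalls: [{ toolName: "search_flights", toolInput: '{"origin":"SFO"}' }],
 *   },
 *   referenceTrajectory: {
 *     toolCalls: [{ toolName: "search_flights", toolInput: '{"origin":"SFO"}' }],
 *   },
 * };
 * ```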
*/ export interface GoogleCloudAiplatformV1TrajectoryInOrderMatchInstance { /** * Required. Spec for predicted tool call trajectory. */ predictedTrajectory?: GoogleCloudAiplatformV1Trajectory; /** * Required. Spec for reference tool call trajectory. */ referenceTrajectory?: GoogleCloudAiplatformV1Trajectory; } /** * TrajectoryInOrderMatch metric value for an instance. */ export interface GoogleCloudAiplatformV1TrajectoryInOrderMatchMetricValue { /** * Output only. TrajectoryInOrderMatch score. */ readonly score?: number; } /** * Results for TrajectoryInOrderMatch metric. */ export interface GoogleCloudAiplatformV1TrajectoryInOrderMatchResults { /** * Output only. TrajectoryInOrderMatch metric values. */ readonly trajectoryInOrderMatchMetricValues?: GoogleCloudAiplatformV1TrajectoryInOrderMatchMetricValue[]; } /** * Spec for TrajectoryInOrderMatch metric - returns 1 if tool calls in the * reference trajectory appear in the predicted trajectory in the same order, * else 0. */ export interface GoogleCloudAiplatformV1TrajectoryInOrderMatchSpec { } /** * Instances and metric spec for TrajectoryPrecision metric. */ export interface GoogleCloudAiplatformV1TrajectoryPrecisionInput { /** * Required. Repeated TrajectoryPrecision instance. */ instances?: GoogleCloudAiplatformV1TrajectoryPrecisionInstance[]; /** * Required. Spec for TrajectoryPrecision metric. */ metricSpec?: GoogleCloudAiplatformV1TrajectoryPrecisionSpec; } /** * Spec for TrajectoryPrecision instance. */ export interface GoogleCloudAiplatformV1TrajectoryPrecisionInstance { /** * Required. Spec for predicted tool call trajectory. */ predictedTrajectory?: GoogleCloudAiplatformV1Trajectory; /** * Required. Spec for reference tool call trajectory. */ referenceTrajectory?: GoogleCloudAiplatformV1Trajectory; } /** * TrajectoryPrecision metric value for an instance. */ export interface GoogleCloudAiplatformV1TrajectoryPrecisionMetricValue { /** * Output only. TrajectoryPrecision score. */ readonly score?: number; } /** * Results for TrajectoryPrecision metric. */ export interface GoogleCloudAiplatformV1TrajectoryPrecisionResults { /** * Output only. TrajectoryPrecision metric values. */ readonly trajectoryPrecisionMetricValues?: GoogleCloudAiplatformV1TrajectoryPrecisionMetricValue[]; } /** * Spec for TrajectoryPrecision metric - returns a float score based on average * precision of individual tool calls. */ export interface GoogleCloudAiplatformV1TrajectoryPrecisionSpec { } /** * Instances and metric spec for TrajectoryRecall metric. */ export interface GoogleCloudAiplatformV1TrajectoryRecallInput { /** * Required. Repeated TrajectoryRecall instance. */ instances?: GoogleCloudAiplatformV1TrajectoryRecallInstance[]; /** * Required. Spec for TrajectoryRecall metric. */ metricSpec?: GoogleCloudAiplatformV1TrajectoryRecallSpec; } /** * Spec for TrajectoryRecall instance. */ export interface GoogleCloudAiplatformV1TrajectoryRecallInstance { /** * Required. Spec for predicted tool call trajectory. */ predictedTrajectory?: GoogleCloudAiplatformV1Trajectory; /** * Required. Spec for reference tool call trajectory. */ referenceTrajectory?: GoogleCloudAiplatformV1Trajectory; } /** * TrajectoryRecall metric value for an instance. */ export interface GoogleCloudAiplatformV1TrajectoryRecallMetricValue { /** * Output only. TrajectoryRecall score. */ readonly score?: number; } /** * Results for TrajectoryRecall metric. */ export interface GoogleCloudAiplatformV1TrajectoryRecallResults { /** * Output only. TrajectoryRecall metric values. 
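 *
 * A small sketch of consuming these values (the `results` object would
 * normally come back from the evaluation service; it is constructed empty
 * here only to show the types):
 *
 * ```ts
 * const results: GoogleCloudAiplatformV1TrajectoryRecallResults = {};
 * const scores = (results.trajectoryRecallMetricValues ?? [])
 *   .map((v) => v.score ?? 0);
 * ```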
*/ readonly trajectoryRecallMetricValues?: GoogleCloudAiplatformV1TrajectoryRecallMetricValue[]; } /** * Spec for TrajectoryRecall metric - returns a float score based on average * recall of individual tool calls. */ export interface GoogleCloudAiplatformV1TrajectoryRecallSpec { } /** * Instances and metric spec for TrajectorySingleToolUse metric. */ export interface GoogleCloudAiplatformV1TrajectorySingleToolUseInput { /** * Required. Repeated TrajectorySingleToolUse instance. */ instances?: GoogleCloudAiplatformV1TrajectorySingleToolUseInstance[]; /** * Required. Spec for TrajectorySingleToolUse metric. */ metricSpec?: GoogleCloudAiplatformV1TrajectorySingleToolUseSpec; } /** * Spec for TrajectorySingleToolUse instance. */ export interface GoogleCloudAiplatformV1TrajectorySingleToolUseInstance { /** * Required. Spec for predicted tool call trajectory. */ predictedTrajectory?: GoogleCloudAiplatformV1Trajectory; } /** * TrajectorySingleToolUse metric value for an instance. */ export interface GoogleCloudAiplatformV1TrajectorySingleToolUseMetricValue { /** * Output only. TrajectorySingleToolUse score. */ readonly score?: number; } /** * Results for TrajectorySingleToolUse metric. */ export interface GoogleCloudAiplatformV1TrajectorySingleToolUseResults { /** * Output only. TrajectorySingleToolUse metric values. */ readonly trajectorySingleToolUseMetricValues?: GoogleCloudAiplatformV1TrajectorySingleToolUseMetricValue[]; } /** * Spec for TrajectorySingleToolUse metric - returns 1 if tool is present in * the predicted trajectory, else 0. */ export interface GoogleCloudAiplatformV1TrajectorySingleToolUseSpec { /** * Required. Spec for tool name to be checked for in the predicted * trajectory. */ toolName?: string; } /** * A message representing a Trial. A Trial contains a unique set of Parameters * that has been or will be evaluated, along with the objective metrics got by * running the Trial. */ export interface GoogleCloudAiplatformV1Trial { /** * Output only. The identifier of the client that originally requested this * Trial. Each client is identified by a unique client_id. When a client asks * for a suggestion, Vertex AI Vizier will assign it a Trial. The client * should evaluate the Trial, complete it, and report back to Vertex AI * Vizier. If suggestion is asked again by same client_id before the Trial is * completed, the same Trial will be returned. Multiple clients with different * client_ids can ask for suggestions simultaneously, each of them will get * their own Trial. */ readonly clientId?: string; /** * Output only. The CustomJob name linked to the Trial. It's set for a * HyperparameterTuningJob's Trial. */ readonly customJob?: string; /** * Output only. Time when the Trial's status changed to `SUCCEEDED` or * `INFEASIBLE`. */ readonly endTime?: Date; /** * Output only. The final measurement containing the objective value. */ readonly finalMeasurement?: GoogleCloudAiplatformV1Measurement; /** * Output only. The identifier of the Trial assigned by the service. */ readonly id?: string; /** * Output only. A human readable string describing why the Trial is * infeasible. This is set only if Trial state is `INFEASIBLE`. */ readonly infeasibleReason?: string; /** * Output only. A list of measurements that are strictly lexicographically * ordered by their induced tuples (steps, elapsed_duration). These are used * for early stopping computations. */ readonly measurements?: GoogleCloudAiplatformV1Measurement[]; /** * Output only. Resource name of the Trial assigned by the service. 
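 *
 * Sketch of inspecting a Trial (the object would normally be returned by a
 * Vizier study/trial call; it is constructed inline here only to show the
 * shape):
 *
 * ```ts
 * const trial: GoogleCloudAiplatformV1Trial = {};
 * if (trial.state === "SUCCEEDED") {
 *   console.log(`trial ${trial.id} finished`, trial.finalMeasurement);
 * } else if (trial.state === "INFEASIBLE") {
 *   console.log(`trial ${trial.id} infeasible:`, trial.infeasibleReason);
 * }
 * ```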
*/ readonly name?: string; /** * Output only. The parameters of the Trial. */ readonly parameters?: GoogleCloudAiplatformV1TrialParameter[]; /** * Output only. Time when the Trial was started. */ readonly startTime?: Date; /** * Output only. The detailed state of the Trial. */ readonly state?: | "STATE_UNSPECIFIED" | "REQUESTED" | "ACTIVE" | "STOPPING" | "SUCCEEDED" | "INFEASIBLE"; /** * Output only. URIs for accessing [interactive * shells](https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell) * (one URI for each training node). Only available if this trial is part of a * HyperparameterTuningJob and the job's trial_job_spec.enable_web_access * field is `true`. The keys are names of each node used for the trial; for * example, `workerpool0-0` for the primary node, `workerpool1-0` for the * first node in the second worker pool, and `workerpool1-1` for the second * node in the second worker pool. The values are the URIs for each node's * interactive shell. */ readonly webAccessUris?: { [key: string]: string }; } export interface GoogleCloudAiplatformV1TrialContext { /** * A human-readable field which can store a description of this context. This * will become part of the resulting Trial's description field. */ description?: string; /** * If/when a Trial is generated or selected from this Context, its Parameters * will match any parameters specified here. (I.e. if this context specifies * parameter name:'a' int_value:3, then a resulting Trial will have * int_value:3 for its parameter named 'a'.) Note that we first attempt to * match existing REQUESTED Trials with contexts, and if there are no matches, * we generate suggestions in the subspace defined by the parameters specified * here. NOTE: a Context without any Parameters matches the entire feasible * search space. */ parameters?: GoogleCloudAiplatformV1TrialParameter[]; } /** * A message representing a parameter to be tuned. */ export interface GoogleCloudAiplatformV1TrialParameter { /** * Output only. The ID of the parameter. The parameter should be defined in * StudySpec's Parameters. */ readonly parameterId?: string; /** * Output only. The value of the parameter. `number_value` will be set if a * parameter defined in StudySpec is in type 'INTEGER', 'DOUBLE' or * 'DISCRETE'. `string_value` will be set if a parameter defined in StudySpec * is in type 'CATEGORICAL'. */ readonly value?: any; } /** * The Model Registry Model and Online Prediction Endpoint associated with this * TuningJob. */ export interface GoogleCloudAiplatformV1TunedModel { /** * Output only. A resource name of an Endpoint. Format: * `projects/{project}/locations/{location}/endpoints/{endpoint}`. */ readonly endpoint?: string; /** * Output only. The resource name of the TunedModel. Format: * `projects/{project}/locations/{location}/models/{model}`. */ readonly model?: string; } /** * TunedModel Reference for legacy model migration. */ export interface GoogleCloudAiplatformV1TunedModelRef { /** * Support migration from tuning job list page, from bison model to gemini * model. */ pipelineJob?: string; /** * Support migration from model registry. */ tunedModel?: string; /** * Support migration from tuning job list page, from gemini-1.0-pro-002 to * 1.5 and above. */ tuningJob?: string; } /** * The tuning data statistic values for TuningJob. */ export interface GoogleCloudAiplatformV1TuningDataStats { /** * The SFT Tuning data stats. 
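 *
 * Sketch of reading these stats from a finished TuningJob (the job object
 * would normally come from a tuning-job call; an empty literal is used here
 * only to illustrate the types):
 *
 * ```ts
 * const tuningJob: GoogleCloudAiplatformV1TuningJob = {};
 * console.log(tuningJob.tuningDataStats?.supervisedTuningDataStats);
 * ```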
*/ supervisedTuningDataStats?: GoogleCloudAiplatformV1SupervisedTuningDataStats; } /** * Represents a TuningJob that runs with Google owned models. */ export interface GoogleCloudAiplatformV1TuningJob { /** * The base model that is being tuned. See [Supported * models](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/tuning#supported_models). */ baseModel?: string; /** * Output only. Time when the TuningJob was created. */ readonly createTime?: Date; /** * Optional. The description of the TuningJob. */ description?: string; /** * Customer-managed encryption key options for a TuningJob. If this is set, * then all resources created by the TuningJob will be encrypted with the * provided encryption key. */ encryptionSpec?: GoogleCloudAiplatformV1EncryptionSpec; /** * Output only. Time when the TuningJob entered any of the following * JobStates: `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, * `JOB_STATE_CANCELLED`, `JOB_STATE_EXPIRED`. */ readonly endTime?: Date; /** * Output only. Only populated when job's state is `JOB_STATE_FAILED` or * `JOB_STATE_CANCELLED`. */ readonly error?: GoogleRpcStatus; /** * Output only. The Experiment associated with this TuningJob. */ readonly experiment?: string; /** * Optional. The labels with user-defined metadata to organize TuningJob and * generated resources such as Model and Endpoint. Label keys and values can * be no longer than 64 characters (Unicode codepoints), can only contain * lowercase letters, numeric characters, underscores and dashes. * International characters are allowed. See https://goo.gl/xmQnxf for more * information and examples of labels. */ labels?: { [key: string]: string }; /** * Output only. Identifier. Resource name of a TuningJob. Format: * `projects/{project}/locations/{location}/tuningJobs/{tuning_job}` */ readonly name?: string; /** * The service account that the tuningJob workload runs as. If not specified, * the Vertex AI Secure Fine-Tuned Service Agent in the project will be used. * See * https://cloud.google.com/iam/docs/service-agents#vertex-ai-secure-fine-tuning-service-agent * Users starting the pipeline must have the `iam.serviceAccounts.actAs` * permission on this service account. */ serviceAccount?: string; /** * Output only. Time when the TuningJob for the first time entered the * `JOB_STATE_RUNNING` state. */ readonly startTime?: Date; /** * Output only. The detailed state of the job. */ readonly state?: | "JOB_STATE_UNSPECIFIED" | "JOB_STATE_QUEUED" | "JOB_STATE_PENDING" | "JOB_STATE_RUNNING" | "JOB_STATE_SUCCEEDED" | "JOB_STATE_FAILED" | "JOB_STATE_CANCELLING" | "JOB_STATE_CANCELLED" | "JOB_STATE_PAUSED" | "JOB_STATE_EXPIRED" | "JOB_STATE_UPDATING" | "JOB_STATE_PARTIALLY_SUCCEEDED"; /** * Tuning Spec for Supervised Fine Tuning. */ supervisedTuningSpec?: GoogleCloudAiplatformV1SupervisedTuningSpec; /** * Output only. The tuned model resources associated with this TuningJob. */ readonly tunedModel?: GoogleCloudAiplatformV1TunedModel; /** * Optional. The display name of the TunedModel. The name can be up to 128 * characters long and can consist of any UTF-8 characters. */ tunedModelDisplayName?: string; /** * Output only. The tuning data statistics associated with this TuningJob. */ readonly tuningDataStats?: GoogleCloudAiplatformV1TuningDataStats; /** * Output only. Time when the TuningJob was most recently updated. 
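 *
 * Minimal sketch of a TuningJob body as it might be assembled before being
 * sent to a create call (the call itself and the base-model id are
 * assumptions; `supervisedTuningSpec` is defined elsewhere in this module
 * and is omitted here):
 *
 * ```ts
 * const newTuningJob: GoogleCloudAiplatformV1TuningJob = {
 *   baseModel: "gemini-1.0-pro-002", // hypothetical; see the supported-models link above
 *   tunedModelDisplayName: "my-tuned-model",
 *   labels: { team: "demo" },
 * };
 * ```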
*/ readonly updateTime?: Date; } function serializeGoogleCloudAiplatformV1TuningJob(data: any): GoogleCloudAiplatformV1TuningJob { return { ...data, supervisedTuningSpec: data["supervisedTuningSpec"] !== undefined ? serializeGoogleCloudAiplatformV1SupervisedTuningSpec(data["supervisedTuningSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1TuningJob(data: any): GoogleCloudAiplatformV1TuningJob { return { ...data, createTime: data["createTime"] !== undefined ? new Date(data["createTime"]) : undefined, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, supervisedTuningSpec: data["supervisedTuningSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1SupervisedTuningSpec(data["supervisedTuningSpec"]) : undefined, updateTime: data["updateTime"] !== undefined ? new Date(data["updateTime"]) : undefined, }; } /** * Runtime operation information for IndexEndpointService.UndeployIndex. */ export interface GoogleCloudAiplatformV1UndeployIndexOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for IndexEndpointService.UndeployIndex. */ export interface GoogleCloudAiplatformV1UndeployIndexRequest { /** * Required. The ID of the DeployedIndex to be undeployed from the * IndexEndpoint. */ deployedIndexId?: string; } /** * Response message for IndexEndpointService.UndeployIndex. */ export interface GoogleCloudAiplatformV1UndeployIndexResponse { } /** * Runtime operation information for EndpointService.UndeployModel. */ export interface GoogleCloudAiplatformV1UndeployModelOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for EndpointService.UndeployModel. */ export interface GoogleCloudAiplatformV1UndeployModelRequest { /** * Required. The ID of the DeployedModel to be undeployed from the Endpoint. */ deployedModelId?: string; /** * If this field is provided, then the Endpoint's traffic_split will be * overwritten with it. If last DeployedModel is being undeployed from the * Endpoint, the [Endpoint.traffic_split] will always end up empty when this * call returns. A DeployedModel will be successfully undeployed only if it * doesn't have any traffic assigned to it when this method executes, or if * this field unassigns any traffic to it. */ trafficSplit?: { [key: string]: number }; } /** * Response message for EndpointService.UndeployModel. */ export interface GoogleCloudAiplatformV1UndeployModelResponse { } /** * Contains model information necessary to perform batch prediction without * requiring a full model import. */ export interface GoogleCloudAiplatformV1UnmanagedContainerModel { /** * The path to the directory containing the Model artifact and any of its * supporting files. */ artifactUri?: string; /** * Input only. The specification of the container that is to be used when * deploying this Model. */ containerSpec?: GoogleCloudAiplatformV1ModelContainerSpec; /** * Contains the schemata used in Model's predictions and explanations */ predictSchemata?: GoogleCloudAiplatformV1PredictSchemata; } function serializeGoogleCloudAiplatformV1UnmanagedContainerModel(data: any): GoogleCloudAiplatformV1UnmanagedContainerModel { return { ...data, containerSpec: data["containerSpec"] !== undefined ? 
serializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1UnmanagedContainerModel(data: any): GoogleCloudAiplatformV1UnmanagedContainerModel { return { ...data, containerSpec: data["containerSpec"] !== undefined ? deserializeGoogleCloudAiplatformV1ModelContainerSpec(data["containerSpec"]) : undefined, }; } /** * Runtime operation information for UpdateDeploymentResourcePool method. */ export interface GoogleCloudAiplatformV1UpdateDeploymentResourcePoolOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for EndpointService.UpdateEndpointLongRunning. */ export interface GoogleCloudAiplatformV1UpdateEndpointLongRunningRequest { /** * Required. The Endpoint which replaces the resource on the server. * Currently we only support updating the `client_connection_config` field, * all the other fields' update will be blocked. */ endpoint?: GoogleCloudAiplatformV1Endpoint; } function serializeGoogleCloudAiplatformV1UpdateEndpointLongRunningRequest(data: any): GoogleCloudAiplatformV1UpdateEndpointLongRunningRequest { return { ...data, endpoint: data["endpoint"] !== undefined ? serializeGoogleCloudAiplatformV1Endpoint(data["endpoint"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1UpdateEndpointLongRunningRequest(data: any): GoogleCloudAiplatformV1UpdateEndpointLongRunningRequest { return { ...data, endpoint: data["endpoint"] !== undefined ? deserializeGoogleCloudAiplatformV1Endpoint(data["endpoint"]) : undefined, }; } /** * Runtime operation information for ModelService.UpdateExplanationDataset. */ export interface GoogleCloudAiplatformV1UpdateExplanationDatasetOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for ModelService.UpdateExplanationDataset. */ export interface GoogleCloudAiplatformV1UpdateExplanationDatasetRequest { /** * The example config containing the location of the dataset. */ examples?: GoogleCloudAiplatformV1Examples; } /** * Response message of ModelService.UpdateExplanationDataset operation. */ export interface GoogleCloudAiplatformV1UpdateExplanationDatasetResponse { } /** * Details of operations that perform update FeatureGroup. */ export interface GoogleCloudAiplatformV1UpdateFeatureGroupOperationMetadata { /** * Operation metadata for FeatureGroup. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform update FeatureOnlineStore. */ export interface GoogleCloudAiplatformV1UpdateFeatureOnlineStoreOperationMetadata { /** * Operation metadata for FeatureOnlineStore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform update Feature. */ export interface GoogleCloudAiplatformV1UpdateFeatureOperationMetadata { /** * Operation metadata for Feature Update. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform update Featurestore. */ export interface GoogleCloudAiplatformV1UpdateFeaturestoreOperationMetadata { /** * Operation metadata for Featurestore. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform update FeatureView. */ export interface GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata { /** * Operation metadata for FeatureView Update. 
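 *
 * These Update*OperationMetadata messages arrive inside the `metadata` field
 * of a `GoogleLongrunningOperation`. A sketch of reading one (the operation
 * object would come from an update call; it is stubbed here):
 *
 * ```ts
 * const op: GoogleLongrunningOperation = { done: true };
 * if (op.done && !op.error) {
 *   const meta = op.metadata as
 *     | GoogleCloudAiplatformV1UpdateFeatureViewOperationMetadata
 *     | undefined;
 *   console.log(meta?.genericMetadata);
 * }
 * ```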
*/ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Runtime operation information for IndexService.UpdateIndex. */ export interface GoogleCloudAiplatformV1UpdateIndexOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * The operation metadata with regard to Matching Engine Index operation. */ nearestNeighborSearchOperationMetadata?: GoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata; } function serializeGoogleCloudAiplatformV1UpdateIndexOperationMetadata(data: any): GoogleCloudAiplatformV1UpdateIndexOperationMetadata { return { ...data, nearestNeighborSearchOperationMetadata: data["nearestNeighborSearchOperationMetadata"] !== undefined ? serializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data["nearestNeighborSearchOperationMetadata"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1UpdateIndexOperationMetadata(data: any): GoogleCloudAiplatformV1UpdateIndexOperationMetadata { return { ...data, nearestNeighborSearchOperationMetadata: data["nearestNeighborSearchOperationMetadata"] !== undefined ? deserializeGoogleCloudAiplatformV1NearestNeighborSearchOperationMetadata(data["nearestNeighborSearchOperationMetadata"]) : undefined, }; } /** * Runtime operation information for * JobService.UpdateModelDeploymentMonitoringJob. */ export interface GoogleCloudAiplatformV1UpdateModelDeploymentMonitoringJobOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Details of operations that perform update PersistentResource. */ export interface GoogleCloudAiplatformV1UpdatePersistentResourceOperationMetadata { /** * Operation metadata for PersistentResource. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Progress Message for Update LRO */ progressMessage?: string; } /** * Runtime operation metadata for SpecialistPoolService.UpdateSpecialistPool. */ export interface GoogleCloudAiplatformV1UpdateSpecialistPoolOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * Output only. The name of the SpecialistPool to which the specialists are * being added. Format: * `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}` */ readonly specialistPool?: string; } /** * Details of operations that perform update Tensorboard. */ export interface GoogleCloudAiplatformV1UpdateTensorboardOperationMetadata { /** * Operation metadata for Tensorboard. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Metadata information for NotebookService.UpgradeNotebookRuntime. */ export interface GoogleCloudAiplatformV1UpgradeNotebookRuntimeOperationMetadata { /** * The operation generic information. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; /** * A human-readable message that shows the intermediate progress details of * NotebookRuntime. */ progressMessage?: string; } /** * Request message for NotebookService.UpgradeNotebookRuntime. */ export interface GoogleCloudAiplatformV1UpgradeNotebookRuntimeRequest { } /** * Details of ModelService.UploadModel operation. */ export interface GoogleCloudAiplatformV1UploadModelOperationMetadata { /** * The common part of the operation metadata. */ genericMetadata?: GoogleCloudAiplatformV1GenericOperationMetadata; } /** * Request message for ModelService.UploadModel. 
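 *
 * Minimal sketch of an upload request body (the ids below are hypothetical;
 * the `model` payload is a `GoogleCloudAiplatformV1Model` defined elsewhere
 * in this module and is omitted for brevity):
 *
 * ```ts
 * const uploadReq: GoogleCloudAiplatformV1UploadModelRequest = {
 *   modelId: "my-model",
 *   serviceAccount: "uploader@my-project.iam.gserviceaccount.com", // hypothetical
 * };
 * ```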
*/ export interface GoogleCloudAiplatformV1UploadModelRequest { /** * Required. The Model to create. */ model?: GoogleCloudAiplatformV1Model; /** * Optional. The ID to use for the uploaded Model, which will become the * final component of the model resource name. This value may be up to 63 * characters, and valid characters are `[a-z0-9_-]`. The first character * cannot be a number or hyphen. */ modelId?: string; /** * Optional. The resource name of the model into which to upload the version. * Only specify this field when uploading a new version. */ parentModel?: string; /** * Optional. The user-provided custom service account to use to do the model * upload. If empty, [Vertex AI Service * Agent](https://cloud.google.com/vertex-ai/docs/general/access-control#service-agents) * will be used to access resources needed to upload the model. This account * must belong to the target project where the model is uploaded to, i.e., the * project specified in the `parent` field of this request and have necessary * read permissions (to Google Cloud Storage, Artifact Registry, etc.). */ serviceAccount?: string; } function serializeGoogleCloudAiplatformV1UploadModelRequest(data: any): GoogleCloudAiplatformV1UploadModelRequest { return { ...data, model: data["model"] !== undefined ? serializeGoogleCloudAiplatformV1Model(data["model"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1UploadModelRequest(data: any): GoogleCloudAiplatformV1UploadModelRequest { return { ...data, model: data["model"] !== undefined ? deserializeGoogleCloudAiplatformV1Model(data["model"]) : undefined, }; } /** * Response message of ModelService.UploadModel operation. */ export interface GoogleCloudAiplatformV1UploadModelResponse { /** * The name of the uploaded Model resource. Format: * `projects/{project}/locations/{location}/models/{model}` */ model?: string; /** * Output only. The version ID of the model that is uploaded. */ readonly modelVersionId?: string; } /** * Config for uploading RagFile. */ export interface GoogleCloudAiplatformV1UploadRagFileConfig { /** * Specifies the transformation config for RagFiles. */ ragFileTransformationConfig?: GoogleCloudAiplatformV1RagFileTransformationConfig; } /** * Request message for VertexRagDataService.UploadRagFile. */ export interface GoogleCloudAiplatformV1UploadRagFileRequest { /** * Required. The RagFile to upload. */ ragFile?: GoogleCloudAiplatformV1RagFile; /** * Required. The config for the RagFiles to be uploaded into the RagCorpus. * VertexRagDataService.UploadRagFile. */ uploadRagFileConfig?: GoogleCloudAiplatformV1UploadRagFileConfig; } function serializeGoogleCloudAiplatformV1UploadRagFileRequest(data: any): GoogleCloudAiplatformV1UploadRagFileRequest { return { ...data, ragFile: data["ragFile"] !== undefined ? serializeGoogleCloudAiplatformV1RagFile(data["ragFile"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1UploadRagFileRequest(data: any): GoogleCloudAiplatformV1UploadRagFileRequest { return { ...data, ragFile: data["ragFile"] !== undefined ? deserializeGoogleCloudAiplatformV1RagFile(data["ragFile"]) : undefined, }; } /** * Response message for VertexRagDataService.UploadRagFile. */ export interface GoogleCloudAiplatformV1UploadRagFileResponse { /** * The error that occurred while processing the RagFile. */ error?: GoogleRpcStatus; /** * The RagFile that had been uploaded into the RagCorpus. 
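 *
 * Sketch of handling this response (an empty literal stands in for a real
 * response from the upload call):
 *
 * ```ts
 * const res: GoogleCloudAiplatformV1UploadRagFileResponse = {};
 * if (res.error) {
 *   console.error("RagFile upload failed:", res.error);
 * } else {
 *   console.log("Uploaded RagFile:", res.ragFile);
 * }
 * ```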
*/ ragFile?: GoogleCloudAiplatformV1RagFile; } function serializeGoogleCloudAiplatformV1UploadRagFileResponse(data: any): GoogleCloudAiplatformV1UploadRagFileResponse { return { ...data, ragFile: data["ragFile"] !== undefined ? serializeGoogleCloudAiplatformV1RagFile(data["ragFile"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1UploadRagFileResponse(data: any): GoogleCloudAiplatformV1UploadRagFileResponse { return { ...data, ragFile: data["ragFile"] !== undefined ? deserializeGoogleCloudAiplatformV1RagFile(data["ragFile"]) : undefined, }; } /** * Request message for IndexService.UpsertDatapoints */ export interface GoogleCloudAiplatformV1UpsertDatapointsRequest { /** * A list of datapoints to be created/updated. */ datapoints?: GoogleCloudAiplatformV1IndexDatapoint[]; /** * Optional. Update mask is used to specify the fields to be overwritten in * the datapoints by the update. The fields specified in the update_mask are * relative to each IndexDatapoint inside datapoints, not the full request. * Updatable fields: * Use `all_restricts` to update both restricts and * numeric_restricts. */ updateMask?: string /* FieldMask */; } function serializeGoogleCloudAiplatformV1UpsertDatapointsRequest(data: any): GoogleCloudAiplatformV1UpsertDatapointsRequest { return { ...data, datapoints: data["datapoints"] !== undefined ? data["datapoints"].map((item: any) => (serializeGoogleCloudAiplatformV1IndexDatapoint(item))) : undefined, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeGoogleCloudAiplatformV1UpsertDatapointsRequest(data: any): GoogleCloudAiplatformV1UpsertDatapointsRequest { return { ...data, datapoints: data["datapoints"] !== undefined ? data["datapoints"].map((item: any) => (deserializeGoogleCloudAiplatformV1IndexDatapoint(item))) : undefined, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Response message for IndexService.UpsertDatapoints */ export interface GoogleCloudAiplatformV1UpsertDatapointsResponse { } /** * References an API call. It contains more information about long running * operation and Jobs that are triggered by the API call. */ export interface GoogleCloudAiplatformV1UserActionReference { /** * For API calls that start a LabelingJob. Resource name of the LabelingJob. * Format: * `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` */ dataLabelingJob?: string; /** * The method name of the API RPC call. For example, * "/google.cloud.aiplatform.{apiVersion}.DatasetService.CreateDataset" */ method?: string; /** * For API calls that return a long running operation. Resource name of the * long running operation. Format: * `projects/{project}/locations/{location}/operations/{operation}` */ operation?: string; } /** * Value is the value of the field. */ export interface GoogleCloudAiplatformV1Value { /** * A double value. */ doubleValue?: number; /** * An integer value. */ intValue?: bigint; /** * A string value. */ stringValue?: string; } function serializeGoogleCloudAiplatformV1Value(data: any): GoogleCloudAiplatformV1Value { return { ...data, intValue: data["intValue"] !== undefined ? String(data["intValue"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1Value(data: any): GoogleCloudAiplatformV1Value { return { ...data, intValue: data["intValue"] !== undefined ? BigInt(data["intValue"]) : undefined, }; } /** * Retrieve from Vertex AI Search datastore or engine for grounding. datastore * and engine are mutually exclusive. 
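 *
 * For example (resource names hypothetical), provide exactly one of the two
 * fields:
 *
 * ```ts
 * const vertexAiSearch: GoogleCloudAiplatformV1VertexAISearch = {
 *   datastore:
 *     "projects/my-project/locations/global/collections/default_collection/dataStores/my-data-store",
 *   // `engine` must be left unset when `datastore` is set, and vice versa.
 * };
 * ```
 *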
See * https://cloud.google.com/products/agent-builder */ export interface GoogleCloudAiplatformV1VertexAISearch { /** * Optional. Fully-qualified Vertex AI Search data store resource ID. Format: * `projects/{project}/locations/{location}/collections/{collection}/dataStores/{dataStore}` */ datastore?: string; /** * Optional. Fully-qualified Vertex AI Search engine resource ID. Format: * `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}` */ engine?: string; } /** * Config for the Vertex AI Search. */ export interface GoogleCloudAiplatformV1VertexAiSearchConfig { /** * Vertex AI Search Serving Config resource full name. For example, * `projects/{project}/locations/{location}/collections/{collection}/engines/{engine}/servingConfigs/{serving_config}` * or * `projects/{project}/locations/{location}/collections/{collection}/dataStores/{data_store}/servingConfigs/{serving_config}`. */ servingConfig?: string; } /** * Retrieve from Vertex RAG Store for grounding. */ export interface GoogleCloudAiplatformV1VertexRagStore { /** * Optional. The representation of the rag source. It can be used to specify * corpus only or ragfiles. Currently only support one corpus or multiple * files from one corpus. In the future we may open up multiple corpora * support. */ ragResources?: GoogleCloudAiplatformV1VertexRagStoreRagResource[]; /** * Optional. The retrieval config for the Rag query. */ ragRetrievalConfig?: GoogleCloudAiplatformV1RagRetrievalConfig; /** * Optional. Number of top k results to return from the selected corpora. */ similarityTopK?: number; /** * Optional. Only return results with vector distance smaller than the * threshold. */ vectorDistanceThreshold?: number; } /** * The definition of the Rag resource. */ export interface GoogleCloudAiplatformV1VertexRagStoreRagResource { /** * Optional. RagCorpora resource name. Format: * `projects/{project}/locations/{location}/ragCorpora/{rag_corpus}` */ ragCorpus?: string; /** * Optional. rag_file_id. The files should be in the same rag_corpus set in * rag_corpus field. */ ragFileIds?: string[]; } /** * Metadata describes the input video content. */ export interface GoogleCloudAiplatformV1VideoMetadata { /** * Optional. The end offset of the video. */ endOffset?: number /* Duration */; /** * Optional. The start offset of the video. */ startOffset?: number /* Duration */; } function serializeGoogleCloudAiplatformV1VideoMetadata(data: any): GoogleCloudAiplatformV1VideoMetadata { return { ...data, endOffset: data["endOffset"] !== undefined ? data["endOffset"] : undefined, startOffset: data["startOffset"] !== undefined ? data["startOffset"] : undefined, }; } function deserializeGoogleCloudAiplatformV1VideoMetadata(data: any): GoogleCloudAiplatformV1VideoMetadata { return { ...data, endOffset: data["endOffset"] !== undefined ? data["endOffset"] : undefined, startOffset: data["startOffset"] !== undefined ? data["startOffset"] : undefined, }; } /** * The configuration for the voice to use. */ export interface GoogleCloudAiplatformV1VoiceConfig { /** * The configuration for the prebuilt voice to use. */ prebuiltVoiceConfig?: GoogleCloudAiplatformV1PrebuiltVoiceConfig; } /** * Represents the spec of a worker pool in a job. */ export interface GoogleCloudAiplatformV1WorkerPoolSpec { /** * The custom container task. */ containerSpec?: GoogleCloudAiplatformV1ContainerSpec; /** * Disk spec. */ diskSpec?: GoogleCloudAiplatformV1DiskSpec; /** * Optional. Immutable. The specification of a single machine. 
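 *
 * Sketch of a worker pool spec (machine type and replica count are
 * illustrative; `GoogleCloudAiplatformV1MachineSpec` is defined elsewhere in
 * this module). Note that `replicaCount` is a `bigint` and is converted to a
 * string by this module's serializer.
 *
 * ```ts
 * const pool: GoogleCloudAiplatformV1WorkerPoolSpec = {
 *   machineSpec: { machineType: "n1-standard-8" }, // hypothetical machine type
 *   replicaCount: 1n,
 * };
 * ```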
*/ machineSpec?: GoogleCloudAiplatformV1MachineSpec; /** * Optional. List of NFS mount spec. */ nfsMounts?: GoogleCloudAiplatformV1NfsMount[]; /** * The Python packaged task. */ pythonPackageSpec?: GoogleCloudAiplatformV1PythonPackageSpec; /** * Optional. The number of worker replicas to use for this worker pool. */ replicaCount?: bigint; } function serializeGoogleCloudAiplatformV1WorkerPoolSpec(data: any): GoogleCloudAiplatformV1WorkerPoolSpec { return { ...data, replicaCount: data["replicaCount"] !== undefined ? String(data["replicaCount"]) : undefined, }; } function deserializeGoogleCloudAiplatformV1WorkerPoolSpec(data: any): GoogleCloudAiplatformV1WorkerPoolSpec { return { ...data, replicaCount: data["replicaCount"] !== undefined ? BigInt(data["replicaCount"]) : undefined, }; } /** * Contains Feature values to be written for a specific entity. */ export interface GoogleCloudAiplatformV1WriteFeatureValuesPayload { /** * Required. The ID of the entity. */ entityId?: string; /** * Required. Feature values to be written, mapping from Feature ID to value. * Up to 100,000 `feature_values` entries may be written across all payloads. * The feature generation time, aligned by days, must be no older than five * years (1825 days) and no later than one year (366 days) in the future. */ featureValues?: { [key: string]: GoogleCloudAiplatformV1FeatureValue }; } function serializeGoogleCloudAiplatformV1WriteFeatureValuesPayload(data: any): GoogleCloudAiplatformV1WriteFeatureValuesPayload { return { ...data, featureValues: data["featureValues"] !== undefined ? Object.fromEntries(Object.entries(data["featureValues"]).map(([k, v]: [string, any]) => ([k, serializeGoogleCloudAiplatformV1FeatureValue(v)]))) : undefined, }; } function deserializeGoogleCloudAiplatformV1WriteFeatureValuesPayload(data: any): GoogleCloudAiplatformV1WriteFeatureValuesPayload { return { ...data, featureValues: data["featureValues"] !== undefined ? Object.fromEntries(Object.entries(data["featureValues"]).map(([k, v]: [string, any]) => ([k, deserializeGoogleCloudAiplatformV1FeatureValue(v)]))) : undefined, }; } /** * Request message for FeaturestoreOnlineServingService.WriteFeatureValues. */ export interface GoogleCloudAiplatformV1WriteFeatureValuesRequest { /** * Required. The entities to be written. Up to 100,000 feature values can be * written across all `payloads`. */ payloads?: GoogleCloudAiplatformV1WriteFeatureValuesPayload[]; } function serializeGoogleCloudAiplatformV1WriteFeatureValuesRequest(data: any): GoogleCloudAiplatformV1WriteFeatureValuesRequest { return { ...data, payloads: data["payloads"] !== undefined ? data["payloads"].map((item: any) => (serializeGoogleCloudAiplatformV1WriteFeatureValuesPayload(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1WriteFeatureValuesRequest(data: any): GoogleCloudAiplatformV1WriteFeatureValuesRequest { return { ...data, payloads: data["payloads"] !== undefined ? data["payloads"].map((item: any) => (deserializeGoogleCloudAiplatformV1WriteFeatureValuesPayload(item))) : undefined, }; } /** * Response message for FeaturestoreOnlineServingService.WriteFeatureValues. */ export interface GoogleCloudAiplatformV1WriteFeatureValuesResponse { } /** * Request message for TensorboardService.WriteTensorboardExperimentData. */ export interface GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest { /** * Required. Requests containing per-run TensorboardTimeSeries data to write. 
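 *
 * Minimal sketch of such a request (the run resource name is hypothetical;
 * `timeSeriesData` entries use `GoogleCloudAiplatformV1TimeSeriesData`,
 * defined elsewhere in this module, and are omitted here):
 *
 * ```ts
 * const writeReq: GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest = {
 *   writeRunDataRequests: [{
 *     tensorboardRun:
 *       "projects/my-project/locations/us-central1/tensorboards/1/experiments/exp/runs/run-1",
 *   }],
 * };
 * ```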
*/ writeRunDataRequests?: GoogleCloudAiplatformV1WriteTensorboardRunDataRequest[]; } function serializeGoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest(data: any): GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest { return { ...data, writeRunDataRequests: data["writeRunDataRequests"] !== undefined ? data["writeRunDataRequests"].map((item: any) => (serializeGoogleCloudAiplatformV1WriteTensorboardRunDataRequest(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest(data: any): GoogleCloudAiplatformV1WriteTensorboardExperimentDataRequest { return { ...data, writeRunDataRequests: data["writeRunDataRequests"] !== undefined ? data["writeRunDataRequests"].map((item: any) => (deserializeGoogleCloudAiplatformV1WriteTensorboardRunDataRequest(item))) : undefined, }; } /** * Response message for TensorboardService.WriteTensorboardExperimentData. */ export interface GoogleCloudAiplatformV1WriteTensorboardExperimentDataResponse { } /** * Request message for TensorboardService.WriteTensorboardRunData. */ export interface GoogleCloudAiplatformV1WriteTensorboardRunDataRequest { /** * Required. The resource name of the TensorboardRun to write data to. * Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}` */ tensorboardRun?: string; /** * Required. The TensorboardTimeSeries data to write. Values with in a time * series are indexed by their step value. Repeated writes to the same step * will overwrite the existing value for that step. The upper limit of data * points per write request is 5000. */ timeSeriesData?: GoogleCloudAiplatformV1TimeSeriesData[]; } function serializeGoogleCloudAiplatformV1WriteTensorboardRunDataRequest(data: any): GoogleCloudAiplatformV1WriteTensorboardRunDataRequest { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? data["timeSeriesData"].map((item: any) => (serializeGoogleCloudAiplatformV1TimeSeriesData(item))) : undefined, }; } function deserializeGoogleCloudAiplatformV1WriteTensorboardRunDataRequest(data: any): GoogleCloudAiplatformV1WriteTensorboardRunDataRequest { return { ...data, timeSeriesData: data["timeSeriesData"] !== undefined ? data["timeSeriesData"].map((item: any) => (deserializeGoogleCloudAiplatformV1TimeSeriesData(item))) : undefined, }; } /** * Response message for TensorboardService.WriteTensorboardRunData. */ export interface GoogleCloudAiplatformV1WriteTensorboardRunDataResponse { } /** * An explanation method that redistributes Integrated Gradients attributions * to segmented regions, taking advantage of the model's fully differentiable * structure. Refer to this paper for more details: * https://arxiv.org/abs/1906.02825 Supported only by image Models. */ export interface GoogleCloudAiplatformV1XraiAttribution { /** * Config for XRAI with blur baseline. When enabled, a linear path from the * maximally blurred image to the input image is created. Using a blurred * baseline instead of zero (black image) is motivated by the BlurIG approach * explained here: https://arxiv.org/abs/2004.03383 */ blurBaselineConfig?: GoogleCloudAiplatformV1BlurBaselineConfig; /** * Config for SmoothGrad approximation of gradients. When enabled, the * gradients are approximated by averaging the gradients from noisy samples in * the vicinity of the inputs. Adding noise can help improve the computed * gradients. 
Refer to this paper for more details: * https://arxiv.org/pdf/1706.03825.pdf */ smoothGradConfig?: GoogleCloudAiplatformV1SmoothGradConfig; /** * Required. The number of steps for approximating the path integral. A good * value to start is 50 and gradually increase until the sum to diff property * is met within the desired error range. Valid range of its value is [1, * 100], inclusively. */ stepCount?: number; } /** * The response message for Locations.ListLocations. */ export interface GoogleCloudLocationListLocationsResponse { /** * A list of locations that matches the specified filter in the request. */ locations?: GoogleCloudLocationLocation[]; /** * The standard List next-page token. */ nextPageToken?: string; } /** * A resource that represents a Google Cloud location. */ export interface GoogleCloudLocationLocation { /** * The friendly name for this location, typically a nearby city name. For * example, "Tokyo". */ displayName?: string; /** * Cross-service attributes for the location. For example * {"cloud.googleapis.com/region": "us-east1"} */ labels?: { [key: string]: string }; /** * The canonical id for this location. For example: `"us-east1"`. */ locationId?: string; /** * Service-specific metadata. For example the available capacity at the given * location. */ metadata?: { [key: string]: any }; /** * Resource name for the location, which may vary between implementations. * For example: `"projects/example-project/locations/us-east1"` */ name?: string; } /** * Associates `members`, or principals, with a `role`. */ export interface GoogleIamV1Binding { /** * The condition that is associated with this binding. If the condition * evaluates to `true`, then this binding applies to the current request. If * the condition evaluates to `false`, then this binding does not apply to the * current request. However, a different role binding might grant the same * role to one or more of the principals in this binding. To learn which * resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ condition?: GoogleTypeExpr; /** * Specifies the principals requesting access for a Google Cloud resource. * `members` can have the following values: * `allUsers`: A special identifier * that represents anyone who is on the internet; with or without a Google * account. * `allAuthenticatedUsers`: A special identifier that represents * anyone who is authenticated with a Google account or a service account. * Does not include identities that come from external identity providers * (IdPs) through identity federation. * `user:{emailid}`: An email address * that represents a specific Google account. For example, `alice@example.com` * . * `serviceAccount:{emailid}`: An email address that represents a Google * service account. For example, `my-other-app@appspot.gserviceaccount.com`. * * `serviceAccount:{projectid}.svc.id.goog[{namespace}/{kubernetes-sa}]`: An * identifier for a [Kubernetes service * account](https://cloud.google.com/kubernetes-engine/docs/how-to/kubernetes-service-accounts). * For example, `my-project.svc.id.goog[my-namespace/my-kubernetes-sa]`. * * `group:{emailid}`: An email address that represents a Google group. For * example, `admins@example.com`. * `domain:{domain}`: The G Suite domain * (primary) that represents all the users of that domain. For example, * `google.com` or `example.com`. 
* * `principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: * A single identity in a workforce identity pool. * * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/group/{group_id}`: * All workforce identities in a group. * * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: * All workforce identities with a specific attribute value. * * `principalSet://iam.googleapis.com/locations/global/workforcePools/{pool_id}/*`: * All identities in a workforce identity pool. * * `principal://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/subject/{subject_attribute_value}`: * A single identity in a workload identity pool. * * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/group/{group_id}`: * A workload identity pool group. * * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/attribute.{attribute_name}/{attribute_value}`: * All identities in a workload identity pool with a certain attribute. * * `principalSet://iam.googleapis.com/projects/{project_number}/locations/global/workloadIdentityPools/{pool_id}/*`: * All identities in a workload identity pool. * * `deleted:user:{emailid}?uid={uniqueid}`: An email address (plus unique * identifier) representing a user that has been recently deleted. For * example, `alice@example.com?uid=123456789012345678901`. If the user is * recovered, this value reverts to `user:{emailid}` and the recovered user * retains the role in the binding. * * `deleted:serviceAccount:{emailid}?uid={uniqueid}`: An email address (plus * unique identifier) representing a service account that has been recently * deleted. For example, * `my-other-app@appspot.gserviceaccount.com?uid=123456789012345678901`. If * the service account is undeleted, this value reverts to * `serviceAccount:{emailid}` and the undeleted service account retains the * role in the binding. * `deleted:group:{emailid}?uid={uniqueid}`: An email * address (plus unique identifier) representing a Google group that has been * recently deleted. For example, * `admins@example.com?uid=123456789012345678901`. If the group is recovered, * this value reverts to `group:{emailid}` and the recovered group retains the * role in the binding. * * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/{pool_id}/subject/{subject_attribute_value}`: * Deleted single identity in a workforce identity pool. For example, * `deleted:principal://iam.googleapis.com/locations/global/workforcePools/my-pool-id/subject/my-subject-attribute-value`. */ members?: string[]; /** * Role that is assigned to the list of `members`, or principals. For * example, `roles/viewer`, `roles/editor`, or `roles/owner`. For an overview * of the IAM roles and permissions, see the [IAM * documentation](https://cloud.google.com/iam/docs/roles-overview). For a * list of the available pre-defined roles, see * [here](https://cloud.google.com/iam/docs/understanding-roles). */ role?: string; } /** * An Identity and Access Management (IAM) policy, which specifies access * controls for Google Cloud resources. A `Policy` is a collection of * `bindings`. A `binding` binds one or more `members`, or principals, to a * single `role`. Principals can be user accounts, service accounts, Google * groups, and domains (such as G Suite). 
A `role` is a named list of * permissions; each `role` can be an IAM predefined role or a user-created * custom role. For some types of Google Cloud resources, a `binding` can also * specify a `condition`, which is a logical expression that allows access to a * resource only if the expression evaluates to `true`. A condition can add * constraints based on attributes of the request, the resource, or both. To * learn which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). * **JSON example:** ``` { "bindings": [ { "role": * "roles/resourcemanager.organizationAdmin", "members": [ * "user:mike@example.com", "group:admins@example.com", "domain:google.com", * "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": * "roles/resourcemanager.organizationViewer", "members": [ * "user:eve@example.com" ], "condition": { "title": "expirable access", * "description": "Does not grant access after Sep 2020", "expression": * "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": * "BwWWja0YfJA=", "version": 3 } ``` **YAML example:** ``` bindings: - members: * - user:mike@example.com - group:admins@example.com - domain:google.com - * serviceAccount:my-project-id@appspot.gserviceaccount.com role: * roles/resourcemanager.organizationAdmin - members: - user:eve@example.com * role: roles/resourcemanager.organizationViewer condition: title: expirable * access description: Does not grant access after Sep 2020 expression: * request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= * version: 3 ``` For a description of IAM and its features, see the [IAM * documentation](https://cloud.google.com/iam/docs/). */ export interface GoogleIamV1Policy { /** * Associates a list of `members`, or principals, with a `role`. Optionally, * may specify a `condition` that determines how and when the `bindings` are * applied. Each of the `bindings` must contain at least one principal. The * `bindings` in a `Policy` can refer to up to 1,500 principals; up to 250 of * these principals can be Google groups. Each occurrence of a principal * counts towards these limits. For example, if the `bindings` grant 50 * different roles to `user:alice@example.com`, and not to any other * principal, then you can add another 1,450 principals to the `bindings` in * the `Policy`. */ bindings?: GoogleIamV1Binding[]; /** * `etag` is used for optimistic concurrency control as a way to help prevent * simultaneous updates of a policy from overwriting each other. It is * strongly suggested that systems make use of the `etag` in the * read-modify-write cycle to perform policy updates in order to avoid race * conditions: An `etag` is returned in the response to `getIamPolicy`, and * systems are expected to put that etag in the request to `setIamPolicy` to * ensure that their change will be applied to the same version of the policy. * **Important:** If you use IAM Conditions, you must include the `etag` field * whenever you call `setIamPolicy`. If you omit this field, then IAM allows * you to overwrite a version `3` policy with a version `1` policy, and all of * the conditions in the version `3` policy are lost. */ etag?: Uint8Array; /** * Specifies the format of the policy. Valid values are `0`, `1`, and `3`. * Requests that specify an invalid value are rejected. Any operation that * affects conditional role bindings must specify version `3`. 
This * requirement applies to the following operations: * Getting a policy that * includes a conditional role binding * Adding a conditional role binding to * a policy * Changing a conditional role binding in a policy * Removing any * role binding, with or without a condition, from a policy that includes * conditions **Important:** If you use IAM Conditions, you must include the * `etag` field whenever you call `setIamPolicy`. If you omit this field, then * IAM allows you to overwrite a version `3` policy with a version `1` policy, * and all of the conditions in the version `3` policy are lost. If a policy * does not include any conditions, operations on that policy may specify any * valid version or leave the field unset. To learn which resources support * conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ version?: number; } function serializeGoogleIamV1Policy(data: any): GoogleIamV1Policy { return { ...data, etag: data["etag"] !== undefined ? encodeBase64(data["etag"]) : undefined, }; } function deserializeGoogleIamV1Policy(data: any): GoogleIamV1Policy { return { ...data, etag: data["etag"] !== undefined ? decodeBase64(data["etag"] as string) : undefined, }; } /** * Request message for `SetIamPolicy` method. */ export interface GoogleIamV1SetIamPolicyRequest { /** * REQUIRED: The complete policy to be applied to the `resource`. The size of * the policy is limited to a few 10s of KB. An empty policy is a valid policy * but certain Google Cloud services (such as Projects) might reject them. */ policy?: GoogleIamV1Policy; } function serializeGoogleIamV1SetIamPolicyRequest(data: any): GoogleIamV1SetIamPolicyRequest { return { ...data, policy: data["policy"] !== undefined ? serializeGoogleIamV1Policy(data["policy"]) : undefined, }; } function deserializeGoogleIamV1SetIamPolicyRequest(data: any): GoogleIamV1SetIamPolicyRequest { return { ...data, policy: data["policy"] !== undefined ? deserializeGoogleIamV1Policy(data["policy"]) : undefined, }; } /** * Response message for `TestIamPermissions` method. */ export interface GoogleIamV1TestIamPermissionsResponse { /** * A subset of `TestPermissionsRequest.permissions` that the caller is * allowed. */ permissions?: string[]; } /** * The response message for Operations.ListOperations. */ export interface GoogleLongrunningListOperationsResponse { /** * The standard List next-page token. */ nextPageToken?: string; /** * A list of operations that matches the specified filter in the request. */ operations?: GoogleLongrunningOperation[]; } /** * This resource represents a long-running operation that is the result of a * network API call. */ export interface GoogleLongrunningOperation { /** * If the value is `false`, it means the operation is still in progress. If * `true`, the operation is completed, and either `error` or `response` is * available. */ done?: boolean; /** * The error result of the operation in case of failure or cancellation. */ error?: GoogleRpcStatus; /** * Service-specific metadata associated with the operation. It typically * contains progress information and common metadata such as create time. Some * services might not provide such metadata. Any method that returns a * long-running operation should document the metadata type, if any. */ metadata?: { [key: string]: any }; /** * The server-assigned name, which is only unique within the same service * that originally returns it. 
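   *
   * As an illustrative sketch (not part of the generated client), a caller
   * that has polled an operation by this name might unwrap the final result
   * like so:
   *
   * ```
   * function unwrapOperation(op: GoogleLongrunningOperation): { [key: string]: any } {
   *   if (!op.done) throw new Error(`operation ${op.name} is still running`);
   *   if (op.error) throw new Error(`operation ${op.name} failed: ${op.error.message}`);
   *   return op.response ?? {};
   * }
   * ```
   *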
If you use the default HTTP mapping, the `name` * should be a resource name ending with `operations/{unique_id}`. */ name?: string; /** * The normal, successful response of the operation. If the original method * returns no data on success, such as `Delete`, the response is * `google.protobuf.Empty`. If the original method is standard * `Get`/`Create`/`Update`, the response should be the resource. For other * methods, the response should have the type `XxxResponse`, where `Xxx` is * the original method name. For example, if the original method name is * `TakeSnapshot()`, the inferred response type is `TakeSnapshotResponse`. */ response?: { [key: string]: any }; } /** * A generic empty message that you can re-use to avoid defining duplicated * empty messages in your APIs. A typical example is to use it as the request or * the response type of an API method. For instance: service Foo { rpc * Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } */ export interface GoogleProtobufEmpty { } /** * The `Status` type defines a logical error model that is suitable for * different programming environments, including REST APIs and RPC APIs. It is * used by [gRPC](https://github.com/grpc). Each `Status` message contains three * pieces of data: error code, error message, and error details. You can find * out more about this error model and how to work with it in the [API Design * Guide](https://cloud.google.com/apis/design/errors). */ export interface GoogleRpcStatus { /** * The status code, which should be an enum value of google.rpc.Code. */ code?: number; /** * A list of messages that carry the error details. There is a common set of * message types for APIs to use. */ details?: { [key: string]: any }[]; /** * A developer-facing error message, which should be in English. Any * user-facing error message should be localized and sent in the * google.rpc.Status.details field, or localized by the client. */ message?: string; } /** * Represents a color in the RGBA color space. This representation is designed * for simplicity of conversion to and from color representations in various * languages over compactness. For example, the fields of this representation * can be trivially provided to the constructor of `java.awt.Color` in Java; it * can also be trivially provided to UIColor's `+colorWithRed:green:blue:alpha` * method in iOS; and, with just a little work, it can be easily formatted into * a CSS `rgba()` string in JavaScript. This reference page doesn't have * information about the absolute color space that should be used to interpret * the RGB value—for example, sRGB, Adobe RGB, DCI-P3, and BT.2020. By default, * applications should assume the sRGB color space. When color equality needs to * be decided, implementations, unless documented otherwise, treat two colors as * equal if all their red, green, blue, and alpha values each differ by at most * `1e-5`. Example (Java): import com.google.type.Color; // ... public static * java.awt.Color fromProto(Color protocolor) { float alpha = * protocolor.hasAlpha() ? 
protocolor.getAlpha().getValue() : 1.0; return new * java.awt.Color( protocolor.getRed(), protocolor.getGreen(), * protocolor.getBlue(), alpha); } public static Color toProto(java.awt.Color * color) { float red = (float) color.getRed(); float green = (float) * color.getGreen(); float blue = (float) color.getBlue(); float denominator = * 255.0; Color.Builder resultBuilder = Color .newBuilder() .setRed(red / * denominator) .setGreen(green / denominator) .setBlue(blue / denominator); int * alpha = color.getAlpha(); if (alpha != 255) { result.setAlpha( FloatValue * .newBuilder() .setValue(((float) alpha) / denominator) .build()); } return * resultBuilder.build(); } // ... Example (iOS / Obj-C): // ... static UIColor* * fromProto(Color* protocolor) { float red = [protocolor red]; float green = * [protocolor green]; float blue = [protocolor blue]; FloatValue* alpha_wrapper * = [protocolor alpha]; float alpha = 1.0; if (alpha_wrapper != nil) { alpha = * [alpha_wrapper value]; } return [UIColor colorWithRed:red green:green * blue:blue alpha:alpha]; } static Color* toProto(UIColor* color) { CGFloat * red, green, blue, alpha; if (![color getRed:&red green:&green blue:&blue * alpha:&alpha]) { return nil; } Color* result = [[Color alloc] init]; [result * setRed:red]; [result setGreen:green]; [result setBlue:blue]; if (alpha <= * 0.9999) { [result setAlpha:floatWrapperWithValue(alpha)]; } [result * autorelease]; return result; } // ... Example (JavaScript): // ... var * protoToCssColor = function(rgb_color) { var redFrac = rgb_color.red || 0.0; * var greenFrac = rgb_color.green || 0.0; var blueFrac = rgb_color.blue || 0.0; * var red = Math.floor(redFrac * 255); var green = Math.floor(greenFrac * 255); * var blue = Math.floor(blueFrac * 255); if (!('alpha' in rgb_color)) { return * rgbToCssColor(red, green, blue); } var alphaFrac = rgb_color.alpha.value || * 0.0; var rgbParams = [red, green, blue].join(','); return ['rgba(', * rgbParams, ',', alphaFrac, ')'].join(''); }; var rgbToCssColor = * function(red, green, blue) { var rgbNumber = new Number((red << 16) | (green * << 8) | blue); var hexString = rgbNumber.toString(16); var missingZeros = 6 - * hexString.length; var resultBuilder = ['#']; for (var i = 0; i < * missingZeros; i++) { resultBuilder.push('0'); } * resultBuilder.push(hexString); return resultBuilder.join(''); }; // ... */ export interface GoogleTypeColor { /** * The fraction of this color that should be applied to the pixel. That is, * the final pixel color is defined by the equation: `pixel color = alpha * * (this color) + (1.0 - alpha) * (background color)` This means that a value * of 1.0 corresponds to a solid color, whereas a value of 0.0 corresponds to * a completely transparent color. This uses a wrapper message rather than a * simple float scalar so that it is possible to distinguish between a default * value and the value being unset. If omitted, this color object is rendered * as a solid color (as if the alpha value had been explicitly given a value * of 1.0). */ alpha?: number; /** * The amount of blue in the color as a value in the interval [0, 1]. */ blue?: number; /** * The amount of green in the color as a value in the interval [0, 1]. */ green?: number; /** * The amount of red in the color as a value in the interval [0, 1]. */ red?: number; } /** * Represents a whole or partial calendar date, such as a birthday. The time of * day and time zone are either specified elsewhere or are insignificant. The * date is relative to the Gregorian Calendar. 
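 *
 * As a quick illustration of the partial-date forms enumerated below
 * (hypothetical values):
 *
 * ```
 * const cardExpiration: GoogleTypeDate = { year: 2026, month: 9, day: 0 }; // year and month only
 * const birthday: GoogleTypeDate = { year: 1990, month: 4, day: 15 };      // full date
 * ```
 *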
This can represent one of the * following: * A full date, with non-zero year, month, and day values. * A * month and day, with a zero year (for example, an anniversary). * A year on * its own, with a zero month and a zero day. * A year and month, with a zero * day (for example, a credit card expiration date). Related types: * * google.type.TimeOfDay * google.type.DateTime * google.protobuf.Timestamp */ export interface GoogleTypeDate { /** * Day of a month. Must be from 1 to 31 and valid for the year and month, or * 0 to specify a year by itself or a year and month where the day isn't * significant. */ day?: number; /** * Month of a year. Must be from 1 to 12, or 0 to specify a year without a * month and day. */ month?: number; /** * Year of the date. Must be from 1 to 9999, or 0 to specify a date without a * year. */ year?: number; } /** * Represents a textual expression in the Common Expression Language (CEL) * syntax. CEL is a C-like expression language. The syntax and semantics of CEL * are documented at https://github.com/google/cel-spec. Example (Comparison): * title: "Summary size limit" description: "Determines if a summary is less * than 100 chars" expression: "document.summary.size() < 100" Example * (Equality): title: "Requestor is owner" description: "Determines if requestor * is the document owner" expression: "document.owner == * request.auth.claims.email" Example (Logic): title: "Public documents" * description: "Determine whether the document should be publicly visible" * expression: "document.type != 'private' && document.type != 'internal'" * Example (Data Manipulation): title: "Notification string" description: * "Create a notification string with a timestamp." expression: "'New message * received at ' + string(document.create_time)" The exact variables and * functions that may be referenced within an expression are determined by the * service that evaluates it. See the service documentation for additional * information. */ export interface GoogleTypeExpr { /** * Optional. Description of the expression. This is a longer text which * describes the expression, e.g. when hovered over it in a UI. */ description?: string; /** * Textual representation of an expression in Common Expression Language * syntax. */ expression?: string; /** * Optional. String indicating the location of the expression for error * reporting, e.g. a file name and a position in the file. */ location?: string; /** * Optional. Title for the expression, i.e. a short string describing its * purpose. This can be used e.g. in UIs which allow to enter the expression. */ title?: string; } /** * Represents a time interval, encoded as a Timestamp start (inclusive) and a * Timestamp end (exclusive). The start must be less than or equal to the end. * When the start equals the end, the interval is empty (matches no time). When * both start and end are unspecified, the interval matches any time. */ export interface GoogleTypeInterval { /** * Optional. Exclusive end of the interval. If specified, a Timestamp * matching this interval will have to be before the end. */ endTime?: Date; /** * Optional. Inclusive start of the interval. If specified, a Timestamp * matching this interval will have to be the same or after the start. */ startTime?: Date; } function serializeGoogleTypeInterval(data: any): GoogleTypeInterval { return { ...data, endTime: data["endTime"] !== undefined ? data["endTime"].toISOString() : undefined, startTime: data["startTime"] !== undefined ? 
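    // Date values are converted to RFC 3339 / ISO-8601 strings for the JSON request body.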
data["startTime"].toISOString() : undefined, }; } function deserializeGoogleTypeInterval(data: any): GoogleTypeInterval { return { ...data, endTime: data["endTime"] !== undefined ? new Date(data["endTime"]) : undefined, startTime: data["startTime"] !== undefined ? new Date(data["startTime"]) : undefined, }; } /** * An object that represents a latitude/longitude pair. This is expressed as a * pair of doubles to represent degrees latitude and degrees longitude. Unless * specified otherwise, this object must conform to the WGS84 standard. Values * must be within normalized ranges. */ export interface GoogleTypeLatLng { /** * The latitude in degrees. It must be in the range [-90.0, +90.0]. */ latitude?: number; /** * The longitude in degrees. It must be in the range [-180.0, +180.0]. */ longitude?: number; } /** * Represents an amount of money with its currency type. */ export interface GoogleTypeMoney { /** * The three-letter currency code defined in ISO 4217. */ currencyCode?: string; /** * Number of nano (10^-9) units of the amount. The value must be between * -999,999,999 and +999,999,999 inclusive. If `units` is positive, `nanos` * must be positive or zero. If `units` is zero, `nanos` can be positive, * zero, or negative. If `units` is negative, `nanos` must be negative or * zero. For example $-1.75 is represented as `units`=-1 and * `nanos`=-750,000,000. */ nanos?: number; /** * The whole units of the amount. For example if `currencyCode` is `"USD"`, * then 1 unit is one US dollar. */ units?: bigint; } function serializeGoogleTypeMoney(data: any): GoogleTypeMoney { return { ...data, units: data["units"] !== undefined ? String(data["units"]) : undefined, }; } function deserializeGoogleTypeMoney(data: any): GoogleTypeMoney { return { ...data, units: data["units"] !== undefined ? BigInt(data["units"]) : undefined, }; } /** * Additional options for AIplatform#projectsLocationsBatchPredictionJobsList. */ export interface ProjectsLocationsBatchPredictionJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `model_display_name` supports `=`, * `!=` comparisons. * `state` supports `=`, `!=` comparisons. * `create_time` * supports `=`, `!=`,`<`, `<=`,`>`, `>=` comparisons. `create_time` must be * in RFC 3339 format. * `labels` supports general map functions that is: * `labels.key=value` - key:value equality `labels.key:* - key existence Some * examples of using the filter are: * `state="JOB_STATE_SUCCEEDED" AND * display_name:"my_job_*"` * `state!="JOB_STATE_FAILED" OR * display_name="my_job"` * `NOT display_name="my_job"` * * `create_time>"2021-05-18T00:00:00Z"` * `labels.keyA=valueA` * * `labels.keyB:*` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListBatchPredictionJobsResponse.next_page_token of the previous * JobService.ListBatchPredictionJobs call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsBatchPredictionJobsListOptions(data: any): ProjectsLocationsBatchPredictionJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsBatchPredictionJobsListOptions(data: any): ProjectsLocationsBatchPredictionJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? 
data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsCachedContentsList. */ export interface ProjectsLocationsCachedContentsListOptions { /** * Optional. The maximum number of cached contents to return. The service may * return fewer than this value. If unspecified, some default (under maximum) * number of items will be returned. The maximum value is 1000; values above * 1000 will be coerced to 1000. */ pageSize?: number; /** * Optional. A page token, received from a previous `ListCachedContents` * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to `ListCachedContents` must match the call that * provided the page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsCachedContentsPatch. */ export interface ProjectsLocationsCachedContentsPatchOptions { /** * Required. The list of fields to update. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsCachedContentsPatchOptions(data: any): ProjectsLocationsCachedContentsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsCachedContentsPatchOptions(data: any): ProjectsLocationsCachedContentsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsCustomJobsList. */ export interface ProjectsLocationsCustomJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `state` supports `=`, `!=` * comparisons. * `create_time` supports `=`, `!=`,`<`, `<=`,`>`, `>=` * comparisons. `create_time` must be in RFC 3339 format. * `labels` supports * general map functions that is: `labels.key=value` - key:value equality * `labels.key:* - key existence Some examples of using the filter are: * * `state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"` * * `state!="JOB_STATE_FAILED" OR display_name="my_job"` * `NOT * display_name="my_job"` * `create_time>"2021-05-18T00:00:00Z"` * * `labels.keyA=valueA` * `labels.keyB:*` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListCustomJobsResponse.next_page_token of the previous * JobService.ListCustomJobs call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsCustomJobsListOptions(data: any): ProjectsLocationsCustomJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsCustomJobsListOptions(data: any): ProjectsLocationsCustomJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsCustomJobsOperationsList. */ export interface ProjectsLocationsCustomJobsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsCustomJobsOperationsWait. */ export interface ProjectsLocationsCustomJobsOperationsWaitOptions { /** * The maximum duration to wait before timing out. 
If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsCustomJobsOperationsWaitOptions(data: any): ProjectsLocationsCustomJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsCustomJobsOperationsWaitOptions(data: any): ProjectsLocationsCustomJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDataLabelingJobsList. */ export interface ProjectsLocationsDataLabelingJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `state` supports `=`, `!=` * comparisons. * `create_time` supports `=`, `!=`,`<`, `<=`,`>`, `>=` * comparisons. `create_time` must be in RFC 3339 format. * `labels` supports * general map functions that is: `labels.key=value` - key:value equality * `labels.key:* - key existence Some examples of using the filter are: * * `state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"` * * `state!="JOB_STATE_FAILED" OR display_name="my_job"` * `NOT * display_name="my_job"` * `create_time>"2021-05-18T00:00:00Z"` * * `labels.keyA=valueA` * `labels.keyB:*` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order by * default. Use `desc` after a field name for descending. */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Mask specifying which fields to read. FieldMask represents a set of * symbolic field paths. For example, the mask can be `paths: "name"`. The * "name" here is a field in DataLabelingJob. If this field is not set, all * fields of the DataLabelingJob are returned. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDataLabelingJobsListOptions(data: any): ProjectsLocationsDataLabelingJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDataLabelingJobsListOptions(data: any): ProjectsLocationsDataLabelingJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDataLabelingJobsOperationsList. */ export interface ProjectsLocationsDataLabelingJobsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDataLabelingJobsOperationsWait. */ export interface ProjectsLocationsDataLabelingJobsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDataLabelingJobsOperationsWaitOptions(data: any): ProjectsLocationsDataLabelingJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? 
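    // The Duration-typed timeout is forwarded exactly as provided; this generated serializer applies no reformatting.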
data["timeout"] : undefined, }; } function deserializeProjectsLocationsDataLabelingJobsOperationsWaitOptions(data: any): ProjectsLocationsDataLabelingJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsAnnotationSpecsGet. */ export interface ProjectsLocationsDatasetsAnnotationSpecsGetOptions { /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsAnnotationSpecsGetOptions(data: any): ProjectsLocationsDatasetsAnnotationSpecsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsAnnotationSpecsGetOptions(data: any): ProjectsLocationsDatasetsAnnotationSpecsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsAnnotationSpecsOperationsList. */ export interface ProjectsLocationsDatasetsAnnotationSpecsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDatasetsAnnotationSpecsOperationsWait. */ export interface ProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsAnnotationSpecsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDataItemsAnnotationsList. */ export interface ProjectsLocationsDatasetsDataItemsAnnotationsListOptions { /** * The standard list filter. */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsDataItemsAnnotationsListOptions(data: any): ProjectsLocationsDatasetsDataItemsAnnotationsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsDataItemsAnnotationsListOptions(data: any): ProjectsLocationsDatasetsDataItemsAnnotationsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDataItemsAnnotationsOperationsList. 
*/ export interface ProjectsLocationsDatasetsDataItemsAnnotationsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDataItemsAnnotationsOperationsWait. */ export interface ProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsDataItemsAnnotationsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsDataItemsList. */ export interface ProjectsLocationsDatasetsDataItemsListOptions { /** * The standard list filter. */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsDataItemsListOptions(data: any): ProjectsLocationsDatasetsDataItemsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsDataItemsListOptions(data: any): ProjectsLocationsDatasetsDataItemsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDataItemsOperationsList. */ export interface ProjectsLocationsDatasetsDataItemsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDataItemsOperationsWait. */ export interface ProjectsLocationsDatasetsDataItemsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDatasetsDataItemsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsDataItemsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsDatasetsDataItemsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsDataItemsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? 
data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDatasetVersionsGet. */ export interface ProjectsLocationsDatasetsDatasetVersionsGetOptions { /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsDatasetVersionsGetOptions(data: any): ProjectsLocationsDatasetsDatasetVersionsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsDatasetVersionsGetOptions(data: any): ProjectsLocationsDatasetsDatasetVersionsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDatasetVersionsList. */ export interface ProjectsLocationsDatasetsDatasetVersionsListOptions { /** * Optional. The standard list filter. */ filter?: string; /** * Optional. A comma-separated list of fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. */ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsDatasetVersionsListOptions(data: any): ProjectsLocationsDatasetsDatasetVersionsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsDatasetVersionsListOptions(data: any): ProjectsLocationsDatasetsDatasetVersionsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsDatasetVersionsPatch. */ export interface ProjectsLocationsDatasetsDatasetVersionsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Updatable fields: * * `display_name` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsDatasetVersionsPatchOptions(data: any): ProjectsLocationsDatasetsDatasetVersionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsDatasetVersionsPatchOptions(data: any): ProjectsLocationsDatasetsDatasetVersionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsGet. */ export interface ProjectsLocationsDatasetsGetOptions { /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsGetOptions(data: any): ProjectsLocationsDatasetsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsGetOptions(data: any): ProjectsLocationsDatasetsGetOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsList. */ export interface ProjectsLocationsDatasetsListOptions { /** * An expression for filtering the results of the request. For field names * both snake_case and camelCase are supported. 
* `display_name`: supports = * and != * `metadata_schema_uri`: supports = and != * `labels` supports * general map functions that is: * `labels.key=value` - key:value equality * * `labels.key:* or labels:key - key existence * A key including a space must * be quoted. `labels."a key"`. Some examples: * `displayName="myDisplayName"` * * `labels.myKey="myValue"` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `display_name` * `create_time` * `update_time` */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsListOptions(data: any): ProjectsLocationsDatasetsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsListOptions(data: any): ProjectsLocationsDatasetsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsOperationsList. */ export interface ProjectsLocationsDatasetsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsDatasetsOperationsWait. */ export interface ProjectsLocationsDatasetsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDatasetsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsDatasetsOperationsWaitOptions(data: any): ProjectsLocationsDatasetsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsPatch. */ export interface ProjectsLocationsDatasetsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Updatable fields: * * `display_name` * `description` * `labels` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsPatchOptions(data: any): ProjectsLocationsDatasetsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsPatchOptions(data: any): ProjectsLocationsDatasetsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsSavedQueriesList. */ export interface ProjectsLocationsDatasetsSavedQueriesListOptions { /** * The standard list filter. */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. */ orderBy?: string; /** * The standard list page size. 
*/ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsDatasetsSavedQueriesListOptions(data: any): ProjectsLocationsDatasetsSavedQueriesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsSavedQueriesListOptions(data: any): ProjectsLocationsDatasetsSavedQueriesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDatasetsSavedQueriesOperationsList. */ export interface ProjectsLocationsDatasetsSavedQueriesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDatasetsSavedQueriesOperationsWait. */ export interface ProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions(data: any): ProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions(data: any): ProjectsLocationsDatasetsSavedQueriesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsDatasetsSearchDataItems. */ export interface ProjectsLocationsDatasetsSearchDataItemsOptions { /** * An expression that specifies what Annotations will be returned per * DataItem. Annotations satisfied either of the conditions will be returned. * * `annotation_spec_id` - for = or !=. Must specify `saved_query_id=` - * saved query id that annotations should belong to. */ annotationFilters?: string; /** * An expression for filtering the Annotations that will be returned per * DataItem. * `annotation_spec_id` - for = or !=. */ annotationsFilter?: string; /** * If set, only up to this many of Annotations will be returned per * DataItemView. The maximum value is 1000. If not set, the maximum value will * be used. */ annotationsLimit?: number; /** * An expression for filtering the DataItem that will be returned. * * `data_item_id` - for = or !=. * `labeled` - for = or !=. * * `has_annotation(ANNOTATION_SPEC_ID)` - true only for DataItem that have at * least one annotation with annotation_spec_id = `ANNOTATION_SPEC_ID` in the * context of SavedQuery or DataLabelingJob. For example: * `data_item=1` * * `has_annotation(5)` */ dataItemFilter?: string; /** * The resource name of a DataLabelingJob. Format: * `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` * If this field is set, all of the search will be done in the context of this * DataLabelingJob. */ dataLabelingJob?: string; /** * Mask specifying which fields of DataItemView to read. */ fieldMask?: string /* FieldMask */; /** * A comma-separated list of fields to order by, sorted in ascending order. 
* Use "desc" after a field name for descending. */ orderBy?: string; /** * A comma-separated list of annotation fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. Must also * specify saved_query. */ ["orderByAnnotation.orderBy"]?: string; /** * Required. Saved query of the Annotation. Only Annotations belong to this * saved query will be considered for ordering. */ ["orderByAnnotation.savedQuery"]?: string; /** * A comma-separated list of data item fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. */ orderByDataItem?: string; /** * Requested page size. Server may return fewer results than requested. * Default and maximum page size is 100. */ pageSize?: number; /** * A token identifying a page of results for the server to return Typically * obtained via SearchDataItemsResponse.next_page_token of the previous * DatasetService.SearchDataItems call. */ pageToken?: string; /** * The resource name of a SavedQuery(annotation set in UI). Format: * `projects/{project}/locations/{location}/datasets/{dataset}/savedQueries/{saved_query}` * All of the search will be done in the context of this SavedQuery. */ savedQuery?: string; } function serializeProjectsLocationsDatasetsSearchDataItemsOptions(data: any): ProjectsLocationsDatasetsSearchDataItemsOptions { return { ...data, fieldMask: data["fieldMask"] !== undefined ? data["fieldMask"] : undefined, }; } function deserializeProjectsLocationsDatasetsSearchDataItemsOptions(data: any): ProjectsLocationsDatasetsSearchDataItemsOptions { return { ...data, fieldMask: data["fieldMask"] !== undefined ? data["fieldMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDeploymentResourcePoolsList. */ export interface ProjectsLocationsDeploymentResourcePoolsListOptions { /** * The maximum number of DeploymentResourcePools to return. The service may * return fewer than this value. */ pageSize?: number; /** * A page token, received from a previous `ListDeploymentResourcePools` call. * Provide this to retrieve the subsequent page. When paginating, all other * parameters provided to `ListDeploymentResourcePools` must match the call * that provided the page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDeploymentResourcePoolsOperationsList. */ export interface ProjectsLocationsDeploymentResourcePoolsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsDeploymentResourcePoolsOperationsWait. */ export interface ProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions(data: any): ProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions(data: any): ProjectsLocationsDeploymentResourcePoolsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? 
data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDeploymentResourcePoolsPatch. */ export interface ProjectsLocationsDeploymentResourcePoolsPatchOptions { /** * Required. The list of fields to update. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsDeploymentResourcePoolsPatchOptions(data: any): ProjectsLocationsDeploymentResourcePoolsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsDeploymentResourcePoolsPatchOptions(data: any): ProjectsLocationsDeploymentResourcePoolsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsDeploymentResourcePoolsQueryDeployedModels. */ export interface ProjectsLocationsDeploymentResourcePoolsQueryDeployedModelsOptions { /** * The maximum number of DeployedModels to return. The service may return * fewer than this value. */ pageSize?: number; /** * A page token, received from a previous `QueryDeployedModels` call. Provide * this to retrieve the subsequent page. When paginating, all other parameters * provided to `QueryDeployedModels` must match the call that provided the * page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsEndpointsCreate. */ export interface ProjectsLocationsEndpointsCreateOptions { /** * Immutable. The ID to use for endpoint, which will become the final * component of the endpoint resource name. If not provided, Vertex AI will * generate a value for this ID. If the first character is a letter, this * value may be up to 63 characters, and valid characters are `[a-z0-9-]`. The * last character must be a letter or number. If the first character is a * number, this value may be up to 9 characters, and valid characters are * `[0-9]` with no leading zeros. When using HTTP/JSON, this field is * populated based on a query string argument, such as `?endpoint_id=12345`. * This is the fallback for fields that are not included in either the URI or * the body. */ endpointId?: string; } /** * Additional options for AIplatform#projectsLocationsEndpointsList. */ export interface ProjectsLocationsEndpointsListOptions { /** * Optional. An expression for filtering the results of the request. For * field names both snake_case and camelCase are supported. * `endpoint` * supports `=` and `!=`. `endpoint` represents the Endpoint ID, i.e. the last * segment of the Endpoint's resource name. * `display_name` supports `=` and * `!=`. * `labels` supports general map functions that is: * * `labels.key=value` - key:value equality * `labels.key:*` or `labels:key` - * key existence * A key including a space must be quoted. `labels."a key"`. * * `base_model_name` only supports `=`. Some examples: * `endpoint=1` * * `displayName="myDisplayName"` * `labels.myKey="myValue"` * * `baseModelName="text-bison"` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `display_name` * `create_time` * `update_time` Example: `display_name, * create_time desc`. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListEndpointsResponse.next_page_token of the previous * EndpointService.ListEndpoints call. 
*/ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsEndpointsListOptions(data: any): ProjectsLocationsEndpointsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsEndpointsListOptions(data: any): ProjectsLocationsEndpointsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsEndpointsOperationsList. */ export interface ProjectsLocationsEndpointsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsEndpointsOperationsWait. */ export interface ProjectsLocationsEndpointsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsEndpointsOperationsWaitOptions(data: any): ProjectsLocationsEndpointsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsEndpointsOperationsWaitOptions(data: any): ProjectsLocationsEndpointsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsEndpointsPatch. */ export interface ProjectsLocationsEndpointsPatchOptions { /** * Required. The update mask applies to the resource. See * google.protobuf.FieldMask. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsEndpointsPatchOptions(data: any): ProjectsLocationsEndpointsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsEndpointsPatchOptions(data: any): ProjectsLocationsEndpointsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsFeatureGroupsCreate. */ export interface ProjectsLocationsFeatureGroupsCreateOptions { /** * Required. The ID to use for this FeatureGroup, which will become the final * component of the FeatureGroup's resource name. This value may be up to 128 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within the project and * location. */ featureGroupId?: string; } /** * Additional options for AIplatform#projectsLocationsFeatureGroupsDelete. */ export interface ProjectsLocationsFeatureGroupsDeleteOptions { /** * If set to true, any Features under this FeatureGroup will also be deleted. * (Otherwise, the request will only work if the FeatureGroup has no * Features.) */ force?: boolean; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsFeaturesCreate. */ export interface ProjectsLocationsFeatureGroupsFeaturesCreateOptions { /** * Required. The ID to use for the Feature, which will become the final * component of the Feature's resource name. 
This value may be up to 128 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within an * EntityType/FeatureGroup. */ featureId?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsFeaturesList. */ export interface ProjectsLocationsFeatureGroupsFeaturesListOptions { /** * Lists the Features that match the filter expression. The following filters * are supported: * `value_type`: Supports = and != comparisons. * * `create_time`: Supports =, !=, <, >, >=, and <= comparisons. Values must be * in RFC 3339 format. * `update_time`: Supports =, !=, <, >, >=, and <= * comparisons. Values must be in RFC 3339 format. * `labels`: Supports * key-value equality as well as key presence. Examples: * `value_type = * DOUBLE` --> Features whose type is DOUBLE. * `create_time > * \"2020-01-31T15:30:00.000000Z\" OR update_time > * \"2020-01-31T15:30:00.000000Z\"` --> EntityTypes created or updated after * 2020-01-31T15:30:00.000000Z. * `labels.active = yes AND labels.env = prod` * --> Features having both (active: yes) and (env: prod) labels. * * `labels.env: *` --> Any Feature which has a label with 'env' as the key. */ filter?: string; /** * Only applicable for Vertex AI Feature Store (Legacy). If set, return the * most recent ListFeaturesRequest.latest_stats_count of stats for each * Feature in response. Valid value is [0, 10]. If number of stats exists < * ListFeaturesRequest.latest_stats_count, return all existing stats. */ latestStatsCount?: number; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `feature_id` * `value_type` (Not supported for FeatureRegistry Feature) * * `create_time` * `update_time` */ orderBy?: string; /** * The maximum number of Features to return. The service may return fewer * than this value. If unspecified, at most 1000 Features will be returned. * The maximum value is 1000; any value greater than 1000 will be coerced to * 1000. */ pageSize?: number; /** * A page token, received from a previous FeaturestoreService.ListFeatures * call or FeatureRegistryService.ListFeatures call. Provide this to retrieve * the subsequent page. When paginating, all other parameters provided to * FeaturestoreService.ListFeatures or FeatureRegistryService.ListFeatures * must match the call that provided the page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsFeatureGroupsFeaturesListOptions(data: any): ProjectsLocationsFeatureGroupsFeaturesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsFeatureGroupsFeaturesListOptions(data: any): ProjectsLocationsFeatureGroupsFeaturesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsFeaturesOperationsListWait. */ export interface ProjectsLocationsFeatureGroupsFeaturesOperationsListWaitOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsFeaturesOperationsWait. 
*/ export interface ProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions(data: any): ProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions(data: any): ProjectsLocationsFeatureGroupsFeaturesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsFeaturesPatch. */ export interface ProjectsLocationsFeatureGroupsFeaturesPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the Features * resource by the update. The fields specified in the update_mask are * relative to the resource, not the full request. A field will be overwritten * if it is in the mask. If the user does not provide a mask then only the * non-empty fields present in the request will be overwritten. Set the * update_mask to `*` to override all fields. Updatable fields: * * `description` * `labels` * `disable_monitoring` (Not supported for * FeatureRegistryService Feature) * `point_of_contact` (Not supported for * FeaturestoreService FeatureStore) */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeatureGroupsFeaturesPatchOptions(data: any): ProjectsLocationsFeatureGroupsFeaturesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeatureGroupsFeaturesPatchOptions(data: any): ProjectsLocationsFeatureGroupsFeaturesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsGetIamPolicy. */ export interface ProjectsLocationsFeatureGroupsGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for AIplatform#projectsLocationsFeatureGroupsList. */ export interface ProjectsLocationsFeatureGroupsListOptions { /** * Lists the FeatureGroups that match the filter expression. The following * fields are supported: * `create_time`: Supports `=`, `!=`, `<`, `>`, `<=`, * and `>=` comparisons. Values must be in RFC 3339 format. * `update_time`: * Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons. 
Values must be in * RFC 3339 format. * `labels`: Supports key-value equality and key presence. * Examples: * `create_time > "2020-01-01" OR update_time > "2020-01-01"` * FeatureGroups created or updated after 2020-01-01. * `labels.env = "prod"` * FeatureGroups with label "env" set to "prod". */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported Fields: * * `create_time` * `update_time` */ orderBy?: string; /** * The maximum number of FeatureGroups to return. The service may return * fewer than this value. If unspecified, at most 100 FeatureGroups will be * returned. The maximum value is 100; any value greater than 100 will be * coerced to 100. */ pageSize?: number; /** * A page token, received from a previous * FeatureRegistryService.ListFeatureGroups call. Provide this to retrieve the * subsequent page. When paginating, all other parameters provided to * FeatureRegistryService.ListFeatureGroups must match the call that provided * the page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsOperationsListWait. */ export interface ProjectsLocationsFeatureGroupsOperationsListWaitOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsOperationsWait. */ export interface ProjectsLocationsFeatureGroupsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsFeatureGroupsOperationsWaitOptions(data: any): ProjectsLocationsFeatureGroupsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeatureGroupsOperationsWaitOptions(data: any): ProjectsLocationsFeatureGroupsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsFeatureGroupsPatch. */ export interface ProjectsLocationsFeatureGroupsPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the * FeatureGroup resource by the update. The fields specified in the * update_mask are relative to the resource, not the full request. A field * will be overwritten if it is in the mask. If the user does not provide a * mask then only the non-empty fields present in the request will be * overwritten. Set the update_mask to `*` to override all fields. Updatable * fields: * `labels` * `description` * `big_query` * * `big_query.entity_id_columns` * `service_agent_type` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeatureGroupsPatchOptions(data: any): ProjectsLocationsFeatureGroupsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeatureGroupsPatchOptions(data: any): ProjectsLocationsFeatureGroupsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? 
data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureGroupsTestIamPermissions. */ export interface ProjectsLocationsFeatureGroupsTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresCreate. */ export interface ProjectsLocationsFeatureOnlineStoresCreateOptions { /** * Required. The ID to use for this FeatureOnlineStore, which will become the * final component of the FeatureOnlineStore's resource name. This value may * be up to 60 characters, and valid characters are `[a-z0-9_]`. The first * character cannot be a number. The value must be unique within the project * and location. */ featureOnlineStoreId?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresDelete. */ export interface ProjectsLocationsFeatureOnlineStoresDeleteOptions { /** * If set to true, any FeatureViews and Features for this FeatureOnlineStore * will also be deleted. (Otherwise, the request will only work if the * FeatureOnlineStore has no FeatureViews.) */ force?: boolean; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsCreate. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsCreateOptions { /** * Required. The ID to use for the FeatureView, which will become the final * component of the FeatureView's resource name. This value may be up to 60 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within a FeatureOnlineStore. */ featureViewId?: string; /** * Immutable. If set to true, one on demand sync will be run immediately, * regardless whether the FeatureView.sync_config is configured or not. */ runSyncImmediately?: boolean; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsList. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsFeatureViewSyncsListOptions { /** * Lists the FeatureViewSyncs that match the filter expression. The following * filters are supported: * `create_time`: Supports `=`, `!=`, `<`, `>`, `>=`, * and `<=` comparisons. Values must be in RFC 3339 format. Examples: * * `create_time > \"2020-01-31T15:30:00.000000Z\"` --> FeatureViewSyncs * created after 2020-01-31T15:30:00.000000Z. */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `create_time` */ orderBy?: string; /** * The maximum number of FeatureViewSyncs to return. The service may return * fewer than this value. If unspecified, at most 1000 FeatureViewSyncs will * be returned. The maximum value is 1000; any value greater than 1000 will be * coerced to 1000. */ pageSize?: number; /** * A page token, received from a previous * FeatureOnlineStoreAdminService.ListFeatureViewSyncs call. Provide this to * retrieve the subsequent page. When paginating, all other parameters * provided to FeatureOnlineStoreAdminService.ListFeatureViewSyncs must match * the call that provided the page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsGetIamPolicy. 
*/ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsList. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsListOptions { /** * Lists the FeatureViews that match the filter expression. The following * filters are supported: * `create_time`: Supports `=`, `!=`, `<`, `>`, `>=`, * and `<=` comparisons. Values must be in RFC 3339 format. * `update_time`: * Supports `=`, `!=`, `<`, `>`, `>=`, and `<=` comparisons. Values must be in * RFC 3339 format. * `labels`: Supports key-value equality as well as key * presence. Examples: * `create_time > \"2020-01-31T15:30:00.000000Z\" OR * update_time > \"2020-01-31T15:30:00.000000Z\"` --> FeatureViews created or * updated after 2020-01-31T15:30:00.000000Z. * `labels.active = yes AND * labels.env = prod` --> FeatureViews having both (active: yes) and (env: * prod) labels. * `labels.env: *` --> Any FeatureView which has a label with * 'env' as the key. */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `feature_view_id` * `create_time` * `update_time` */ orderBy?: string; /** * The maximum number of FeatureViews to return. The service may return fewer * than this value. If unspecified, at most 1000 FeatureViews will be * returned. The maximum value is 1000; any value greater than 1000 will be * coerced to 1000. */ pageSize?: number; /** * A page token, received from a previous * FeatureOnlineStoreAdminService.ListFeatureViews call. Provide this to * retrieve the subsequent page. When paginating, all other parameters * provided to FeatureOnlineStoreAdminService.ListFeatureViews must match the * call that provided the page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsOperationsListWait. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsListWaitOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsOperationsWait. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. 
*/ timeout?: number /* Duration */; } function serializeProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions(data: any): ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions(data: any): ProjectsLocationsFeatureOnlineStoresFeatureViewsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsPatch. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the * FeatureView resource by the update. The fields specified in the update_mask * are relative to the resource, not the full request. A field will be * overwritten if it is in the mask. If the user does not provide a mask then * only the non-empty fields present in the request will be overwritten. Set * the update_mask to `*` to override all fields. Updatable fields: * `labels` * * `service_agent_type` * `big_query_source` * `big_query_source.uri` * * `big_query_source.entity_id_columns` * `feature_registry_source` * * `feature_registry_source.feature_groups` * `sync_config` * * `sync_config.cron` * `optimized_config.automatic_resources` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions(data: any): ProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions(data: any): ProjectsLocationsFeatureOnlineStoresFeatureViewsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresFeatureViewsTestIamPermissions. */ export interface ProjectsLocationsFeatureOnlineStoresFeatureViewsTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresGetIamPolicy. */ export interface ProjectsLocationsFeatureOnlineStoresGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for AIplatform#projectsLocationsFeatureOnlineStoresList. 
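 *
 * A minimal usage sketch (assumes an `AIplatform` instance `ai`; the project
 * and location are placeholders, and the parent-then-options calling pattern
 * mirrors the other list methods defined on the class in this module):
 *
 * ```ts
 * const res = await ai.projectsLocationsFeatureOnlineStoresList(
 *   "projects/my-project/locations/us-central1",
 *   { filter: `labels.env = "prod"`, pageSize: 100 }, // 100 is the documented maximum
 * );
 * ```
 *
 * Pass the returned `nextPageToken` back as `pageToken` (with the same filter)
 * to fetch subsequent pages.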
*/ export interface ProjectsLocationsFeatureOnlineStoresListOptions { /** * Lists the FeatureOnlineStores that match the filter expression. The * following fields are supported: * `create_time`: Supports `=`, `!=`, `<`, * `>`, `<=`, and `>=` comparisons. Values must be in RFC 3339 format. * * `update_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons. * Values must be in RFC 3339 format. * `labels`: Supports key-value equality * and key presence. Examples: * `create_time > "2020-01-01" OR update_time > * "2020-01-01"` FeatureOnlineStores created or updated after 2020-01-01. * * `labels.env = "prod"` FeatureOnlineStores with label "env" set to "prod". */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported Fields: * * `create_time` * `update_time` */ orderBy?: string; /** * The maximum number of FeatureOnlineStores to return. The service may * return fewer than this value. If unspecified, at most 100 * FeatureOnlineStores will be returned. The maximum value is 100; any value * greater than 100 will be coerced to 100. */ pageSize?: number; /** * A page token, received from a previous * FeatureOnlineStoreAdminService.ListFeatureOnlineStores call. Provide this * to retrieve the subsequent page. When paginating, all other parameters * provided to FeatureOnlineStoreAdminService.ListFeatureOnlineStores must * match the call that provided the page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresOperationsListWait. */ export interface ProjectsLocationsFeatureOnlineStoresOperationsListWaitOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresOperationsWait. */ export interface ProjectsLocationsFeatureOnlineStoresOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsFeatureOnlineStoresOperationsWaitOptions(data: any): ProjectsLocationsFeatureOnlineStoresOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeatureOnlineStoresOperationsWaitOptions(data: any): ProjectsLocationsFeatureOnlineStoresOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsFeatureOnlineStoresPatch. */ export interface ProjectsLocationsFeatureOnlineStoresPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the * FeatureOnlineStore resource by the update. The fields specified in the * update_mask are relative to the resource, not the full request. A field * will be overwritten if it is in the mask. If the user does not provide a * mask then only the non-empty fields present in the request will be * overwritten. Set the update_mask to `*` to override all fields. 
Updatable * fields: * `labels` * `description` * `bigtable` * `bigtable.auto_scaling` * * `bigtable.enable_multi_region_replica` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeatureOnlineStoresPatchOptions(data: any): ProjectsLocationsFeatureOnlineStoresPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeatureOnlineStoresPatchOptions(data: any): ProjectsLocationsFeatureOnlineStoresPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeatureOnlineStoresTestIamPermissions. */ export interface ProjectsLocationsFeatureOnlineStoresTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for AIplatform#projectsLocationsFeaturestoresCreate. */ export interface ProjectsLocationsFeaturestoresCreateOptions { /** * Required. The ID to use for this Featurestore, which will become the final * component of the Featurestore's resource name. This value may be up to 60 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within the project and * location. */ featurestoreId?: string; } /** * Additional options for AIplatform#projectsLocationsFeaturestoresDelete. */ export interface ProjectsLocationsFeaturestoresDeleteOptions { /** * If set to true, any EntityTypes and Features for this Featurestore will * also be deleted. (Otherwise, the request will only work if the Featurestore * has no EntityTypes.) */ force?: boolean; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesCreate. */ export interface ProjectsLocationsFeaturestoresEntityTypesCreateOptions { /** * Required. The ID to use for the EntityType, which will become the final * component of the EntityType's resource name. This value may be up to 60 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within a featurestore. */ entityTypeId?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesDelete. */ export interface ProjectsLocationsFeaturestoresEntityTypesDeleteOptions { /** * If set to true, any Features for this EntityType will also be deleted. * (Otherwise, the request will only work if the EntityType has no Features.) */ force?: boolean; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesFeaturesCreate. */ export interface ProjectsLocationsFeaturestoresEntityTypesFeaturesCreateOptions { /** * Required. The ID to use for the Feature, which will become the final * component of the Feature's resource name. This value may be up to 128 * characters, and valid characters are `[a-z0-9_]`. The first character * cannot be a number. The value must be unique within an * EntityType/FeatureGroup. */ featureId?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesFeaturesList. */ export interface ProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions { /** * Lists the Features that match the filter expression. 
The following filters * are supported: * `value_type`: Supports = and != comparisons. * * `create_time`: Supports =, !=, <, >, >=, and <= comparisons. Values must be * in RFC 3339 format. * `update_time`: Supports =, !=, <, >, >=, and <= * comparisons. Values must be in RFC 3339 format. * `labels`: Supports * key-value equality as well as key presence. Examples: * `value_type = * DOUBLE` --> Features whose type is DOUBLE. * `create_time > * \"2020-01-31T15:30:00.000000Z\" OR update_time > * \"2020-01-31T15:30:00.000000Z\"` --> EntityTypes created or updated after * 2020-01-31T15:30:00.000000Z. * `labels.active = yes AND labels.env = prod` * --> Features having both (active: yes) and (env: prod) labels. * * `labels.env: *` --> Any Feature which has a label with 'env' as the key. */ filter?: string; /** * Only applicable for Vertex AI Feature Store (Legacy). If set, return the * most recent ListFeaturesRequest.latest_stats_count of stats for each * Feature in response. Valid value is [0, 10]. If number of stats exists < * ListFeaturesRequest.latest_stats_count, return all existing stats. */ latestStatsCount?: number; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `feature_id` * `value_type` (Not supported for FeatureRegistry Feature) * * `create_time` * `update_time` */ orderBy?: string; /** * The maximum number of Features to return. The service may return fewer * than this value. If unspecified, at most 1000 Features will be returned. * The maximum value is 1000; any value greater than 1000 will be coerced to * 1000. */ pageSize?: number; /** * A page token, received from a previous FeaturestoreService.ListFeatures * call or FeatureRegistryService.ListFeatures call. Provide this to retrieve * the subsequent page. When paginating, all other parameters provided to * FeaturestoreService.ListFeatures or FeatureRegistryService.ListFeatures * must match the call that provided the page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesFeaturesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesFeaturesOperationsList. */ export interface ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesFeaturesOperationsWait. */ export interface ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. 
*/ timeout?: number /* Duration */; } function serializeProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesFeaturesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesFeaturesPatch. */ export interface ProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the Features * resource by the update. The fields specified in the update_mask are * relative to the resource, not the full request. A field will be overwritten * if it is in the mask. If the user does not provide a mask then only the * non-empty fields present in the request will be overwritten. Set the * update_mask to `*` to override all fields. Updatable fields: * * `description` * `labels` * `disable_monitoring` (Not supported for * FeatureRegistryService Feature) * `point_of_contact` (Not supported for * FeaturestoreService FeatureStore) */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesFeaturesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesGetIamPolicy. */ export interface ProjectsLocationsFeaturestoresEntityTypesGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesList. */ export interface ProjectsLocationsFeaturestoresEntityTypesListOptions { /** * Lists the EntityTypes that match the filter expression. The following * filters are supported: * `create_time`: Supports `=`, `!=`, `<`, `>`, `>=`, * and `<=` comparisons. Values must be in RFC 3339 format. * `update_time`: * Supports `=`, `!=`, `<`, `>`, `>=`, and `<=` comparisons. Values must be in * RFC 3339 format. * `labels`: Supports key-value equality as well as key * presence. 
Examples: * `create_time > \"2020-01-31T15:30:00.000000Z\" OR * update_time > \"2020-01-31T15:30:00.000000Z\"` --> EntityTypes created or * updated after 2020-01-31T15:30:00.000000Z. * `labels.active = yes AND * labels.env = prod` --> EntityTypes having both (active: yes) and (env: * prod) labels. * `labels.env: *` --> Any EntityType which has a label with * 'env' as the key. */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `entity_type_id` * `create_time` * `update_time` */ orderBy?: string; /** * The maximum number of EntityTypes to return. The service may return fewer * than this value. If unspecified, at most 1000 EntityTypes will be returned. * The maximum value is 1000; any value greater than 1000 will be coerced to * 1000. */ pageSize?: number; /** * A page token, received from a previous FeaturestoreService.ListEntityTypes * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to FeaturestoreService.ListEntityTypes must match * the call that provided the page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsFeaturestoresEntityTypesListOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresEntityTypesListOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesOperationsList. */ export interface ProjectsLocationsFeaturestoresEntityTypesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesOperationsWait. */ export interface ProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesPatch. */ export interface ProjectsLocationsFeaturestoresEntityTypesPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the * EntityType resource by the update. The fields specified in the update_mask * are relative to the resource, not the full request. A field will be * overwritten if it is in the mask. 
If the user does not provide a mask then * only the non-empty fields present in the request will be overwritten. Set * the update_mask to `*` to override all fields. Updatable fields: * * `description` * `labels` * `monitoring_config.snapshot_analysis.disabled` * * `monitoring_config.snapshot_analysis.monitoring_interval_days` * * `monitoring_config.snapshot_analysis.staleness_days` * * `monitoring_config.import_features_analysis.state` * * `monitoring_config.import_features_analysis.anomaly_detection_baseline` * * `monitoring_config.numerical_threshold_config.value` * * `monitoring_config.categorical_threshold_config.value` * * `offline_storage_ttl_days` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeaturestoresEntityTypesPatchOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresEntityTypesPatchOptions(data: any): ProjectsLocationsFeaturestoresEntityTypesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresEntityTypesTestIamPermissions. */ export interface ProjectsLocationsFeaturestoresEntityTypesTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresGetIamPolicy. */ export interface ProjectsLocationsFeaturestoresGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for AIplatform#projectsLocationsFeaturestoresList. */ export interface ProjectsLocationsFeaturestoresListOptions { /** * Lists the featurestores that match the filter expression. The following * fields are supported: * `create_time`: Supports `=`, `!=`, `<`, `>`, `<=`, * and `>=` comparisons. Values must be in RFC 3339 format. * `update_time`: * Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons. Values must be in * RFC 3339 format. * `online_serving_config.fixed_node_count`: Supports `=`, * `!=`, `<`, `>`, `<=`, and `>=` comparisons. * `labels`: Supports key-value * equality and key presence. Examples: * `create_time > "2020-01-01" OR * update_time > "2020-01-01"` Featurestores created or updated after * 2020-01-01. * `labels.env = "prod"` Featurestores with label "env" set to * "prod". */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. 
* Use "desc" after a field name for descending. Supported Fields: * * `create_time` * `update_time` * `online_serving_config.fixed_node_count` */ orderBy?: string; /** * The maximum number of Featurestores to return. The service may return * fewer than this value. If unspecified, at most 100 Featurestores will be * returned. The maximum value is 100; any value greater than 100 will be * coerced to 100. */ pageSize?: number; /** * A page token, received from a previous * FeaturestoreService.ListFeaturestores call. Provide this to retrieve the * subsequent page. When paginating, all other parameters provided to * FeaturestoreService.ListFeaturestores must match the call that provided the * page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsFeaturestoresListOptions(data: any): ProjectsLocationsFeaturestoresListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresListOptions(data: any): ProjectsLocationsFeaturestoresListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresOperationsList. */ export interface ProjectsLocationsFeaturestoresOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresOperationsWait. */ export interface ProjectsLocationsFeaturestoresOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsFeaturestoresOperationsWaitOptions(data: any): ProjectsLocationsFeaturestoresOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresOperationsWaitOptions(data: any): ProjectsLocationsFeaturestoresOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsFeaturestoresPatch. */ export interface ProjectsLocationsFeaturestoresPatchOptions { /** * Field mask is used to specify the fields to be overwritten in the * Featurestore resource by the update. The fields specified in the * update_mask are relative to the resource, not the full request. A field * will be overwritten if it is in the mask. If the user does not provide a * mask then only the non-empty fields present in the request will be * overwritten. Set the update_mask to `*` to override all fields. Updatable * fields: * `labels` * `online_serving_config.fixed_node_count` * * `online_serving_config.scaling` * `online_storage_ttl_days` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsFeaturestoresPatchOptions(data: any): ProjectsLocationsFeaturestoresPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? 
data["updateMask"] : undefined, }; } function deserializeProjectsLocationsFeaturestoresPatchOptions(data: any): ProjectsLocationsFeaturestoresPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresSearchFeatures. */ export interface ProjectsLocationsFeaturestoresSearchFeaturesOptions { /** * The maximum number of Features to return. The service may return fewer * than this value. If unspecified, at most 100 Features will be returned. The * maximum value is 100; any value greater than 100 will be coerced to 100. */ pageSize?: number; /** * A page token, received from a previous FeaturestoreService.SearchFeatures * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to FeaturestoreService.SearchFeatures, except * `page_size`, must match the call that provided the page token. */ pageToken?: string; /** * Query string that is a conjunction of field-restricted queries and/or * field-restricted filters. Field-restricted queries and filters can be * combined using `AND` to form a conjunction. A field query is in the form * FIELD:QUERY. This implicitly checks if QUERY exists as a substring within * Feature's FIELD. The QUERY and the FIELD are converted to a sequence of * words (i.e. tokens) for comparison. This is done by: * Removing * leading/trailing whitespace and tokenizing the search value. Characters * that are not one of alphanumeric `[a-zA-Z0-9]`, underscore `_`, or asterisk * `*` are treated as delimiters for tokens. `*` is treated as a wildcard that * matches characters within a token. * Ignoring case. * Prepending an * asterisk to the first and appending an asterisk to the last token in QUERY. * A QUERY must be either a singular token or a phrase. A phrase is one or * multiple words enclosed in double quotation marks ("). With phrases, the * order of the words is important. Words in the phrase must be matching in * order and consecutively. Supported FIELDs for field-restricted queries: * * `feature_id` * `description` * `entity_type_id` Examples: * `feature_id: * foo` --> Matches a Feature with ID containing the substring `foo` (eg. * `foo`, `foofeature`, `barfoo`). * `feature_id: foo*feature` --> Matches a * Feature with ID containing the substring `foo*feature` (eg. * `foobarfeature`). * `feature_id: foo AND description: bar` --> Matches a * Feature with ID containing the substring `foo` and description containing * the substring `bar`. Besides field queries, the following exact-match * filters are supported. The exact-match filters do not support wildcards. * Unlike field-restricted queries, exact-match filters are case-sensitive. * * `feature_id`: Supports = comparisons. * `description`: Supports = * comparisons. Multi-token filters should be enclosed in quotes. * * `entity_type_id`: Supports = comparisons. * `value_type`: Supports = and != * comparisons. * `labels`: Supports key-value equality as well as key * presence. * `featurestore_id`: Supports = comparisons. Examples: * * `description = "foo bar"` --> Any Feature with description exactly equal to * `foo bar` * `value_type = DOUBLE` --> Features whose type is DOUBLE. * * `labels.active = yes AND labels.env = prod` --> Features having both * (active: yes) and (env: prod) labels. * `labels.env: *` --> Any Feature * which has a label with `env` as the key. 
*/ query?: string; } /** * Additional options for * AIplatform#projectsLocationsFeaturestoresTestIamPermissions. */ export interface ProjectsLocationsFeaturestoresTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for * AIplatform#projectsLocationsHyperparameterTuningJobsList. */ export interface ProjectsLocationsHyperparameterTuningJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `state` supports `=`, `!=` * comparisons. * `create_time` supports `=`, `!=`,`<`, `<=`,`>`, `>=` * comparisons. `create_time` must be in RFC 3339 format. * `labels` supports * general map functions that is: `labels.key=value` - key:value equality * `labels.key:* - key existence Some examples of using the filter are: * * `state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"` * * `state!="JOB_STATE_FAILED" OR display_name="my_job"` * `NOT * display_name="my_job"` * `create_time>"2021-05-18T00:00:00Z"` * * `labels.keyA=valueA` * `labels.keyB:*` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListHyperparameterTuningJobsResponse.next_page_token of the previous * JobService.ListHyperparameterTuningJobs call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsHyperparameterTuningJobsListOptions(data: any): ProjectsLocationsHyperparameterTuningJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsHyperparameterTuningJobsListOptions(data: any): ProjectsLocationsHyperparameterTuningJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsHyperparameterTuningJobsOperationsList. */ export interface ProjectsLocationsHyperparameterTuningJobsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsHyperparameterTuningJobsOperationsWait. */ export interface ProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions(data: any): ProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions(data: any): ProjectsLocationsHyperparameterTuningJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsIndexEndpointsList. 
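 *
 * A minimal usage sketch (assumes an `AIplatform` instance `ai`; the parent
 * resource name is a placeholder, and the filter uses the `regex()` form
 * documented on the `filter` field below):
 *
 * ```ts
 * const res = await ai.projectsLocationsIndexEndpointsList(
 *   "projects/my-project/locations/us-central1",
 *   { filter: `regex(display_name, "^prod-")`, pageSize: 50 },
 * );
 * ```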
 */ export interface ProjectsLocationsIndexEndpointsListOptions { /** * Optional. An expression for filtering the results of the request. For * field names both snake_case and camelCase are supported. * `index_endpoint` * supports = and !=. `index_endpoint` represents the IndexEndpoint ID, i.e. * the last segment of the IndexEndpoint's resource name. * `display_name` * supports =, != and regex() (uses * [re2](https://github.com/google/re2/wiki/Syntax) syntax) * `labels` * supports general map functions, that is: `labels.key=value` - key:value * equality; `labels.key:*` or `labels:key` - key existence. A key including a * space must be quoted: `labels."a key"`. Some examples: * * `index_endpoint="1"` * `display_name="myDisplayName"` * * `regex(display_name, "^A")` -> The display name starts with an A. * * `labels.myKey="myValue"` */ filter?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListIndexEndpointsResponse.next_page_token of the previous * IndexEndpointService.ListIndexEndpoints call. */ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsIndexEndpointsListOptions(data: any): ProjectsLocationsIndexEndpointsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsIndexEndpointsListOptions(data: any): ProjectsLocationsIndexEndpointsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsIndexEndpointsOperationsList. */ export interface ProjectsLocationsIndexEndpointsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsIndexEndpointsOperationsWait. */ export interface ProjectsLocationsIndexEndpointsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsIndexEndpointsOperationsWaitOptions(data: any): ProjectsLocationsIndexEndpointsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsIndexEndpointsOperationsWaitOptions(data: any): ProjectsLocationsIndexEndpointsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsIndexEndpointsPatch. */ export interface ProjectsLocationsIndexEndpointsPatchOptions { /** * Required. The update mask applies to the resource. See * google.protobuf.FieldMask. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsIndexEndpointsPatchOptions(data: any): ProjectsLocationsIndexEndpointsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsIndexEndpointsPatchOptions(data: any): ProjectsLocationsIndexEndpointsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ?
data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsIndexesList. */ export interface ProjectsLocationsIndexesListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListIndexesResponse.next_page_token of the previous * IndexService.ListIndexes call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsIndexesListOptions(data: any): ProjectsLocationsIndexesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsIndexesListOptions(data: any): ProjectsLocationsIndexesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsIndexesOperationsList. */ export interface ProjectsLocationsIndexesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsIndexesOperationsWait. */ export interface ProjectsLocationsIndexesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsIndexesOperationsWaitOptions(data: any): ProjectsLocationsIndexesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsIndexesOperationsWaitOptions(data: any): ProjectsLocationsIndexesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsIndexesPatch. */ export interface ProjectsLocationsIndexesPatchOptions { /** * The update mask applies to the resource. For the `FieldMask` definition, * see google.protobuf.FieldMask. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsIndexesPatchOptions(data: any): ProjectsLocationsIndexesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsIndexesPatchOptions(data: any): ProjectsLocationsIndexesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsList. */ export interface ProjectsLocationsListOptions { /** * Optional. A list of extra location types that should be used as conditions * for controlling the visibility of the locations. */ extraLocationTypes?: string; /** * A filter to narrow down results to a preferred subset. The filtering * language accepts strings like `"displayName=tokyo"`, and is documented in * more detail in [AIP-160](https://google.aip.dev/160). */ filter?: string; /** * The maximum number of results to return. If not set, the service selects a * default. */ pageSize?: number; /** * A page token received from the `next_page_token` field in the response. * Send that page token to receive the subsequent page. 
*/ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsCreate. */ export interface ProjectsLocationsMetadataStoresArtifactsCreateOptions { /** * The {artifact} portion of the resource name with the format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/artifacts/{artifact}` * If not provided, the Artifact's ID will be a UUID generated by the service. * Must be 4-128 characters in length. Valid characters are `/a-z-/`. Must be * unique across all Artifacts in the parent MetadataStore. (Otherwise the * request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller * can't view the preexisting Artifact.) */ artifactId?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsDelete. */ export interface ProjectsLocationsMetadataStoresArtifactsDeleteOptions { /** * Optional. The etag of the Artifact to delete. If this is provided, it must * match the server's etag. Otherwise, the request will fail with a * FAILED_PRECONDITION. */ etag?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsList. */ export interface ProjectsLocationsMetadataStoresArtifactsListOptions { /** * Filter specifying the boolean condition for the Artifacts to satisfy in * order to be part of the result set. The syntax to define filter query is * based on https://google.aip.dev/160. The supported set of filters include * the following: * **Attribute filtering**: For example: `display_name = * "test"`. Supported fields include: `name`, `display_name`, `uri`, `state`, * `schema_title`, `create_time`, and `update_time`. Time fields, such as * `create_time` and `update_time`, require values specified in RFC-3339 * format. For example: `create_time = "2020-11-19T11:30:00-04:00"` * * **Metadata field**: To filter on metadata fields use traversal operation as * follows: `metadata..`. For example: `metadata.field_1.number_value = 10.0` * In case the field name contains special characters (such as colon), one can * embed it inside double quote. For example: `metadata."field:1".number_value * = 10.0` * **Context based filtering**: To filter Artifacts based on the * contexts to which they belong, use the function operator with the full * resource name `in_context()`. For example: * `in_context("projects//locations//metadataStores//contexts/")` Each of the * above supported filter types can be combined together using logical * operators (`AND` & `OR`). Maximum nested expression depth allowed is 5. For * example: `display_name = "test" AND metadata.field1.bool_value = true`. */ filter?: string; /** * How the list of messages is ordered. Specify the values to order by and an * ordering operation. The default sorting order is ascending. To specify * descending order for a field, users append a " desc" suffix; for example: * "foo desc, bar". Subfields are specified with a `.` character, such as * foo.bar. see https://google.aip.dev/132#ordering for more details. */ orderBy?: string; /** * The maximum number of Artifacts to return. The service may return fewer. * Must be in range 1-1000, inclusive. Defaults to 100. */ pageSize?: number; /** * A page token, received from a previous MetadataService.ListArtifacts call. * Provide this to retrieve the subsequent page. When paginating, all other * provided parameters must match the call that provided the page token. * (Otherwise the request will fail with INVALID_ARGUMENT error.) 
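 *
 * A usage sketch for these filters (assumes an `AIplatform` instance `ai`;
 * the MetadataStore name is a placeholder, and the filter and orderBy values
 * are taken from the examples documented on the fields above):
 *
 * ```ts
 * const res = await ai.projectsLocationsMetadataStoresArtifactsList(
 *   "projects/my-project/locations/us-central1/metadataStores/default",
 *   {
 *     filter: `display_name = "test" AND create_time > "2020-11-19T11:30:00-04:00"`,
 *     orderBy: "create_time desc",
 *     pageSize: 100,
 *   },
 * );
 * ```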
*/ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsOperationsList. */ export interface ProjectsLocationsMetadataStoresArtifactsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsOperationsWait. */ export interface ProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresArtifactsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsPatch. */ export interface ProjectsLocationsMetadataStoresArtifactsPatchOptions { /** * If set to true, and the Artifact is not found, a new Artifact is created. */ allowMissing?: boolean; /** * Optional. A FieldMask indicating which fields should be updated. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsMetadataStoresArtifactsPatchOptions(data: any): ProjectsLocationsMetadataStoresArtifactsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresArtifactsPatchOptions(data: any): ProjectsLocationsMetadataStoresArtifactsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresArtifactsQueryArtifactLineageSubgraph. */ export interface ProjectsLocationsMetadataStoresArtifactsQueryArtifactLineageSubgraphOptions { /** * Filter specifying the boolean condition for the Artifacts to satisfy in * order to be part of the Lineage Subgraph. The syntax to define filter query * is based on https://google.aip.dev/160. The supported set of filters * include the following: * **Attribute filtering**: For example: * `display_name = "test"` Supported fields include: `name`, `display_name`, * `uri`, `state`, `schema_title`, `create_time`, and `update_time`. Time * fields, such as `create_time` and `update_time`, require values specified * in RFC-3339 format. For example: `create_time = * "2020-11-19T11:30:00-04:00"` * **Metadata field**: To filter on metadata * fields use traversal operation as follows: `metadata..`. For example: * `metadata.field_1.number_value = 10.0` In case the field name contains * special characters (such as colon), one can embed it inside double quote. * For example: `metadata."field:1".number_value = 10.0` Each of the above * supported filter types can be combined together using logical operators * (`AND` & `OR`). Maximum nested expression depth allowed is 5. For example: * `display_name = "test" AND metadata.field1.bool_value = true`. 
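 * As an options value, the combined example above would be passed as
 * (illustrative): `{ filter: 'display_name = "test" AND
 * metadata.field1.bool_value = true' }`.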
*/ filter?: string; /** * Specifies the size of the lineage graph in terms of number of hops from * the specified artifact. Negative Value: INVALID_ARGUMENT error is returned * 0: Only input artifact is returned. No value: Transitive closure is * performed to return the complete graph. */ maxHops?: number; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresContextsCreate. */ export interface ProjectsLocationsMetadataStoresContextsCreateOptions { /** * The {context} portion of the resource name with the format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/contexts/{context}`. * If not provided, the Context's ID will be a UUID generated by the service. * Must be 4-128 characters in length. Valid characters are `/a-z-/`. Must be * unique across all Contexts in the parent MetadataStore. (Otherwise the * request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the caller * can't view the preexisting Context.) */ contextId?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresContextsDelete. */ export interface ProjectsLocationsMetadataStoresContextsDeleteOptions { /** * Optional. The etag of the Context to delete. If this is provided, it must * match the server's etag. Otherwise, the request will fail with a * FAILED_PRECONDITION. */ etag?: string; /** * The force deletion semantics is still undefined. Users should not use this * field. */ force?: boolean; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresContextsList. */ export interface ProjectsLocationsMetadataStoresContextsListOptions { /** * Filter specifying the boolean condition for the Contexts to satisfy in * order to be part of the result set. The syntax to define filter query is * based on https://google.aip.dev/160. Following are the supported set of * filters: * **Attribute filtering**: For example: `display_name = "test"`. * Supported fields include: `name`, `display_name`, `schema_title`, * `create_time`, and `update_time`. Time fields, such as `create_time` and * `update_time`, require values specified in RFC-3339 format. For example: * `create_time = "2020-11-19T11:30:00-04:00"`. * **Metadata field**: To * filter on metadata fields use traversal operation as follows: `metadata..`. * For example: `metadata.field_1.number_value = 10.0`. In case the field name * contains special characters (such as colon), one can embed it inside double * quote. For example: `metadata."field:1".number_value = 10.0` * **Parent * Child filtering**: To filter Contexts based on parent-child relationship * use the HAS operator as follows: ``` parent_contexts: * "projects//locations//metadataStores//contexts/" child_contexts: * "projects//locations//metadataStores//contexts/" ``` Each of the above * supported filters can be combined together using logical operators (`AND` & * `OR`). Maximum nested expression depth allowed is 5. For example: * `display_name = "test" AND metadata.field1.bool_value = true`. */ filter?: string; /** * How the list of messages is ordered. Specify the values to order by and an * ordering operation. The default sorting order is ascending. To specify * descending order for a field, users append a " desc" suffix; for example: * "foo desc, bar". Subfields are specified with a `.` character, such as * foo.bar. see https://google.aip.dev/132#ordering for more details. */ orderBy?: string; /** * The maximum number of Contexts to return. The service may return fewer. * Must be in range 1-1000, inclusive. Defaults to 100. 
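 * For example (illustrative), `{ pageSize: 500 }` requests up to 500 Contexts
 * per page; as noted above, the service may still return fewer.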
*/ pageSize?: number; /** * A page token, received from a previous MetadataService.ListContexts call. * Provide this to retrieve the subsequent page. When paginating, all other * provided parameters must match the call that provided the page token. * (Otherwise the request will fail with INVALID_ARGUMENT error.) */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresContextsOperationsList. */ export interface ProjectsLocationsMetadataStoresContextsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresContextsOperationsWait. */ export interface ProjectsLocationsMetadataStoresContextsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsMetadataStoresContextsOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresContextsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresContextsOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresContextsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresContextsPatch. */ export interface ProjectsLocationsMetadataStoresContextsPatchOptions { /** * If set to true, and the Context is not found, a new Context is created. */ allowMissing?: boolean; /** * Optional. A FieldMask indicating which fields should be updated. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsMetadataStoresContextsPatchOptions(data: any): ProjectsLocationsMetadataStoresContextsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresContextsPatchOptions(data: any): ProjectsLocationsMetadataStoresContextsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsMetadataStoresCreate. */ export interface ProjectsLocationsMetadataStoresCreateOptions { /** * The {metadatastore} portion of the resource name with the format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}` If * not provided, the MetadataStore's ID will be a UUID generated by the * service. Must be 4-128 characters in length. Valid characters are `/a-z-/`. * Must be unique across all MetadataStores in the parent Location. (Otherwise * the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED if the * caller can't view the preexisting MetadataStore.) */ metadataStoreId?: string; } /** * Additional options for AIplatform#projectsLocationsMetadataStoresDelete. */ export interface ProjectsLocationsMetadataStoresDeleteOptions { /** * Deprecated: Field is no longer supported. */ force?: boolean; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresExecutionsCreate. 
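 * For example (the ID below is purely illustrative), passing
 * `{ executionId: "my-training-run" }` requests that resource ID instead of a
 * service-generated UUID; the length and character constraints are documented
 * on the field below.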
*/ export interface ProjectsLocationsMetadataStoresExecutionsCreateOptions { /** * The {execution} portion of the resource name with the format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/executions/{execution}` * If not provided, the Execution's ID will be a UUID generated by the * service. Must be 4-128 characters in length. Valid characters are `/a-z-/`. * Must be unique across all Executions in the parent MetadataStore. * (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED * if the caller can't view the preexisting Execution.) */ executionId?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresExecutionsDelete. */ export interface ProjectsLocationsMetadataStoresExecutionsDeleteOptions { /** * Optional. The etag of the Execution to delete. If this is provided, it * must match the server's etag. Otherwise, the request will fail with a * FAILED_PRECONDITION. */ etag?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresExecutionsList. */ export interface ProjectsLocationsMetadataStoresExecutionsListOptions { /** * Filter specifying the boolean condition for the Executions to satisfy in * order to be part of the result set. The syntax to define filter query is * based on https://google.aip.dev/160. Following are the supported set of * filters: * **Attribute filtering**: For example: `display_name = "test"`. * Supported fields include: `name`, `display_name`, `state`, `schema_title`, * `create_time`, and `update_time`. Time fields, such as `create_time` and * `update_time`, require values specified in RFC-3339 format. For example: * `create_time = "2020-11-19T11:30:00-04:00"`. * **Metadata field**: To * filter on metadata fields use traversal operation as follows: `metadata..` * For example: `metadata.field_1.number_value = 10.0` In case the field name * contains special characters (such as colon), one can embed it inside double * quote. For example: `metadata."field:1".number_value = 10.0` * **Context * based filtering**: To filter Executions based on the contexts to which they * belong use the function operator with the full resource name: * `in_context()`. For example: * `in_context("projects//locations//metadataStores//contexts/")` Each of the * above supported filters can be combined together using logical operators * (`AND` & `OR`). Maximum nested expression depth allowed is 5. For example: * `display_name = "test" AND metadata.field1.bool_value = true`. */ filter?: string; /** * How the list of messages is ordered. Specify the values to order by and an * ordering operation. The default sorting order is ascending. To specify * descending order for a field, users append a " desc" suffix; for example: * "foo desc, bar". Subfields are specified with a `.` character, such as * foo.bar. see https://google.aip.dev/132#ordering for more details. */ orderBy?: string; /** * The maximum number of Executions to return. The service may return fewer. * Must be in range 1-1000, inclusive. Defaults to 100. */ pageSize?: number; /** * A page token, received from a previous MetadataService.ListExecutions * call. Provide this to retrieve the subsequent page. When paginating, all * other provided parameters must match the call that provided the page token. * (Otherwise the request will fail with an INVALID_ARGUMENT error.) */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresExecutionsOperationsList. 
*/ export interface ProjectsLocationsMetadataStoresExecutionsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresExecutionsOperationsWait. */ export interface ProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresExecutionsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresExecutionsPatch. */ export interface ProjectsLocationsMetadataStoresExecutionsPatchOptions { /** * If set to true, and the Execution is not found, a new Execution is * created. */ allowMissing?: boolean; /** * Optional. A FieldMask indicating which fields should be updated. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsMetadataStoresExecutionsPatchOptions(data: any): ProjectsLocationsMetadataStoresExecutionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresExecutionsPatchOptions(data: any): ProjectsLocationsMetadataStoresExecutionsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsMetadataStoresList. */ export interface ProjectsLocationsMetadataStoresListOptions { /** * The maximum number of Metadata Stores to return. The service may return * fewer. Must be in range 1-1000, inclusive. Defaults to 100. */ pageSize?: number; /** * A page token, received from a previous MetadataService.ListMetadataStores * call. Provide this to retrieve the subsequent page. When paginating, all * other provided parameters must match the call that provided the page token. * (Otherwise the request will fail with INVALID_ARGUMENT error.) */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresMetadataSchemasCreate. */ export interface ProjectsLocationsMetadataStoresMetadataSchemasCreateOptions { /** * The {metadata_schema} portion of the resource name with the format: * `projects/{project}/locations/{location}/metadataStores/{metadatastore}/metadataSchemas/{metadataschema}` * If not provided, the MetadataStore's ID will be a UUID generated by the * service. Must be 4-128 characters in length. Valid characters are `/a-z-/`. * Must be unique across all MetadataSchemas in the parent Location. * (Otherwise the request will fail with ALREADY_EXISTS, or PERMISSION_DENIED * if the caller can't view the preexisting MetadataSchema.) */ metadataSchemaId?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresMetadataSchemasList. 
*/ export interface ProjectsLocationsMetadataStoresMetadataSchemasListOptions { /** * A query to filter available MetadataSchemas for matching results. */ filter?: string; /** * The maximum number of MetadataSchemas to return. The service may return * fewer. Must be in range 1-1000, inclusive. Defaults to 100. */ pageSize?: number; /** * A page token, received from a previous MetadataService.ListMetadataSchemas * call. Provide this to retrieve the next page. When paginating, all other * provided parameters must match the call that provided the page token. * (Otherwise the request will fail with INVALID_ARGUMENT error.) */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresOperationsList. */ export interface ProjectsLocationsMetadataStoresOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMetadataStoresOperationsWait. */ export interface ProjectsLocationsMetadataStoresOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsMetadataStoresOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsMetadataStoresOperationsWaitOptions(data: any): ProjectsLocationsMetadataStoresOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsMigratableResourcesOperationsList. */ export interface ProjectsLocationsMigratableResourcesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsMigratableResourcesOperationsWait. */ export interface ProjectsLocationsMigratableResourcesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsMigratableResourcesOperationsWaitOptions(data: any): ProjectsLocationsMigratableResourcesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsMigratableResourcesOperationsWaitOptions(data: any): ProjectsLocationsMigratableResourcesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsModelDeploymentMonitoringJobsList. */ export interface ProjectsLocationsModelDeploymentMonitoringJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `state` supports `=`, `!=` * comparisons. * `create_time` supports `=`, `!=`,`<`, `<=`,`>`, `>=` * comparisons. 
`create_time` must be in RFC 3339 format. * `labels` supports * general map functions that is: `labels.key=value` - key:value equality * `labels.key:* - key existence Some examples of using the filter are: * * `state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"` * * `state!="JOB_STATE_FAILED" OR display_name="my_job"` * `NOT * display_name="my_job"` * `create_time>"2021-05-18T00:00:00Z"` * * `labels.keyA=valueA` * `labels.keyB:*` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; /** * Mask specifying which fields to read */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsModelDeploymentMonitoringJobsListOptions(data: any): ProjectsLocationsModelDeploymentMonitoringJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsModelDeploymentMonitoringJobsListOptions(data: any): ProjectsLocationsModelDeploymentMonitoringJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsModelDeploymentMonitoringJobsOperationsList. */ export interface ProjectsLocationsModelDeploymentMonitoringJobsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsModelDeploymentMonitoringJobsOperationsWait. */ export interface ProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions(data: any): ProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions(data: any): ProjectsLocationsModelDeploymentMonitoringJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsModelDeploymentMonitoringJobsPatch. */ export interface ProjectsLocationsModelDeploymentMonitoringJobsPatchOptions { /** * Required. The update mask is used to specify the fields to be overwritten * in the ModelDeploymentMonitoringJob resource by the update. The fields * specified in the update_mask are relative to the resource, not the full * request. A field will be overwritten if it is in the mask. If the user does * not provide a mask then only the non-empty fields present in the request * will be overwritten. Set the update_mask to `*` to override all fields. For * the objective config, the user can either provide the update mask for * model_deployment_monitoring_objective_configs or any combination of its * nested fields, such as: * model_deployment_monitoring_objective_configs.objective_config.training_dataset. 
* Updatable fields: * `display_name` * * `model_deployment_monitoring_schedule_config` * * `model_monitoring_alert_config` * `logging_sampling_strategy` * `labels` * * `log_ttl` * `enable_monitoring_pipeline_logs` . and * * `model_deployment_monitoring_objective_configs` . or * * `model_deployment_monitoring_objective_configs.objective_config.training_dataset` * * * `model_deployment_monitoring_objective_configs.objective_config.training_prediction_skew_detection_config` * * * `model_deployment_monitoring_objective_configs.objective_config.prediction_drift_detection_config` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsModelDeploymentMonitoringJobsPatchOptions(data: any): ProjectsLocationsModelDeploymentMonitoringJobsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsModelDeploymentMonitoringJobsPatchOptions(data: any): ProjectsLocationsModelDeploymentMonitoringJobsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsModelsEvaluationsList. */ export interface ProjectsLocationsModelsEvaluationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListModelEvaluationsResponse.next_page_token of the previous * ModelService.ListModelEvaluations call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsModelsEvaluationsListOptions(data: any): ProjectsLocationsModelsEvaluationsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsModelsEvaluationsListOptions(data: any): ProjectsLocationsModelsEvaluationsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsModelsEvaluationsOperationsList. */ export interface ProjectsLocationsModelsEvaluationsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsModelsEvaluationsOperationsWait. */ export interface ProjectsLocationsModelsEvaluationsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsModelsEvaluationsOperationsWaitOptions(data: any): ProjectsLocationsModelsEvaluationsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsModelsEvaluationsOperationsWaitOptions(data: any): ProjectsLocationsModelsEvaluationsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsModelsEvaluationsSlicesList. */ export interface ProjectsLocationsModelsEvaluationsSlicesListOptions { /** * The standard list filter. 
* `slice.dimension` - for =. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListModelEvaluationSlicesResponse.next_page_token of the previous * ModelService.ListModelEvaluationSlices call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsModelsEvaluationsSlicesListOptions(data: any): ProjectsLocationsModelsEvaluationsSlicesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsModelsEvaluationsSlicesListOptions(data: any): ProjectsLocationsModelsEvaluationsSlicesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsModelsGetIamPolicy. */ export interface ProjectsLocationsModelsGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for AIplatform#projectsLocationsModelsListCheckpoints. */ export interface ProjectsLocationsModelsListCheckpointsOptions { /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * next_page_token of the previous ListModelVersionCheckpoints call. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsModelsList. */ export interface ProjectsLocationsModelsListOptions { /** * An expression for filtering the results of the request. For field names * both snake_case and camelCase are supported. * `model` supports = and !=. * `model` represents the Model ID, i.e. the last segment of the Model's * resource name. * `display_name` supports = and != * `labels` supports * general map functions that is: * `labels.key=value` - key:value equality * * `labels.key:* or labels:key - key existence * A key including a space must * be quoted. `labels."a key"`. * `base_model_name` only supports = Some * examples: * `model=1234` * `displayName="myDisplayName"` * * `labels.myKey="myValue"` * `baseModelName="text-bison"` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `display_name` * `create_time` * `update_time` Example: `display_name, * create_time desc`. */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListModelsResponse.next_page_token of the previous ModelService.ListModels * call. */ pageToken?: string; /** * Mask specifying which fields to read. 
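 * A FieldMask is a comma-separated list of field paths; for example
 * (illustrative, and field-path casing may vary):
 * `display_name,create_time` to read only those Model fields.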
*/ readMask?: string /* FieldMask */; } function serializeProjectsLocationsModelsListOptions(data: any): ProjectsLocationsModelsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsModelsListOptions(data: any): ProjectsLocationsModelsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsModelsListVersions. */ export interface ProjectsLocationsModelsListVersionsOptions { /** * An expression for filtering the results of the request. For field names * both snake_case and camelCase are supported. * `labels` supports general * map functions that is: * `labels.key=value` - key:value equality * * `labels.key:* or labels:key - key existence * A key including a space must * be quoted. `labels."a key"`. Some examples: * `labels.myKey="myValue"` */ filter?: string; /** * A comma-separated list of fields to order by, sorted in ascending order. * Use "desc" after a field name for descending. Supported fields: * * `create_time` * `update_time` Example: `update_time asc, create_time desc`. */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via next_page_token of * the previous ListModelVersions call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsModelsListVersionsOptions(data: any): ProjectsLocationsModelsListVersionsOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsModelsListVersionsOptions(data: any): ProjectsLocationsModelsListVersionsOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsModelsOperationsList. */ export interface ProjectsLocationsModelsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsModelsOperationsWait. */ export interface ProjectsLocationsModelsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsModelsOperationsWaitOptions(data: any): ProjectsLocationsModelsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsModelsOperationsWaitOptions(data: any): ProjectsLocationsModelsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsModelsPatch. */ export interface ProjectsLocationsModelsPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsModelsPatchOptions(data: any): ProjectsLocationsModelsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? 
data["updateMask"] : undefined, }; } function deserializeProjectsLocationsModelsPatchOptions(data: any): ProjectsLocationsModelsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsModelsTestIamPermissions. */ export interface ProjectsLocationsModelsTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for AIplatform#projectsLocationsNasJobsList. */ export interface ProjectsLocationsNasJobsListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `state` supports `=`, `!=` * comparisons. * `create_time` supports `=`, `!=`,`<`, `<=`,`>`, `>=` * comparisons. `create_time` must be in RFC 3339 format. * `labels` supports * general map functions that is: `labels.key=value` - key:value equality * `labels.key:* - key existence Some examples of using the filter are: * * `state="JOB_STATE_SUCCEEDED" AND display_name:"my_job_*"` * * `state!="JOB_STATE_FAILED" OR display_name="my_job"` * `NOT * display_name="my_job"` * `create_time>"2021-05-18T00:00:00Z"` * * `labels.keyA=valueA` * `labels.keyB:*` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListNasJobsResponse.next_page_token of the previous JobService.ListNasJobs * call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsNasJobsListOptions(data: any): ProjectsLocationsNasJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsNasJobsListOptions(data: any): ProjectsLocationsNasJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsNasJobsNasTrialDetailsList. */ export interface ProjectsLocationsNasJobsNasTrialDetailsListOptions { /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListNasTrialDetailsResponse.next_page_token of the previous * JobService.ListNasTrialDetails call. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsNotebookExecutionJobsCreate. */ export interface ProjectsLocationsNotebookExecutionJobsCreateOptions { /** * Optional. User specified ID for the NotebookExecutionJob. */ notebookExecutionJobId?: string; } /** * Additional options for AIplatform#projectsLocationsNotebookExecutionJobsGet. */ export interface ProjectsLocationsNotebookExecutionJobsGetOptions { /** * Optional. The NotebookExecutionJob view. Defaults to BASIC. */ view?: | "NOTEBOOK_EXECUTION_JOB_VIEW_UNSPECIFIED" | "NOTEBOOK_EXECUTION_JOB_VIEW_BASIC" | "NOTEBOOK_EXECUTION_JOB_VIEW_FULL"; } /** * Additional options for * AIplatform#projectsLocationsNotebookExecutionJobsList. */ export interface ProjectsLocationsNotebookExecutionJobsListOptions { /** * Optional. An expression for filtering the results of the request. For * field names both snake_case and camelCase are supported. * * `notebookExecutionJob` supports = and !=. 
`notebookExecutionJob` represents * the NotebookExecutionJob ID. * `displayName` supports = and != and regex. * * `schedule` supports = and != and regex. Some examples: * * `notebookExecutionJob="123"` * `notebookExecutionJob="my-execution-job"` * * `displayName="myDisplayName"` and `displayName=~"myDisplayNameRegex"` */ filter?: string; /** * Optional. A comma-separated list of fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. Supported * fields: * `display_name` * `create_time` * `update_time` Example: * `display_name, create_time desc`. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListNotebookExecutionJobsResponse.next_page_token of the previous * NotebookService.ListNotebookExecutionJobs call. */ pageToken?: string; /** * Optional. The NotebookExecutionJob view. Defaults to BASIC. */ view?: | "NOTEBOOK_EXECUTION_JOB_VIEW_UNSPECIFIED" | "NOTEBOOK_EXECUTION_JOB_VIEW_BASIC" | "NOTEBOOK_EXECUTION_JOB_VIEW_FULL"; } /** * Additional options for * AIplatform#projectsLocationsNotebookExecutionJobsOperationsList. */ export interface ProjectsLocationsNotebookExecutionJobsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsNotebookExecutionJobsOperationsWait. */ export interface ProjectsLocationsNotebookExecutionJobsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsNotebookExecutionJobsOperationsWaitOptions(data: any): ProjectsLocationsNotebookExecutionJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsNotebookExecutionJobsOperationsWaitOptions(data: any): ProjectsLocationsNotebookExecutionJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsNotebookRuntimesList. */ export interface ProjectsLocationsNotebookRuntimesListOptions { /** * Optional. An expression for filtering the results of the request. For * field names both snake_case and camelCase are supported. * * `notebookRuntime` supports = and !=. `notebookRuntime` represents the * NotebookRuntime ID, i.e. the last segment of the NotebookRuntime's resource * name. * `displayName` supports = and != and regex. * * `notebookRuntimeTemplate` supports = and !=. `notebookRuntimeTemplate` * represents the NotebookRuntimeTemplate ID, i.e. the last segment of the * NotebookRuntimeTemplate's resource name. * `healthState` supports = and !=. * healthState enum: [HEALTHY, UNHEALTHY, HEALTH_STATE_UNSPECIFIED]. * * `runtimeState` supports = and !=. runtimeState enum: * [RUNTIME_STATE_UNSPECIFIED, RUNNING, BEING_STARTED, BEING_STOPPED, STOPPED, * BEING_UPGRADED, ERROR, INVALID]. * `runtimeUser` supports = and !=. * API * version is UI only: `uiState` supports = and !=. 
uiState enum: * [UI_RESOURCE_STATE_UNSPECIFIED, UI_RESOURCE_STATE_BEING_CREATED, * UI_RESOURCE_STATE_ACTIVE, UI_RESOURCE_STATE_BEING_DELETED, * UI_RESOURCE_STATE_CREATION_FAILED]. * `notebookRuntimeType` supports = and * !=. notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. * `machineType` * supports = and !=. * `acceleratorType` supports = and !=. Some examples: * * `notebookRuntime="notebookRuntime123"` * `displayName="myDisplayName"` and * `displayName=~"myDisplayNameRegex"` * * `notebookRuntimeTemplate="notebookRuntimeTemplate321"` * * `healthState=HEALTHY` * `runtimeState=RUNNING` * * `runtimeUser="test@google.com"` * `uiState=UI_RESOURCE_STATE_BEING_DELETED` * * `notebookRuntimeType=USER_DEFINED` * `machineType=e2-standard-4` * * `acceleratorType=NVIDIA_TESLA_T4` */ filter?: string; /** * Optional. A comma-separated list of fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. Supported * fields: * `display_name` * `create_time` * `update_time` Example: * `display_name, create_time desc`. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListNotebookRuntimesResponse.next_page_token of the previous * NotebookService.ListNotebookRuntimes call. */ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsNotebookRuntimesListOptions(data: any): ProjectsLocationsNotebookRuntimesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsNotebookRuntimesListOptions(data: any): ProjectsLocationsNotebookRuntimesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimesOperationsList. */ export interface ProjectsLocationsNotebookRuntimesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimesOperationsWait. */ export interface ProjectsLocationsNotebookRuntimesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsNotebookRuntimesOperationsWaitOptions(data: any): ProjectsLocationsNotebookRuntimesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsNotebookRuntimesOperationsWaitOptions(data: any): ProjectsLocationsNotebookRuntimesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesCreate. */ export interface ProjectsLocationsNotebookRuntimeTemplatesCreateOptions { /** * Optional. User specified ID for the notebook runtime template. */ notebookRuntimeTemplateId?: string; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesGetIamPolicy. 
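 * For example, to request a policy format that can include conditional role
 * bindings (an illustrative use of the option documented below):
 * `{ ["options.requestedPolicyVersion"]: 3 }`.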
*/ export interface ProjectsLocationsNotebookRuntimeTemplatesGetIamPolicyOptions { /** * Optional. The maximum policy version that will be used to format the * policy. Valid values are 0, 1, and 3. Requests specifying an invalid value * will be rejected. Requests for policies with any conditional role bindings * must specify version 3. Policies with no conditional role bindings may * specify any valid value or leave the field unset. The policy in the * response might use the policy version that you specified, or it might use a * lower policy version. For example, if you specify version 3, but the policy * has no conditional role bindings, the response uses version 1. To learn * which resources support conditions in their IAM policies, see the [IAM * documentation](https://cloud.google.com/iam/help/conditions/resource-policies). */ ["options.requestedPolicyVersion"]?: number; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesList. */ export interface ProjectsLocationsNotebookRuntimeTemplatesListOptions { /** * Optional. An expression for filtering the results of the request. For * field names both snake_case and camelCase are supported. * * `notebookRuntimeTemplate` supports = and !=. `notebookRuntimeTemplate` * represents the NotebookRuntimeTemplate ID, i.e. the last segment of the * NotebookRuntimeTemplate's resource name. * `display_name` supports = and != * * `labels` supports general map functions that is: * `labels.key=value` - * key:value equality * `labels.key:* or labels:key - key existence * A key * including a space must be quoted. `labels."a key"`. * `notebookRuntimeType` * supports = and !=. notebookRuntimeType enum: [USER_DEFINED, ONE_CLICK]. * * `machineType` supports = and !=. * `acceleratorType` supports = and !=. * Some examples: * `notebookRuntimeTemplate=notebookRuntimeTemplate123` * * `displayName="myDisplayName"` * `labels.myKey="myValue"` * * `notebookRuntimeType=USER_DEFINED` * `machineType=e2-standard-4` * * `acceleratorType=NVIDIA_TESLA_T4` */ filter?: string; /** * Optional. A comma-separated list of fields to order by, sorted in * ascending order. Use "desc" after a field name for descending. Supported * fields: * `display_name` * `create_time` * `update_time` Example: * `display_name, create_time desc`. */ orderBy?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListNotebookRuntimeTemplatesResponse.next_page_token of the previous * NotebookService.ListNotebookRuntimeTemplates call. */ pageToken?: string; /** * Optional. Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsNotebookRuntimeTemplatesListOptions(data: any): ProjectsLocationsNotebookRuntimeTemplatesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsNotebookRuntimeTemplatesListOptions(data: any): ProjectsLocationsNotebookRuntimeTemplatesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesOperationsList. */ export interface ProjectsLocationsNotebookRuntimeTemplatesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. 
*/ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesOperationsWait. */ export interface ProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions(data: any): ProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions(data: any): ProjectsLocationsNotebookRuntimeTemplatesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesPatch. */ export interface ProjectsLocationsNotebookRuntimeTemplatesPatchOptions { /** * Required. The update mask applies to the resource. For the `FieldMask` * definition, see google.protobuf.FieldMask. Input format: `{paths: * "${updated_filed}"}` Updatable fields: * `encryption_spec.kms_key_name` */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsNotebookRuntimeTemplatesPatchOptions(data: any): ProjectsLocationsNotebookRuntimeTemplatesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsNotebookRuntimeTemplatesPatchOptions(data: any): ProjectsLocationsNotebookRuntimeTemplatesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsNotebookRuntimeTemplatesTestIamPermissions. */ export interface ProjectsLocationsNotebookRuntimeTemplatesTestIamPermissionsOptions { /** * The set of permissions to check for the `resource`. Permissions with * wildcards (such as `*` or `storage.*`) are not allowed. For more * information see [IAM * Overview](https://cloud.google.com/iam/docs/overview#permissions). */ permissions?: string; } /** * Additional options for AIplatform#projectsLocationsOperationsList. */ export interface ProjectsLocationsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsOperationsWait. */ export interface ProjectsLocationsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsOperationsWaitOptions(data: any): ProjectsLocationsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsOperationsWaitOptions(data: any): ProjectsLocationsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsPersistentResourcesCreate. 
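 * For example (the ID below is illustrative): `{ persistentResourceId:
 * "my-training-cluster" }`, which satisfies the pattern documented on the
 * field below.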
*/ export interface ProjectsLocationsPersistentResourcesCreateOptions { /** * Required. The ID to use for the PersistentResource, which becomes the final * component of the PersistentResource's resource name. The maximum length is * 63 characters, and valid characters are * `/^[a-z]([a-z0-9-]{0,61}[a-z0-9])?$/`. */ persistentResourceId?: string; } /** * Additional options for AIplatform#projectsLocationsPersistentResourcesList. */ export interface ProjectsLocationsPersistentResourcesListOptions { /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListPersistentResourcesResponse.next_page_token of the previous * PersistentResourceService.ListPersistentResource call. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsPersistentResourcesOperationsList. */ export interface ProjectsLocationsPersistentResourcesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsPersistentResourcesOperationsWait. */ export interface ProjectsLocationsPersistentResourcesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsPersistentResourcesOperationsWaitOptions(data: any): ProjectsLocationsPersistentResourcesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsPersistentResourcesOperationsWaitOptions(data: any): ProjectsLocationsPersistentResourcesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsPersistentResourcesPatch. */ export interface ProjectsLocationsPersistentResourcesPatchOptions { /** * Required. Specify the fields to be overwritten in the PersistentResource * by the update method. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsPersistentResourcesPatchOptions(data: any): ProjectsLocationsPersistentResourcesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsPersistentResourcesPatchOptions(data: any): ProjectsLocationsPersistentResourcesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsPipelineJobsCreate. */ export interface ProjectsLocationsPipelineJobsCreateOptions { /** * The ID to use for the PipelineJob, which will become the final component * of the PipelineJob name. If not provided, an ID will be automatically * generated. This value should be less than 128 characters, and valid * characters are `/a-z-/`. */ pipelineJobId?: string; } /** * Additional options for AIplatform#projectsLocationsPipelineJobsList. */ export interface ProjectsLocationsPipelineJobsListOptions { /** * Lists the PipelineJobs that match the filter expression. The following * fields are supported: * `pipeline_name`: Supports `=` and `!=` comparisons. 
* * `display_name`: Supports `=`, `!=` comparisons, and `:` wildcard. * * `pipeline_job_user_id`: Supports `=`, `!=` comparisons, and `:` wildcard. * for example, can check if pipeline's display_name contains *step* by doing * display_name:\"*step*\" * `state`: Supports `=` and `!=` comparisons. * * `create_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons. * Values must be in RFC 3339 format. * `update_time`: Supports `=`, `!=`, * `<`, `>`, `<=`, and `>=` comparisons. Values must be in RFC 3339 format. * * `end_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` comparisons. * Values must be in RFC 3339 format. * `labels`: Supports key-value equality * and key presence. * `template_uri`: Supports `=`, `!=` comparisons, and `:` * wildcard. * `template_metadata.version`: Supports `=`, `!=` comparisons, * and `:` wildcard. Filter expressions can be combined together using logical * operators (`AND` & `OR`). For example: `pipeline_name="test" AND * create_time>"2020-05-18T13:30:00Z"`. The syntax to define filter expression * is based on https://google.aip.dev/160. Examples: * * `create_time>"2021-05-18T00:00:00Z" OR update_time>"2020-05-18T00:00:00Z"` * PipelineJobs created or updated after 2020-05-18 00:00:00 UTC. * * `labels.env = "prod"` PipelineJobs with label "env" set to "prod". */ filter?: string; /** * A comma-separated list of fields to order by. The default sort order is in * ascending order. Use "desc" after a field name for descending. You can have * multiple order_by fields provided e.g. "create_time desc, end_time", * "end_time, start_time, update_time" For example, using "create_time desc, * end_time" will order results by create time in descending order, and if * there are multiple jobs having the same create time, order them by the end * time in ascending order. if order_by is not specified, it will order by * default order is create time in descending order. Supported fields: * * `create_time` * `update_time` * `end_time` * `start_time` */ orderBy?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListPipelineJobsResponse.next_page_token of the previous * PipelineService.ListPipelineJobs call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsPipelineJobsListOptions(data: any): ProjectsLocationsPipelineJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsPipelineJobsListOptions(data: any): ProjectsLocationsPipelineJobsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsPipelineJobsOperationsList. */ export interface ProjectsLocationsPipelineJobsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsPipelineJobsOperationsWait. */ export interface ProjectsLocationsPipelineJobsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. 
*/ timeout?: number /* Duration */; } function serializeProjectsLocationsPipelineJobsOperationsWaitOptions(data: any): ProjectsLocationsPipelineJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsPipelineJobsOperationsWaitOptions(data: any): ProjectsLocationsPipelineJobsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsRagCorporaDelete. */ export interface ProjectsLocationsRagCorporaDeleteOptions { /** * Optional. If set to true, any RagFiles in this RagCorpus will also be * deleted. Otherwise, the request will only work if the RagCorpus has no * RagFiles. */ force?: boolean; } /** * Additional options for AIplatform#projectsLocationsRagCorporaList. */ export interface ProjectsLocationsRagCorporaListOptions { /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListRagCorporaResponse.next_page_token of the previous * VertexRagDataService.ListRagCorpora call. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsRagCorporaOperationsList. */ export interface ProjectsLocationsRagCorporaOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsRagCorporaOperationsWait. */ export interface ProjectsLocationsRagCorporaOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsRagCorporaOperationsWaitOptions(data: any): ProjectsLocationsRagCorporaOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsRagCorporaOperationsWaitOptions(data: any): ProjectsLocationsRagCorporaOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsRagCorporaRagFilesList. */ export interface ProjectsLocationsRagCorporaRagFilesListOptions { /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. Typically obtained via * ListRagFilesResponse.next_page_token of the previous * VertexRagDataService.ListRagFiles call. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsRagCorporaRagFilesOperationsList. */ export interface ProjectsLocationsRagCorporaRagFilesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsRagCorporaRagFilesOperationsWait. */ export interface ProjectsLocationsRagCorporaRagFilesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. 
*/ timeout?: number /* Duration */; } function serializeProjectsLocationsRagCorporaRagFilesOperationsWaitOptions(data: any): ProjectsLocationsRagCorporaRagFilesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsRagCorporaRagFilesOperationsWaitOptions(data: any): ProjectsLocationsRagCorporaRagFilesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsRagEngineConfigOperationsList. */ export interface ProjectsLocationsRagEngineConfigOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsRagEngineConfigOperationsWait. */ export interface ProjectsLocationsRagEngineConfigOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsRagEngineConfigOperationsWaitOptions(data: any): ProjectsLocationsRagEngineConfigOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsRagEngineConfigOperationsWaitOptions(data: any): ProjectsLocationsRagEngineConfigOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsReasoningEnginesDelete. */ export interface ProjectsLocationsReasoningEnginesDeleteOptions { /** * Optional. If set to true, child resources of this reasoning engine will * also be deleted. Otherwise, the request will fail with FAILED_PRECONDITION * error when the reasoning engine has undeleted child resources. */ force?: boolean; } /** * Additional options for AIplatform#projectsLocationsReasoningEnginesList. */ export interface ProjectsLocationsReasoningEnginesListOptions { /** * Optional. The standard list filter. More detail in * [AIP-160](https://google.aip.dev/160). */ filter?: string; /** * Optional. The standard list page size. */ pageSize?: number; /** * Optional. The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsReasoningEnginesOperationsList. */ export interface ProjectsLocationsReasoningEnginesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsReasoningEnginesOperationsWait. */ export interface ProjectsLocationsReasoningEnginesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsReasoningEnginesOperationsWaitOptions(data: any): ProjectsLocationsReasoningEnginesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? 
data["timeout"] : undefined, }; } function deserializeProjectsLocationsReasoningEnginesOperationsWaitOptions(data: any): ProjectsLocationsReasoningEnginesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsReasoningEnginesPatch. */ export interface ProjectsLocationsReasoningEnginesPatchOptions { /** * Optional. Mask specifying which fields to update. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsReasoningEnginesPatchOptions(data: any): ProjectsLocationsReasoningEnginesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsReasoningEnginesPatchOptions(data: any): ProjectsLocationsReasoningEnginesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsSchedulesList. */ export interface ProjectsLocationsSchedulesListOptions { /** * Lists the Schedules that match the filter expression. The following fields * are supported: * `display_name`: Supports `=`, `!=` comparisons, and `:` * wildcard. * `state`: Supports `=` and `!=` comparisons. * `request`: * Supports existence of the check. (e.g. `create_pipeline_job_request:*` --> * Schedule has create_pipeline_job_request). * `create_time`: Supports `=`, * `!=`, `<`, `>`, `<=`, and `>=` comparisons. Values must be in RFC 3339 * format. * `start_time`: Supports `=`, `!=`, `<`, `>`, `<=`, and `>=` * comparisons. Values must be in RFC 3339 format. * `end_time`: Supports `=`, * `!=`, `<`, `>`, `<=`, `>=` comparisons and `:*` existence check. Values * must be in RFC 3339 format. * `next_run_time`: Supports `=`, `!=`, `<`, * `>`, `<=`, and `>=` comparisons. Values must be in RFC 3339 format. Filter * expressions can be combined together using logical operators (`NOT`, `AND` * & `OR`). The syntax to define filter expression is based on * https://google.aip.dev/160. Examples: * `state="ACTIVE" AND * display_name:"my_schedule_*"` * `NOT display_name="my_schedule"` * * `create_time>"2021-05-18T00:00:00Z"` * `end_time>"2021-05-18T00:00:00Z" OR * NOT end_time:*` * `create_pipeline_job_request:*` */ filter?: string; /** * A comma-separated list of fields to order by. The default sort order is in * ascending order. Use "desc" after a field name for descending. You can have * multiple order_by fields provided. For example, using "create_time desc, * end_time" will order results by create time in descending order, and if * there are multiple schedules having the same create time, order them by the * end time in ascending order. If order_by is not specified, it will order by * default with create_time in descending order. Supported fields: * * `create_time` * `start_time` * `end_time` * `next_run_time` */ orderBy?: string; /** * The standard list page size. Default to 100 if not specified. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListSchedulesResponse.next_page_token of the previous * ScheduleService.ListSchedules call. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsSchedulesOperationsList. */ export interface ProjectsLocationsSchedulesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. 
*/ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsSchedulesOperationsWait. */ export interface ProjectsLocationsSchedulesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsSchedulesOperationsWaitOptions(data: any): ProjectsLocationsSchedulesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsSchedulesOperationsWaitOptions(data: any): ProjectsLocationsSchedulesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsSchedulesPatch. */ export interface ProjectsLocationsSchedulesPatchOptions { /** * Required. The update mask applies to the resource. See * google.protobuf.FieldMask. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsSchedulesPatchOptions(data: any): ProjectsLocationsSchedulesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsSchedulesPatchOptions(data: any): ProjectsLocationsSchedulesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsSpecialistPoolsDelete. */ export interface ProjectsLocationsSpecialistPoolsDeleteOptions { /** * If set to true, any specialist managers in this SpecialistPool will also * be deleted. (Otherwise, the request will only work if the SpecialistPool * has no specialist managers.) */ force?: boolean; } /** * Additional options for AIplatform#projectsLocationsSpecialistPoolsList. */ export interface ProjectsLocationsSpecialistPoolsListOptions { /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained by * ListSpecialistPoolsResponse.next_page_token of the previous * SpecialistPoolService.ListSpecialistPools call. Return first page if empty. */ pageToken?: string; /** * Mask specifying which fields to read. FieldMask represents a set of */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsSpecialistPoolsListOptions(data: any): ProjectsLocationsSpecialistPoolsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsSpecialistPoolsListOptions(data: any): ProjectsLocationsSpecialistPoolsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsSpecialistPoolsOperationsList. */ export interface ProjectsLocationsSpecialistPoolsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsSpecialistPoolsOperationsWait. */ export interface ProjectsLocationsSpecialistPoolsOperationsWaitOptions { /** * The maximum duration to wait before timing out. 
If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsSpecialistPoolsOperationsWaitOptions(data: any): ProjectsLocationsSpecialistPoolsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsSpecialistPoolsOperationsWaitOptions(data: any): ProjectsLocationsSpecialistPoolsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsSpecialistPoolsPatch. */ export interface ProjectsLocationsSpecialistPoolsPatchOptions { /** * Required. The update mask applies to the resource. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsSpecialistPoolsPatchOptions(data: any): ProjectsLocationsSpecialistPoolsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsSpecialistPoolsPatchOptions(data: any): ProjectsLocationsSpecialistPoolsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsStudiesList. */ export interface ProjectsLocationsStudiesListOptions { /** * Optional. The maximum number of studies to return per "page" of results. * If unspecified, service will pick an appropriate default. */ pageSize?: number; /** * Optional. A page token to request the next page of results. If * unspecified, there are no subsequent pages. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsStudiesOperationsList. */ export interface ProjectsLocationsStudiesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for AIplatform#projectsLocationsStudiesOperationsWait. */ export interface ProjectsLocationsStudiesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsStudiesOperationsWaitOptions(data: any): ProjectsLocationsStudiesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsStudiesOperationsWaitOptions(data: any): ProjectsLocationsStudiesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsStudiesTrialsList. */ export interface ProjectsLocationsStudiesTrialsListOptions { /** * Optional. The number of Trials to retrieve per "page" of results. If * unspecified, the service will pick an appropriate default. */ pageSize?: number; /** * Optional. A page token to request the next page of results. If * unspecified, there are no subsequent pages. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsStudiesTrialsOperationsList. 
*/ export interface ProjectsLocationsStudiesTrialsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsStudiesTrialsOperationsWait. */ export interface ProjectsLocationsStudiesTrialsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsStudiesTrialsOperationsWaitOptions(data: any): ProjectsLocationsStudiesTrialsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsStudiesTrialsOperationsWaitOptions(data: any): ProjectsLocationsStudiesTrialsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsTensorboardsBatchRead. */ export interface ProjectsLocationsTensorboardsBatchReadOptions { /** * Required. The resource names of the TensorboardTimeSeries to read data * from. Format: * `projects/{project}/locations/{location}/tensorboards/{tensorboard}/experiments/{experiment}/runs/{run}/timeSeries/{time_series}` */ timeSeries?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsCreate. */ export interface ProjectsLocationsTensorboardsExperimentsCreateOptions { /** * Required. The ID to use for the Tensorboard experiment, which becomes the * final component of the Tensorboard experiment's resource name. This value * should be 1-128 characters, and valid characters are `/a-z-/`. */ tensorboardExperimentId?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsList. */ export interface ProjectsLocationsTensorboardsExperimentsListOptions { /** * Lists the TensorboardExperiments that match the filter expression. */ filter?: string; /** * Field to use to sort the list. */ orderBy?: string; /** * The maximum number of TensorboardExperiments to return. The service may * return fewer than this value. If unspecified, at most 50 * TensorboardExperiments are returned. The maximum value is 1000; values * above 1000 are coerced to 1000. */ pageSize?: number; /** * A page token, received from a previous * TensorboardService.ListTensorboardExperiments call. Provide this to * retrieve the subsequent page. When paginating, all other parameters * provided to TensorboardService.ListTensorboardExperiments must match the * call that provided the page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsExperimentsListOptions(data: any): ProjectsLocationsTensorboardsExperimentsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsListOptions(data: any): ProjectsLocationsTensorboardsExperimentsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsOperationsList. 
*/ export interface ProjectsLocationsTensorboardsExperimentsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsOperationsWait. */ export interface ProjectsLocationsTensorboardsExperimentsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsTensorboardsExperimentsOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsExperimentsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsExperimentsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsPatch. */ export interface ProjectsLocationsTensorboardsExperimentsPatchOptions { /** * Required. Field mask is used to specify the fields to be overwritten in * the TensorboardExperiment resource by the update. The fields specified in * the update_mask are relative to the resource, not the full request. A field * is overwritten if it's in the mask. If the user does not provide a mask * then all fields are overwritten if new values are specified. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsExperimentsPatchOptions(data: any): ProjectsLocationsTensorboardsExperimentsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsPatchOptions(data: any): ProjectsLocationsTensorboardsExperimentsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsCreate. */ export interface ProjectsLocationsTensorboardsExperimentsRunsCreateOptions { /** * Required. The ID to use for the Tensorboard run, which becomes the final * component of the Tensorboard run's resource name. This value should be * 1-128 characters, and valid characters are `/a-z-/`. */ tensorboardRunId?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsList. */ export interface ProjectsLocationsTensorboardsExperimentsRunsListOptions { /** * Lists the TensorboardRuns that match the filter expression. */ filter?: string; /** * Field to use to sort the list. */ orderBy?: string; /** * The maximum number of TensorboardRuns to return. The service may return * fewer than this value. If unspecified, at most 50 TensorboardRuns are * returned. The maximum value is 1000; values above 1000 are coerced to 1000. */ pageSize?: number; /** * A page token, received from a previous * TensorboardService.ListTensorboardRuns call. Provide this to retrieve the * subsequent page. When paginating, all other parameters provided to * TensorboardService.ListTensorboardRuns must match the call that provided * the page token. 
*/ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsExperimentsRunsListOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsRunsListOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsOperationsList. */ export interface ProjectsLocationsTensorboardsExperimentsRunsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsOperationsWait. */ export interface ProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsPatch. */ export interface ProjectsLocationsTensorboardsExperimentsRunsPatchOptions { /** * Required. Field mask is used to specify the fields to be overwritten in * the TensorboardRun resource by the update. The fields specified in the * update_mask are relative to the resource, not the full request. A field is * overwritten if it's in the mask. If the user does not provide a mask then * all fields are overwritten if new values are specified. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsExperimentsRunsPatchOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsRunsPatchOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesCreate. */ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesCreateOptions { /** * Optional. The user specified unique ID to use for the * TensorboardTimeSeries, which becomes the final component of the * TensorboardTimeSeries's resource name. This value should match "a-z0-9{0, * 127}" */ tensorboardTimeSeriesId?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesList. 
*/ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions { /** * Lists the TensorboardTimeSeries that match the filter expression. */ filter?: string; /** * Field to use to sort the list. */ orderBy?: string; /** * The maximum number of TensorboardTimeSeries to return. The service may * return fewer than this value. If unspecified, at most 50 * TensorboardTimeSeries are returned. The maximum value is 1000; values above * 1000 are coerced to 1000. */ pageSize?: number; /** * A page token, received from a previous * TensorboardService.ListTensorboardTimeSeries call. Provide this to retrieve * the subsequent page. When paginating, all other parameters provided to * TensorboardService.ListTensorboardTimeSeries must match the call that * provided the page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsList. */ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWait. */ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. */ timeout?: number /* Duration */; } function serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesPatch. */ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions { /** * Required. Field mask is used to specify the fields to be overwritten in * the TensorboardTimeSeries resource by the update. The fields specified in * the update_mask are relative to the resource, not the full request. A field * is overwritten if it's in the mask. If the user does not provide a mask * then all fields are overwritten if new values are specified. 
*/ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions(data: any): ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesReadBlobData. */ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesReadBlobDataOptions { /** * IDs of the blobs to read. */ blobIds?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsExperimentsRunsTimeSeriesRead. */ export interface ProjectsLocationsTensorboardsExperimentsRunsTimeSeriesReadOptions { /** * Reads the TensorboardTimeSeries' data that match the filter expression. */ filter?: string; /** * The maximum number of TensorboardTimeSeries' data to return. This value * should be a positive integer. This value can be set to -1 to return all * data. */ maxDataPoints?: number; } /** * Additional options for AIplatform#projectsLocationsTensorboardsList. */ export interface ProjectsLocationsTensorboardsListOptions { /** * Lists the Tensorboards that match the filter expression. */ filter?: string; /** * Field to use to sort the list. */ orderBy?: string; /** * The maximum number of Tensorboards to return. The service may return fewer * than this value. If unspecified, at most 100 Tensorboards are returned. The * maximum value is 100; values above 100 are coerced to 100. */ pageSize?: number; /** * A page token, received from a previous TensorboardService.ListTensorboards * call. Provide this to retrieve the subsequent page. When paginating, all * other parameters provided to TensorboardService.ListTensorboards must match * the call that provided the page token. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsListOptions(data: any): ProjectsLocationsTensorboardsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsListOptions(data: any): ProjectsLocationsTensorboardsListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsOperationsList. */ export interface ProjectsLocationsTensorboardsOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. */ pageToken?: string; } /** * Additional options for * AIplatform#projectsLocationsTensorboardsOperationsWait. */ export interface ProjectsLocationsTensorboardsOperationsWaitOptions { /** * The maximum duration to wait before timing out. If left blank, the wait * will be at most the time permitted by the underlying HTTP/RPC protocol. If * RPC context deadline is also specified, the shorter one will be used. 
*/ timeout?: number /* Duration */; } function serializeProjectsLocationsTensorboardsOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } function deserializeProjectsLocationsTensorboardsOperationsWaitOptions(data: any): ProjectsLocationsTensorboardsOperationsWaitOptions { return { ...data, timeout: data["timeout"] !== undefined ? data["timeout"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsTensorboardsPatch. */ export interface ProjectsLocationsTensorboardsPatchOptions { /** * Required. Field mask is used to specify the fields to be overwritten in * the Tensorboard resource by the update. The fields specified in the * update_mask are relative to the resource, not the full request. A field is * overwritten if it's in the mask. If the user does not provide a mask then * all fields are overwritten if new values are specified. */ updateMask?: string /* FieldMask */; } function serializeProjectsLocationsTensorboardsPatchOptions(data: any): ProjectsLocationsTensorboardsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } function deserializeProjectsLocationsTensorboardsPatchOptions(data: any): ProjectsLocationsTensorboardsPatchOptions { return { ...data, updateMask: data["updateMask"] !== undefined ? data["updateMask"] : undefined, }; } /** * Additional options for AIplatform#projectsLocationsTrainingPipelinesList. */ export interface ProjectsLocationsTrainingPipelinesListOptions { /** * The standard list filter. Supported fields: * `display_name` supports `=`, * `!=` comparisons, and `:` wildcard. * `state` supports `=`, `!=` * comparisons. * `training_task_definition` `=`, `!=` comparisons, and `:` * wildcard. * `create_time` supports `=`, `!=`,`<`, `<=`,`>`, `>=` * comparisons. `create_time` must be in RFC 3339 format. * `labels` supports * general map functions that is: `labels.key=value` - key:value equality * `labels.key:* - key existence Some examples of using the filter are: * * `state="PIPELINE_STATE_SUCCEEDED" AND display_name:"my_pipeline_*"` * * `state!="PIPELINE_STATE_FAILED" OR display_name="my_pipeline"` * `NOT * display_name="my_pipeline"` * `create_time>"2021-05-18T00:00:00Z"` * * `training_task_definition:"*automl_text_classification*"` */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. Typically obtained via * ListTrainingPipelinesResponse.next_page_token of the previous * PipelineService.ListTrainingPipelines call. */ pageToken?: string; /** * Mask specifying which fields to read. */ readMask?: string /* FieldMask */; } function serializeProjectsLocationsTrainingPipelinesListOptions(data: any): ProjectsLocationsTrainingPipelinesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } function deserializeProjectsLocationsTrainingPipelinesListOptions(data: any): ProjectsLocationsTrainingPipelinesListOptions { return { ...data, readMask: data["readMask"] !== undefined ? data["readMask"] : undefined, }; } /** * Additional options for * AIplatform#projectsLocationsTrainingPipelinesOperationsList. */ export interface ProjectsLocationsTrainingPipelinesOperationsListOptions { /** * The standard list filter. */ filter?: string; /** * The standard list page size. */ pageSize?: number; /** * The standard list page token. 
 */
  pageToken?: string;
}

/**
 * Additional options for
 * AIplatform#projectsLocationsTrainingPipelinesOperationsWait.
 */
export interface ProjectsLocationsTrainingPipelinesOperationsWaitOptions {
  /**
   * The maximum duration to wait before timing out. If left blank, the wait
   * will be at most the time permitted by the underlying HTTP/RPC protocol. If
   * RPC context deadline is also specified, the shorter one will be used.
   */
  timeout?: number /* Duration */;
}

function serializeProjectsLocationsTrainingPipelinesOperationsWaitOptions(data: any): ProjectsLocationsTrainingPipelinesOperationsWaitOptions {
  return {
    ...data,
    timeout: data["timeout"] !== undefined ? data["timeout"] : undefined,
  };
}

function deserializeProjectsLocationsTrainingPipelinesOperationsWaitOptions(data: any): ProjectsLocationsTrainingPipelinesOperationsWaitOptions {
  return {
    ...data,
    timeout: data["timeout"] !== undefined ? data["timeout"] : undefined,
  };
}

/**
 * Additional options for AIplatform#projectsLocationsTuningJobsList.
 */
export interface ProjectsLocationsTuningJobsListOptions {
  /**
   * Optional. The standard list filter.
   */
  filter?: string;
  /**
   * Optional. The standard list page size.
   */
  pageSize?: number;
  /**
   * Optional. The standard list page token. Typically obtained via
   * ListTuningJobsResponse.next_page_token of the previous
   * GenAiTuningService.ListTuningJobs call.
   */
  pageToken?: string;
}

/**
 * Additional options for AIplatform#projectsLocationsTuningJobsOperationsList.
 */
export interface ProjectsLocationsTuningJobsOperationsListOptions {
  /**
   * The standard list filter.
   */
  filter?: string;
  /**
   * The standard list page size.
   */
  pageSize?: number;
  /**
   * The standard list page token.
   */
  pageToken?: string;
}

/**
 * Additional options for AIplatform#publishersModelsGet.
 */
export interface PublishersModelsGetOptions {
  /**
   * Optional. Token used to access Hugging Face gated models.
   */
  huggingFaceToken?: string;
  /**
   * Optional. Indicates whether the requested model is a Hugging Face model.
   */
  isHuggingFaceModel?: boolean;
  /**
   * Optional. The IETF BCP-47 language code representing the language in
   * which the publisher model's text information should be written.
   */
  languageCode?: string;
  /**
   * Optional. PublisherModel view specifying which fields to read.
   */
  view?:
    | "PUBLISHER_MODEL_VIEW_UNSPECIFIED"
    | "PUBLISHER_MODEL_VIEW_BASIC"
    | "PUBLISHER_MODEL_VIEW_FULL"
    | "PUBLISHER_MODEL_VERSION_VIEW_BASIC";
}

function decodeBase64(b64: string): Uint8Array {
  const binString = atob(b64);
  const size = binString.length;
  const bytes = new Uint8Array(size);
  for (let i = 0; i < size; i++) {
    bytes[i] = binString.charCodeAt(i);
  }
  return bytes;
}

const base64abc = ["A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","0","1","2","3","4","5","6","7","8","9","+","/"];

/**
 * CREDIT: https://gist.github.com/enepomnyaschih/72c423f727d395eeaa09697058238727
 * Encodes a given Uint8Array, ArrayBuffer or string into RFC4648 base64 representation
 * @param data
 */
function encodeBase64(uint8: Uint8Array): string {
  let result = "", i;
  const l = uint8.length;
  for (i = 2; i < l; i += 3) {
    result += base64abc[uint8[i - 2] >> 2];
    result += base64abc[((uint8[i - 2] & 0x03) << 4) | (uint8[i - 1] >> 4)];
    result += base64abc[((uint8[i - 1] & 0x0f) << 2) | (uint8[i] >> 6)];
    result += base64abc[uint8[i] & 0x3f];
  }
  if (i === l + 1) {
    // 1 octet yet to write
    result += base64abc[uint8[i - 2] >> 2];
    result += base64abc[(uint8[i - 2] & 0x03) << 4];
    result += "==";
  }
  if (i === l) {
    // 2 octets yet to write
    result += base64abc[uint8[i - 2] >> 2];
    result += base64abc[((uint8[i - 2] & 0x03) << 4) | (uint8[i - 1] >> 4)];
    result += base64abc[(uint8[i - 1] & 0x0f) << 2];
    result += "=";
  }
  return result;
}
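// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the generated surface).
//
// The *Options interfaces defined above are plain objects whose fields are
// sent as query parameters by the corresponding AIplatform method. The sketch
// below shows how ProjectsLocationsPipelineJobsListOptions might be used to
// page through PipelineJobs with an AIP-160 filter. Assumptions: the
// generated `projectsLocationsPipelineJobsList(parent, opts)` method follows
// the same (parent, opts) shape as the other list methods in this client, and
// the list response exposes `pipelineJobs` and `nextPageToken` fields; the
// project/location values are placeholders. Check the generated method and
// response types above before relying on the exact names.
async function _listRecentPipelineJobsExample(): Promise<void> {
  const client = new AIplatform();
  // Hypothetical parent resource; replace with a real project and location.
  const parent = "projects/my-project/locations/us-central1";

  // Filter and ordering syntax is documented on
  // ProjectsLocationsPipelineJobsListOptions.filter / .orderBy.
  const opts: ProjectsLocationsPipelineJobsListOptions = {
    filter: `create_time>"2021-05-18T00:00:00Z"`,
    orderBy: "create_time desc",
    pageSize: 50,
  };

  // Follow nextPageToken until the service stops returning one.
  let pageToken: string | undefined = undefined;
  do {
    const page = await client.projectsLocationsPipelineJobsList(parent, {
      ...opts,
      pageToken,
    });
    for (const job of page.pipelineJobs ?? []) {
      console.log(job.name, job.state);
    }
    pageToken = page.nextPageToken;
  } while (pageToken);
}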